text stringlengths 4 1.02M | meta dict |
|---|---|
def NumberGameIII(S):
    """Return the smallest element of S if it evenly divides every element
    of S, otherwise return -1.

    The divisibility test uses integer modulo instead of float division:
    ``float(elem) / float(min)`` loses precision for integers above 2**53,
    so the original float-based check could wrongly report -1 (or wrongly
    report success) for large inputs. Modulo is exact for any int size.

    :param S: non-empty iterable of numbers
    :return: min(S) when it divides every element of S, else -1
    :raises ValueError: if S is empty (propagated from min())
    :raises ZeroDivisionError: if min(S) == 0 (same as the original)
    """
    smallest = min(S)
    for elem in S:
        # Exact divisibility check; bail out on the first counterexample
        # instead of scanning the rest of the sequence.
        if elem % smallest != 0:
            return -1
    return smallest
| {
"content_hash": "bee18ecdb1481d056936be3b1fd4e305",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 48,
"avg_line_length": 23.923076923076923,
"alnum_prop": 0.5144694533762058,
"repo_name": "TheSriram/codefightchallenges",
"id": "a13d06b688a82c0ea015b72936e8beca106a5392",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numbergamesIII/numbergamesIII.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1972"
}
],
"symlink_target": ""
} |
from utils.splunk_search_util import (
get_search_response,
get_session_key,
preprocess_search,
run_search,
)
def test_extend_index(eventgen_test_helper):
    """Test extendIndexes config"""
    # Generate the events, then verify both the main index and the
    # extended test_* indexes each received 12 of them.
    eventgen_test_helper("eventgen_extend_index.conf").get_events()
    key = get_session_key()
    main_job = run_search(
        key, preprocess_search("index=main sourcetype=cisco")
    )
    extended_job = run_search(
        key, preprocess_search("index=test_*")
    )
    main_events = get_search_response(key, main_job)
    extended_events = get_search_response(key, extended_job)
    assert len(main_events) == 12
    assert len(extended_events) == 12
| {
"content_hash": "9340fc748b242511011b515202bff44f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 82,
"avg_line_length": 32.56521739130435,
"alnum_prop": 0.6795727636849133,
"repo_name": "splunk/eventgen",
"id": "f2eb3a3be6e2b4f45abd0b25bacb6c51c6cb259d",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/large/test_extend_index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1494"
},
{
"name": "HTML",
"bytes": "1713"
},
{
"name": "Makefile",
"bytes": "6372"
},
{
"name": "Python",
"bytes": "552120"
},
{
"name": "Ruby",
"bytes": "74"
},
{
"name": "Shell",
"bytes": "1408"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
_VERSION = '1.8.0-rc0'

# Runtime dependencies written into the wheel's install_requires (extended
# conditionally below).
REQUIRED_PACKAGES = [
    'absl-py >= 0.1.6',
    'astor >= 0.6.0',
    'gast >= 0.2.0',
    'numpy >= 1.13.3',
    'six >= 1.10.0',
    'protobuf >= 3.4.0',
    'tensorboard >= 1.7.0, < 1.8.0',
    'termcolor >= 1.1.0',
]

if sys.byteorder == 'little':
    # grpcio does not build correctly on big-endian machines due to lack of
    # BoringSSL support.
    # See https://github.com/tensorflow/tensorflow/issues/17882.
    REQUIRED_PACKAGES.append('grpcio >= 1.8.6')

# Allow overriding the published package name via a custom
# `--project_name NAME` flag; both tokens must be stripped from sys.argv
# before setuptools parses the command line.
project_name = 'tensorflow'
if '--project_name' in sys.argv:
    project_name_idx = sys.argv.index('--project_name')
    project_name = sys.argv[project_name_idx + 1]
    sys.argv.remove('--project_name')
    sys.argv.pop(project_name_idx)

# python3 requires wheel 0.26
if sys.version_info.major == 3:
    REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
    REQUIRED_PACKAGES.append('wheel')
    # mock comes with unittest.mock for python3, need to install for python2
    REQUIRED_PACKAGES.append('mock >= 2.0.0')

# tf-nightly should depend on tb-nightly
if 'tf_nightly' in project_name:
    for i, pkg in enumerate(REQUIRED_PACKAGES):
        if 'tensorboard' in pkg:
            REQUIRED_PACKAGES[i] = 'tb-nightly >= 1.8.0a0, < 1.9.0a0'
            break

# weakref.finalize and enum were introduced in Python 3.4
if sys.version_info < (3, 4):
    REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1')
    REQUIRED_PACKAGES.append('enum34 >= 1.1.6')

# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
    'freeze_graph = tensorflow.python.tools.freeze_graph:run_main',
    'toco_from_protos = tensorflow.contrib.lite.toco.python.toco_from_protos:main',
    'toco = tensorflow.contrib.lite.toco.python.toco_wrapper:main',
    'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
    # We need to keep the TensorBoard command, even though the console script
    # is now declared by the tensorboard pip package. If we remove the
    # TensorBoard command, pip will inappropriately remove it during install,
    # even though the command is not removed, just moved to a different wheel.
    'tensorboard = tensorboard.main:run_main',
]
# pylint: enable=line-too-long

# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
    CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:run_main')

# Extra dependencies needed only by the test suite (tests_require below).
TEST_PACKAGES = [
    'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
    """Distribution that always reports native extension modules.

    Marking the distribution as non-pure forces platform-specific wheels,
    since the package ships a compiled shared library.
    """

    def has_ext_modules(self):
        return True
class InstallCommand(InstallCommandBase):
    """Override the dir where the headers go."""

    def finalize_options(self):
        # Let the base class resolve all options first, then redirect the
        # header destination into <purelib>/tensorflow/include so shipped
        # headers live inside the installed package.
        result = InstallCommandBase.finalize_options(self)
        self.install_headers = os.path.join(
            self.install_purelib, 'tensorflow', 'include')
        return result
class InstallHeaders(Command):
    """Override how headers are copied.

    The install_headers that comes with setuptools copies all files to
    the same directory. But we need the files to be in a specific directory
    hierarchy for -I <include_dir> to work correctly.
    """
    # Shown by `python setup.py --help-commands`.
    description = 'install C/C++ header files'
    # (long option, short option, help text) triples in distutils format.
    user_options = [('install-dir=', 'd',
                     'directory to install header files to'),
                    ('force', 'f',
                     'force installation (overwrite existing files)'),
                    ]
    boolean_options = ['force']

    def initialize_options(self):
        # install_dir/force are resolved in finalize_options() below;
        # outfiles accumulates every destination path written by run().
        self.install_dir = None
        self.force = 0
        self.outfiles = []

    def finalize_options(self):
        # Inherit install_headers (as our install_dir) and force from the
        # parent 'install' command.
        self.set_undefined_options('install',
                                   ('install_headers', 'install_dir'),
                                   ('force', 'force'))

    def mkdir_and_copy_file(self, header):
        """Copy one header under install_dir, preserving its relative path."""
        install_dir = os.path.join(self.install_dir, os.path.dirname(header))
        # Get rid of some extra intervening directories so we can have fewer
        # directories for -I
        install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
        # Copy eigen code into tensorflow/include.
        # A symlink would do, but the wheel file that gets created ignores
        # symlink within the directory hierarchy.
        # NOTE(keveman): Figure out how to customize bdist_wheel package so
        # we can do the symlink.
        if 'external/eigen_archive/' in install_dir:
            # Eigen headers get a second copy with the bazel external-repo
            # prefix stripped, in addition to the normal copy below.
            extra_dir = install_dir.replace('external/eigen_archive', '')
            if not os.path.exists(extra_dir):
                self.mkpath(extra_dir)
            self.copy_file(header, extra_dir)
        if not os.path.exists(install_dir):
            self.mkpath(install_dir)
        return self.copy_file(header, install_dir)

    def run(self):
        """Copy every header registered via setup(headers=...)."""
        hdrs = self.distribution.headers
        if not hdrs:
            return
        self.mkpath(self.install_dir)
        for header in hdrs:
            # copy_file returns (destination_name, copied_flag); only the
            # destination is recorded for --record support.
            (out, _) = self.mkdir_and_copy_file(header)
            self.outfiles.append(out)

    def get_inputs(self):
        return self.distribution.headers or []

    def get_outputs(self):
        # Consumed by `install --record` to list files written.
        return self.outfiles
def find_files(pattern, root):
    """Yield the path of every file below *root* whose basename matches
    the shell-style *pattern* (fnmatch semantics)."""
    for dirpath, _, filenames in os.walk(root):
        matched = fnmatch.filter(filenames, pattern)
        for name in matched:
            yield os.path.join(dirpath, name)
# Non-Python data files bundled from bazel's 'external' tree; paths are
# relative to the tensorflow package dir, hence the '../' prefix.
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]

# Bazel places generated shared libraries under _solib_* directories at
# the build root; include their contents too.
so_lib_paths = [
    i for i in os.listdir('.')
    if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
for path in so_lib_paths:
    matches.extend(
        ['../' + x for x in find_files('*', path) if '.py' not in x]
    )

# The native extension has a platform-specific suffix (.pyd on Windows).
if os.name == 'nt':
    EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
    EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'

# Headers shipped in the wheel so users can compile custom ops against
# them (installed by InstallHeaders into tensorflow/include).
headers = (list(find_files('*.h', 'tensorflow/core')) +
           list(find_files('*.h', 'tensorflow/stream_executor')) +
           list(find_files('*.h', 'google/protobuf_archive/src')) +
           list(find_files('*', 'third_party/eigen3')) +
           list(find_files('*', 'external/eigen_archive')))
# Final package definition. Note that version strips '-' so the semver
# string '1.8.0-rc0' becomes the pip-compatible '1.8.0rc0'.
setup(
    name=project_name,
    version=_VERSION.replace('-', ''),
    description='TensorFlow helps the tensors flow',
    long_description='',
    url='https://www.tensorflow.org/',
    author='Google Inc.',
    author_email='opensource@google.com',
    # Contained modules and scripts.
    packages=find_packages(),
    entry_points={
        'console_scripts': CONSOLE_SCRIPTS,
    },
    headers=headers,
    install_requires=REQUIRED_PACKAGES,
    tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
    # Add in any packaged data.
    include_package_data=True,
    package_data={
        'tensorflow': [
            EXTENSION_NAME,
        ] + matches,
    },
    zip_safe=False,
    # Custom distclass/cmdclass wire in the binary-wheel and header-install
    # behavior defined above.
    distclass=BinaryDistribution,
    cmdclass={
        'install_headers': InstallHeaders,
        'install': InstallCommand,
    },
    # PyPI package information.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    license='Apache 2.0',
    keywords='tensorflow tensor machine learning',)
| {
"content_hash": "9ac68eb2609090f7ab2fef1fb6d04b34",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 83,
"avg_line_length": 33.265306122448976,
"alnum_prop": 0.6533742331288344,
"repo_name": "eaplatanios/tensorflow",
"id": "f676f040ad3ebc7f377c7a4c224c5dba95f2f108",
"size": "8840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/tools/pip_package/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "163987"
},
{
"name": "C++",
"bytes": "34944901"
},
{
"name": "CMake",
"bytes": "5123"
},
{
"name": "CSS",
"bytes": "9206"
},
{
"name": "Go",
"bytes": "1047216"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "423531"
},
{
"name": "JavaScript",
"bytes": "3127"
},
{
"name": "Jupyter Notebook",
"bytes": "1833814"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "19718973"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Scala",
"bytes": "3606806"
},
{
"name": "Shell",
"bytes": "352897"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
'''
Copyright 2015 Ivan Sadikov
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# import os, sys and update path
import os
import sys

# set default path as an external directory of the module
# (two dirname() calls climb from this file to the directory containing
# the package, so the `import analytics.*` statements below resolve)
DIR_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(DIR_PATH)

# import libs
import unittest

# select what tests to run -- flip an entry to False to skip that block
# (keys are checked by _checkTest below)
_RUN_TESTS = {
    "exceptions": True,
    "loading": True,
    "utils": True,
    "algorithms": True,
    "relcomp_alg": True,
    "query_engine": True,
    "selector": True,
    "analyser": True,
    "datamanager": True,
    "core_attribute": True,
    "core": True,
    "core_map": True,
    "core_processor": True,
    "service": True,
    "integration": True
}
def _checkTest(key):
    """Return the enabled flag for test block *key* in _RUN_TESTS.

    Unknown keys count as disabled (False). Equivalent to the original
    `key in _RUN_TESTS and _RUN_TESTS[key]`, but with a single dict
    lookup via the idiomatic dict.get.
    """
    return _RUN_TESTS.get(key, False)
def _collectSystemTests(suites):
# exceptions
if _checkTest("exceptions"):
import analytics.exceptions.tests.unittest_exceptions as unittest_exceptions
suites.addTest(unittest_exceptions.loadSuites())
else:
print "@skip: exceptions tests"
# loading
if _checkTest("loading"):
import analytics.loading.tests.unittest_loading as unittest_loading
suites.addTest(unittest_loading.loadSuites())
else:
print "@skip: loading tests"
# utils
if _checkTest("utils"):
import analytics.utils.tests.unittest_utils as unittest_utils
suites.addTest(unittest_utils.loadSuites())
else:
print "@skip: utils tests"
# algorithms
if _checkTest("algorithms"):
import analytics.algorithms.tests.unittest_algorithms as unittest_algorithms
suites.addTest(unittest_algorithms.loadSuites())
else:
print "@skip: algorithms tests"
# relative comparion algorithm
if _checkTest("relcomp_alg"):
import analytics.algorithms.tests.unittest_relativecomp as unittest_relativecomp
suites.addTest(unittest_relativecomp.loadSuites())
else:
print "@skip: relative comparison algorithm tests"
# query engine
if _checkTest("query_engine"):
import analytics.utils.tests.unittest_queryengine as unittest_queryengine
suites.addTest(unittest_queryengine.loadSuites())
else:
print "@skip: query engine tests"
# selector
if _checkTest("selector"):
import analytics.selector.tests.unittest_selector as unittest_selector
suites.addTest(unittest_selector.loadSuites())
else:
print "@skip: selector tests"
# analyser
if _checkTest("analyser"):
import analytics.analyser.tests.unittest_analyser as unittest_analyser
suites.addTest(unittest_analyser.loadSuites())
else:
print "@skip: analyser tests"
# datamanager
if _checkTest("datamanager"):
import analytics.datamanager.tests.unittest_datamanager as unittest_datamanager
suites.addTest(unittest_datamanager.loadSuites())
else:
print "@skip: data manager tests"
# core attributes
if _checkTest("core_attribute"):
import analytics.core.tests.unittest_core_attribute as unittest_core_attribute
suites.addTest(unittest_core_attribute.loadSuites())
else:
print "@skip: core attribute tests"
# core
if _checkTest("core"):
import analytics.core.tests.unittest_core as unittest_core
suites.addTest(unittest_core.loadSuites())
else:
print "@skip: core tests"
# core maps
if _checkTest("core_map"):
import analytics.core.tests.unittest_core_map as unittest_core_map
suites.addTest(unittest_core_map.loadSuites())
else:
print "@skip: core map tests"
# core processor
if _checkTest("core_processor"):
import analytics.core.tests.unittest_core_processor as unittest_core_processor
suites.addTest(unittest_core_processor.loadSuites())
else:
print "@skip: core processor tests"
# service
if _checkTest("service"):
import analytics.tests.unittest_service as unittest_service
suites.addTest(unittest_service.loadSuites())
else:
print "@skip: service tests"
# integration tests
if _checkTest("integration"):
import analytics.tests.integrationtest_service as integrationtest_service
suites.addTest(integrationtest_service.loadSuites())
else:
print "@skip: integration tests"
if __name__ == '__main__':
    # Build one aggregate suite from all enabled test blocks, run it with
    # a verbose text runner, then report how many blocks were skipped.
    suites = unittest.TestSuite()
    print ""
    print "### [:Analytics] Gathering tests info ###"
    print "-" * 70
    _collectSystemTests(suites)
    print ""
    print "### [:Analytics] Running tests ###"
    print "-" * 70
    unittest.TextTestRunner(verbosity=2).run(suites)
    # Count entries disabled in _RUN_TESTS so reduced coverage is flagged.
    num = len([x for x in _RUN_TESTS.values() if not x])
    print "%s Number of test blocks skipped: %d" %("OK" if num==0 else "WARN", num)
    print ""
| {
"content_hash": "497bfd06ac648365468bbcd14bc83887",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 88,
"avg_line_length": 32.218934911242606,
"alnum_prop": 0.6637281910009183,
"repo_name": "sadikovi/pulsar",
"id": "f639b467eaee974194a0e948b28a135dbed01067",
"size": "5468",
"binary": false,
"copies": "1",
"ref": "refs/heads/analytics",
"path": "analytics/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6019"
},
{
"name": "HTML",
"bytes": "20112"
},
{
"name": "JavaScript",
"bytes": "119345"
},
{
"name": "Python",
"bytes": "281497"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
from ref.demo_items import create_full_test_data
from scm.models import LogicalComponentVersion, InstallationMethod, Delivery, \
InstallableItem
from ref.models import LogicalComponent, ComponentImplementationClass
from django.db.transaction import atomic
@atomic
def create_test_is():
    """Create a chain of demo deliveries (install sets) for SCM tests.

    Builds on create_full_test_data(): registers version objects for the
    two 'main database' logical components (applications SFT1 and SFT2),
    two installation methods, then a sequence of Delivery objects whose
    InstallableItems declare dependencies on earlier versions. Everything
    runs inside a single transaction (@atomic).

    :return: list of the Delivery objects intended to be applied in tests
             (note: is7, isr2 and isr3 are created but not returned)
    """
    res = []
    create_full_test_data()
    lc_rdbms_module1 = LogicalComponent.objects.get(name="main database", application__alternate_name_1='SFT1')
    lc_rdbms_module2 = LogicalComponent.objects.get(name="main database", application__alternate_name_1='SFT2')
    # Versions (independent of II)
    rdbms1_v1 = LogicalComponentVersion(version='v1', logical_component=lc_rdbms_module1)
    rdbms1_v1.save()
    rdbms1_v2 = LogicalComponentVersion(version='v1.2', logical_component=lc_rdbms_module1)
    rdbms1_v2.save()
    rdbms1_v3 = LogicalComponentVersion(version='v1.3', logical_component=lc_rdbms_module1)
    rdbms1_v3.save()
    # 'v-1'..'v-3' form a separate "reverse" version chain used below.
    rdbms1_vr1 = LogicalComponentVersion(version='v-1', logical_component=lc_rdbms_module1)
    rdbms1_vr1.save()
    rdbms1_vr2 = LogicalComponentVersion(version='v-2', logical_component=lc_rdbms_module1)
    rdbms1_vr2.save()
    rdbms1_vr3 = LogicalComponentVersion(version='v-3', logical_component=lc_rdbms_module1)
    rdbms1_vr3.save()
    rdbms2_v1 = LogicalComponentVersion(version='a', logical_component=lc_rdbms_module2)
    rdbms2_v1.save()
    rdbms2_v2 = LogicalComponentVersion(version='b', logical_component=lc_rdbms_module2)
    rdbms2_v2.save()
    rdbms2_v3 = LogicalComponentVersion(version='c', logical_component=lc_rdbms_module2)
    rdbms2_v3.save()
    rdbms2_v4 = LogicalComponentVersion(version='d', logical_component=lc_rdbms_module2)
    rdbms2_v4.save()
    rdbms2_v5 = LogicalComponentVersion(version='e', logical_component=lc_rdbms_module2)
    rdbms2_v5.save()
    rdbms2_v6 = LogicalComponentVersion(version='f', logical_component=lc_rdbms_module2)
    rdbms2_v6.save()
    # Installation methods (independent of IS)
    rdbms1_meth1 = InstallationMethod(name='Scripts SQL Oracle v1.0', halts_service=True)
    rdbms1_meth1.save()
    rdbms1_meth1.method_compatible_with.add(ComponentImplementationClass.objects.get(name='soft1_database_main_oracle'), ComponentImplementationClass.objects.get(name='int_database_main_oracle'))
    rdbms1_meth2 = InstallationMethod(name='Scripts SQL MySQL v1.0', halts_service=True)
    rdbms1_meth2.save()
    rdbms1_meth2.method_compatible_with.add(ComponentImplementationClass.objects.get(name='int_database_main_mysql_dedicated'))
    # First IS
    is1 = Delivery(name='SYSTEM1_INIT', description='Initial delivery')
    is1.save()
    is1_ii1 = InstallableItem(what_is_installed=rdbms1_v1, belongs_to_set=is1, is_full=True, data_loss=True)
    is1_ii1.save()
    is1_ii1.how_to_install.add(rdbms1_meth1)
    is1_ii2 = InstallableItem(what_is_installed=rdbms2_v1, belongs_to_set=is1, is_full=True, data_loss=True)
    is1_ii2.save()
    is1_ii2.how_to_install.add(rdbms1_meth1)
    res.append(is1)
    # Second IS
    is2 = Delivery(name='SYSTEM1_2', description='Solves all issues. Once again.')
    is2.save()
    is2_ii1 = InstallableItem(what_is_installed=rdbms1_v2, belongs_to_set=is2)
    is2_ii1.save()
    is2_ii1.how_to_install.add(rdbms1_meth1)
    is2_ii2 = InstallableItem(what_is_installed=rdbms2_v2, belongs_to_set=is2)
    is2_ii2.save()
    is2_ii2.how_to_install.add(rdbms1_meth1)
    is2_ii1.dependsOn(rdbms1_v1, '==')
    is2_ii2.dependsOn(rdbms2_v1, '==')
    res.append(is2)
    # Third IS
    is3 = Delivery(name='SYSTEM1_3', description='blah.')
    is3.save()
    is3_ii1 = InstallableItem(what_is_installed=rdbms2_v3, belongs_to_set=is3)
    is3_ii1.save()
    is3_ii1.how_to_install.add(rdbms1_meth1)
    is3_ii1.dependsOn(rdbms2_v1, '==')
    is3_ii1.dependsOn(rdbms1_v1, '>=')
    res.append(is3)
    # Fourth IS
    is4 = Delivery(name='SYSTEM1_4', description='blah.')
    is4.save()
    is4_ii1 = InstallableItem(what_is_installed=rdbms2_v4, belongs_to_set=is4)
    is4_ii1.save()
    is4_ii1.how_to_install.add(rdbms1_meth1)
    is4_ii1.dependsOn(rdbms2_v3, '>=')
    is4_ii1.dependsOn(rdbms1_v2, '==')
    res.append(is4)
    # Fifth IS
    is5 = Delivery(name='SYSTEM1_5', description='blah.')
    is5.save()
    is5_ii1 = InstallableItem(what_is_installed=rdbms2_v5, belongs_to_set=is5)
    is5_ii1.save()
    is5_ii1.how_to_install.add(rdbms1_meth1)
    is5_ii1.dependsOn(rdbms2_v1, '>=')
    is5_ii1.dependsOn(rdbms2_v3, '<=')
    is5_ii1.dependsOn(rdbms1_v2, '==')
    res.append(is5)
    # Sixth IS: same version - different deps
    is6 = Delivery(name='SYSTEM1_6', description='blah.')
    is6.save()
    is6_ii1 = InstallableItem(what_is_installed=rdbms2_v5, belongs_to_set=is6)
    is6_ii1.save()
    is6_ii1.how_to_install.add(rdbms1_meth1)
    is6_ii1.dependsOn(rdbms2_v4, '==')
    is6_ii1.dependsOn(rdbms1_v2, '==')
    res.append(is6)
    # Seventh IS: final one - will not be applied, just to keep something to be applied for manual tests
    is7 = Delivery(name='SYSTEM1_7', description='blah.')
    is7.save()
    is7_ii1 = InstallableItem(what_is_installed=rdbms2_v6, belongs_to_set=is7)
    is7_ii1.save()
    is7_ii1.how_to_install.add(rdbms1_meth1)
    is7_ii1.dependsOn(rdbms2_v5, '==')
    is7_ii1.dependsOn(rdbms1_v2, '==')
    # NOTE(review): this appends is6 a SECOND time (already appended above)
    # instead of is7 -- looks like a copy-paste slip, although the comment
    # above says is7 should stay unapplied; confirm whether the intent was
    # to drop this line or to append is7.
    res.append(is6)
    # Reverse chain IS 1 (version -1)
    isr1 = Delivery(name='SYSTEM1_RV1', description='blah.')
    isr1.save()
    isr1_ii1 = InstallableItem(what_is_installed=rdbms1_vr1, belongs_to_set=isr1)
    isr1_ii1.save()
    isr1_ii1.how_to_install.add(rdbms1_meth1)
    isr1_ii1.dependsOn(rdbms1_v1, '<=')
    res.append(isr1)
    # Reverse chain IS 2 (version -2)
    # NOTE(review): isr2 is never appended to res -- presumably intentional
    # (reverse-chain fixtures kept unapplied), but verify.
    isr2 = Delivery(name='SYSTEM1_RV2', description='blah.')
    isr2.save()
    isr2_ii1 = InstallableItem(what_is_installed=rdbms1_vr2, belongs_to_set=isr2)
    isr2_ii1.save()
    isr2_ii1.how_to_install.add(rdbms1_meth1)
    isr2_ii1.dependsOn(rdbms1_vr1, '<=')
    # Reverse chain IS 3 (version -3)
    isr3 = Delivery(name='SYSTEM1_RV3', description='blah.')
    isr3.save()
    isr3_ii1 = InstallableItem(what_is_installed=rdbms1_vr3, belongs_to_set=isr3)
    isr3_ii1.save()
    isr3_ii1.how_to_install.add(rdbms1_meth1)
    # NOTE(review): this targets isr2_ii1 inside the isr3 section --
    # presumably a copy-paste slip for isr3_ii1; confirm which item the
    # '>= v-2' dependency was meant for.
    isr2_ii1.dependsOn(rdbms1_vr2, '>=')
    return res
| {
"content_hash": "8cdc3a6e1e69be07c4969c78d56f2900",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 195,
"avg_line_length": 35.14525139664804,
"alnum_prop": 0.6943252265140677,
"repo_name": "digitalfox/MAGE",
"id": "47fd824deb4827afeb8692d055d9506fba943e98",
"size": "6307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scm/demo_items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15540"
},
{
"name": "HTML",
"bytes": "84897"
},
{
"name": "JavaScript",
"bytes": "6024"
},
{
"name": "Python",
"bytes": "335279"
},
{
"name": "Shell",
"bytes": "17844"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APITestCase
from cyidentity.cycontacts.models import Contact, Address, EmailAddress, PhoneNumber
import json
class ContactAPITestCase(APITestCase):
    """CRUD tests for the /api/identity/contacts/ REST endpoint."""

    def setUp(self):
        """Reset all identity tables, create one fixture contact with a
        phone number, address and email, and log in as a superuser."""
        # Wipe every related table so each test starts from a known state.
        Contact.objects.all().delete()
        PhoneNumber.objects.all().delete()
        EmailAddress.objects.all().delete()
        Address.objects.all().delete()
        User.objects.all().delete()
        self.contact = Contact.objects.create(
            first_name='foo',
            last_name='bar'
        )
        self.phone_number = PhoneNumber.objects.create(
            contact=self.contact,
            phone_number='555-555-5555'
        )
        self.address = Address.objects.create(
            contact=self.contact,
            line1='555 Any street',
            city='Anytown',
            state='MD',
            zip_code='57252'
        )
        self.email_address = EmailAddress.objects.create(
            contact=self.contact,
            email='foo@bar.com'
        )
        # Superuser login so the API permission checks pass.
        self.user = User.objects.create_superuser(
            username='admin', password='admin', email='foo@admin.com'
        )
        self.client.login(username='admin', password='admin')

    def test_get_contact(self):
        """GET of the fixture contact returns its name fields."""
        response = self.client.get('/api/identity/contacts/%d/' % self.contact.id)
        self.assertEqual(response.data.get('last_name'), 'bar')
        self.assertEqual(response.data.get('first_name'), 'foo')

    def test_create_contact(self):
        """POST with a full payload creates the contact (201) and persists it."""
        contact = {
            'last_name': u'Bing',
            'first_name': u'Larry',
            'title': 'Sir',
            'organization_name': 'Department of Silly Walks',
            'date_of_birth': '1900-03-02',
            'prefix_title': 'Mr',
            'website': 'http://example.org',
            'organization_unit': 'Department of Pointless Arguments',
            'date_of_death': '2020-05-02',
            'suffix_title': 'Sr',
            'middle_name': 'Billy',
            'nickname': 'Bobby',
            'gender': 'm',
            'avatar': u'http://example.default-avatar.jpg'
        }
        response = self.client.post('/api/identity/contacts/', contact, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Re-read from the DB to confirm the write went through.
        contact = Contact.objects.filter(last_name='Bing')[0]
        self.assertEqual(contact.title, 'Sir')

    def test_update_contact(self):
        """PUT replaces the contact's fields and echoes the new values."""
        contact = Contact.objects.create(
            first_name='Bad',
            last_name='Name'
        )
        response = self.client.get('/api/identity/contacts/%d/' % contact.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data = {'first_name': 'Good', 'last_name': 'Name'}
        response = self.client.put('/api/identity/contacts/%d/' % contact.id, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('first_name'), 'Good')

    def test_delete_contact(self):
        """DELETE removes an existing contact and returns 204."""
        contact = Contact.objects.create(
            first_name='Mr',
            last_name='Man'
        )
        response = self.client.get('/api/identity/contacts/%d/' % contact.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response = self.client.delete('/api/identity/contacts/%d/' % contact.id)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
| {
"content_hash": "42c8df85119564ed8fc61378a9536004",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 86,
"avg_line_length": 34.2970297029703,
"alnum_prop": 0.5894919168591224,
"repo_name": "shawnhermans/cyborgcrm",
"id": "81185a1d893d2ac2ab61d1ea7d2027030a4961c7",
"size": "3464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyidentity/cycontacts/tests/test_api.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "26682"
},
{
"name": "DIGITAL Command Language",
"bytes": "667"
},
{
"name": "HTML",
"bytes": "405415"
},
{
"name": "JavaScript",
"bytes": "735"
},
{
"name": "Python",
"bytes": "100893"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
} |
"""Generic Node base class for all workers that run on hosts."""
import inspect
import os
import random
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import importutils
import osprofiler.notifier
from osprofiler import profiler
import osprofiler.web
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import base as objects_base
from cinder.openstack.common import loopingcall
from cinder.openstack.common import service
from cinder import rpc
from cinder import version
from cinder import wsgi
LOG = logging.getLogger(__name__)
service_opts = [
cfg.IntOpt('report_interval',
default=10,
help='Interval, in seconds, between nodes reporting state '
'to datastore'),
cfg.IntOpt('periodic_interval',
default=60,
help='Interval, in seconds, between running periodic tasks'),
cfg.IntOpt('periodic_fuzzy_delay',
default=60,
help='Range, in seconds, to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
cfg.StrOpt('osapi_volume_listen',
default="0.0.0.0",
help='IP address on which OpenStack Volume API listens'),
cfg.IntOpt('osapi_volume_listen_port',
default=8776,
help='Port on which OpenStack Volume API listens'),
cfg.IntOpt('osapi_volume_workers',
help='Number of workers for OpenStack Volume API service. '
'The default is equal to the number of CPUs available.'), ]
profiler_opts = [
cfg.BoolOpt("profiler_enabled", default=False,
help=_('If False fully disable profiling feature.')),
cfg.BoolOpt("trace_sqlalchemy", default=False,
help=_("If False doesn't trace SQL requests."))
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.register_opts(profiler_opts, group="profiler")
def setup_profiler(binary, host):
if CONF.profiler.profiler_enabled:
_notifier = osprofiler.notifier.create(
"Messaging", messaging, context.get_admin_context().to_dict(),
rpc.TRANSPORT, "cinder", binary, host)
osprofiler.notifier.set(_notifier)
LOG.warning(
_LW("OSProfiler is enabled.\nIt means that person who knows "
"any of hmac_keys that are specified in "
"/etc/cinder/api-paste.ini can trace his requests. \n"
"In real life only operator can read this file so there "
"is no security issue. Note that even if person can "
"trigger profiler, only admin user can retrieve trace "
"information.\n"
"To disable OSprofiler set in cinder.conf:\n"
"[profiler]\nenabled=false"))
else:
osprofiler.web.disable()
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
it state to the database services table.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_interval=None, periodic_fuzzy_delay=None,
service_name=None, *args, **kwargs):
super(Service, self).__init__()
if not rpc.initialized():
rpc.init(CONF)
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
manager_class = importutils.import_class(self.manager_class_name)
manager_class = profiler.trace_cls("rpc")(manager_class)
self.manager = manager_class(host=self.host,
service_name=service_name,
*args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.basic_config_check()
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
setup_profiler(binary, host)
def start(self):
version_string = version.version_string()
LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
{'topic': self.topic, 'version_string': version_string})
self.model_disconnected = False
self.manager.init_host()
ctxt = context.get_admin_context()
try:
service_ref = db.service_get_by_args(ctxt,
self.host,
self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
self._create_service_ref(ctxt)
LOG.debug("Creating RPC server for service %s", self.topic)
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [self.manager]
endpoints.extend(self.manager.additional_endpoints)
serializer = objects_base.CinderObjectSerializer()
self.rpcserver = rpc.get_server(target, endpoints, serializer)
self.rpcserver.start()
self.manager.init_host_with_rpc()
if self.report_interval:
pulse = loopingcall.FixedIntervalLoopingCall(
self.report_state)
pulse.start(interval=self.report_interval,
initial_delay=self.report_interval)
self.timers.append(pulse)
if self.periodic_interval:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = loopingcall.FixedIntervalLoopingCall(
self.periodic_tasks)
periodic.start(interval=self.periodic_interval,
initial_delay=initial_delay)
self.timers.append(periodic)
def basic_config_check(self):
"""Perform basic config checks before starting service."""
# Make sure report interval is less than service down time
if self.report_interval:
if CONF.service_down_time <= self.report_interval:
new_down_time = int(self.report_interval * 2.5)
LOG.warning(
_LW("Report interval must be less than service down "
"time. Current config service_down_time: "
"%(service_down_time)s, report_interval for this: "
"service is: %(report_interval)s. Setting global "
"service_down_time to: %(new_down_time)s"),
{'service_down_time': CONF.service_down_time,
'report_interval': self.report_interval,
'new_down_time': new_down_time})
CONF.set_override('service_down_time', new_down_time)
def _create_service_ref(self, context):
zone = CONF.storage_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': zone})
self.service_id = service_ref['id']
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_interval=None,
               periodic_fuzzy_delay=None, service_name=None):
        """Instantiates class and passes back application object.

        :param host: defaults to CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'cinder-' part
        :param manager: defaults to CONF.<topic>_manager
        :param report_interval: defaults to CONF.report_interval
        :param periodic_interval: defaults to CONF.periodic_interval
        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
        :param service_name: optional name passed through unchanged to the
            service constructor

        """
        if not host:
            host = CONF.host
        if not binary:
            # inspect.stack()[-1][1] is the filename of the outermost
            # frame, i.e. the executable that started this process.
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary
        if not manager:
            # Strip the 'cinder-' prefix and look up the matching
            # manager option, e.g. 'volume_manager' for 'cinder-volume'.
            subtopic = topic.rpartition('cinder-')[2]
            manager = CONF.get('%s_manager' % subtopic, None)
        if report_interval is None:
            report_interval = CONF.report_interval
        if periodic_interval is None:
            periodic_interval = CONF.periodic_interval
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_interval=periodic_interval,
                          periodic_fuzzy_delay=periodic_fuzzy_delay,
                          service_name=service_name)

        return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
try:
db.service_destroy(context.get_admin_context(), self.service_id)
except exception.NotFound:
LOG.warning(_LW('Service killed that has no database entry'))
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.rpcserver.stop()
except Exception:
pass
for x in self.timers:
try:
x.stop()
except Exception:
pass
self.timers = []
super(Service, self).stop()
def wait(self):
for x in self.timers:
try:
x.wait()
except Exception:
pass
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        zone = CONF.storage_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                # Our row vanished (e.g. wiped by an operator); recreate
                # it so the heartbeat can continue.
                LOG.debug('The service database object disappeared, '
                          'recreating it.')
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            # Bump the heartbeat counter; only include the AZ in the
            # update when it actually changed.
            state_catalog['report_count'] = service_ref['report_count'] + 1
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone

            db.service_update(ctxt,
                              self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error(_LE('Recovered model server connection!'))

        except db_exc.DBConnectionError:
            # Log only on the transition to disconnected; repeated
            # failures stay quiet until we recover.
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('model server went away'))

        # NOTE(jsbryant) Other DB errors can happen in HA configurations.
        # such errors shouldn't kill this thread, so we handle them here.
        except db_exc.DBError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('DBError encountered: '))
class WSGIService(object):
    """Provides ability to launch API from a 'paste' configuration."""

    def __init__(self, name, loader=None):
        """Initialize, but do not start the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None

        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        self.app = self.loader.load_app(name)
        # Listen address, port and worker count come from per-service
        # options named '<name>_listen', '<name>_listen_port' and
        # '<name>_workers'.
        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(CONF, '%s_listen_port' % name, 0)
        self.workers = (getattr(CONF, '%s_workers' % name, None) or
                        processutils.get_worker_count())
        if self.workers and self.workers < 1:
            worker_name = '%s_workers' % name
            msg = (_("%(worker_name)s value of %(workers)d is invalid, "
                     "must be greater than 0.") %
                   {'worker_name': worker_name,
                    'workers': self.workers})
            raise exception.InvalidInput(msg)
        setup_profiler(name, self.host)
        self.server = wsgi.Server(name,
                                  self.app,
                                  host=self.host,
                                  port=self.port)

    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.

        :returns: a Manager instance, or None.

        """
        fl = '%s_manager' % self.name
        if fl not in CONF:
            return None

        manager_class_name = CONF.get(fl, None)
        if not manager_class_name:
            return None

        manager_class = importutils.import_class(manager_class_name)
        return manager_class()

    def start(self):
        """Start serving this service using loaded configuration.

        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.

        :returns: None

        """
        if self.manager:
            self.manager.init_host()
        self.server.start()
        self.port = self.server.port

    def stop(self):
        """Stop serving this API.

        :returns: None

        """
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API.

        :returns: None

        """
        self.server.wait()

    def reset(self):
        """Reset server greenpool size to default.

        :returns: None

        """
        self.server.reset()
def process_launcher():
    """Return a new oslo-service ProcessLauncher instance."""
    return service.ProcessLauncher()
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
    """Launch *server* via the module-global launcher.

    May only be called once per process; a second call raises
    RuntimeError.
    """
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))

    _launcher = service.launch(server, workers=workers)
def wait():
    """Log the active configuration, then block on the global launcher.

    Option values whose names suggest secrets (passwords, keys) and SQL
    connection strings with embedded credentials are logged without
    their values.  A KeyboardInterrupt stops the launcher cleanly, and
    RPC resources are released before returning.
    """
    LOG.debug('Full set of CONF:')
    for flag in CONF:
        flag_get = CONF.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        #
        # Guard against flag_get being None: 'in None' would raise a
        # TypeError when sql_connection is unset.
        if ("_password" in flag or "_key" in flag or
                (flag == "sql_connection" and flag_get is not None and
                 ("mysql:" in flag_get or "postgresql:" in flag_get))):
            LOG.debug('%s : FLAG SET ', flag)
        else:
            LOG.debug('%(flag)s : %(flag_get)s',
                      {'flag': flag, 'flag_get': flag_get})
    try:
        _launcher.wait()
    except KeyboardInterrupt:
        _launcher.stop()
    rpc.cleanup()
class Launcher(object):
    """Minimal launcher exposing the module-level serve()/wait() helpers.

    Returned by get_launcher() on Windows, where oslo's ProcessLauncher
    cannot be used (see the note in get_launcher()).
    """

    def __init__(self):
        # Delegate directly to the module-level functions.
        self.launch_service = serve
        self.wait = wait
def get_launcher():
    """Return a platform-appropriate service launcher.

    Note(lpetrut): ProcessLauncher uses green pipes which fail on
    Windows due to missing support of non-blocking I/O pipes. For this
    reason, the service must be spawned differently on Windows, using
    the ServiceLauncher class instead.
    """
    if os.name != 'nt':
        return process_launcher()

    return Launcher()
| {
"content_hash": "dd59d7e4564d9e8f5b8bf406331a97d1",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 79,
"avg_line_length": 36.68791208791209,
"alnum_prop": 0.5799436889714251,
"repo_name": "tmenjo/cinder-2015.1.1",
"id": "26403912e729c7c3fd85f4c71c68daee72301679",
"size": "17463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10804398"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.ConfFiles import ConfFile, ConfFiles
from PLC.Nodes import Node, Nodes
from PLC.Auth import Auth
class AddConfFileToNode(Method):
    """
    Adds a configuration file to the specified node. If the node is
    already linked to the configuration file, no errors are returned.

    Returns 1 if successful, faults otherwise.
    """

    # Only admins may link conf files to nodes.
    roles = ['admin']

    accepts = [
        Auth(),
        ConfFile.fields['conf_file_id'],
        Mixed(Node.fields['node_id'],
              Node.fields['hostname'])
        ]

    returns = Parameter(int, '1 if successful')

    def call(self, auth, conf_file_id, node_id_or_hostname):
        """Link conf_file_id to the given node; idempotent on re-adds."""
        # Get configuration file
        conf_files = ConfFiles(self.api, [conf_file_id])
        if not conf_files:
            raise PLCInvalidArgument("No such configuration file")
        conf_file = conf_files[0]

        # Get node
        nodes = Nodes(self.api, [node_id_or_hostname])
        if not nodes:
            raise PLCInvalidArgument("No such node")
        node = nodes[0]

        # A node with a peer_id is not local and cannot be modified here.
        if node['peer_id'] is not None:
            raise PLCInvalidArgument("Not a local node")

        # Link configuration file to node
        if node['node_id'] not in conf_file['node_ids']:
            conf_file.add_node(node)

        # Log affected objects
        self.event_objects = {'ConfFile': [conf_file_id],
                              'Node': [node['node_id']] }

        return 1
| {
"content_hash": "f333830d678b0ffcbd90c9530417ae2f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 69,
"avg_line_length": 29.96078431372549,
"alnum_prop": 0.606020942408377,
"repo_name": "dreibh/planetlab-lxc-plcapi",
"id": "404b8e215409bc436fcecd8ef32225212b57cbf3",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PLC/Methods/AddConfFileToNode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "724"
},
{
"name": "Makefile",
"bytes": "2995"
},
{
"name": "PHP",
"bytes": "574445"
},
{
"name": "PLpgSQL",
"bytes": "2764"
},
{
"name": "Perl",
"bytes": "1350"
},
{
"name": "Python",
"bytes": "871238"
},
{
"name": "Shell",
"bytes": "31392"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
import logging
import re
import uuid
from collections import defaultdict
from django import forms
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.utils import six
from django.utils.six.moves.urllib.error import HTTPError, URLError
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.hostingsvcs.bugtracker import BugTracker
from reviewboard.hostingsvcs.errors import (AuthorizationError,
HostingServiceError,
InvalidPlanError,
RepositoryError,
TwoFactorAuthCodeRequiredError)
from reviewboard.hostingsvcs.forms import HostingServiceForm
from reviewboard.hostingsvcs.hook_utils import (close_all_review_requests,
get_git_branch_name,
get_review_request_id,
get_server_url)
from reviewboard.hostingsvcs.repository import RemoteRepository
from reviewboard.hostingsvcs.service import (HostingService,
HostingServiceClient)
from reviewboard.hostingsvcs.utils.paginator import (APIPaginator,
ProxyPaginator)
from reviewboard.scmtools.core import Branch, Commit
from reviewboard.scmtools.errors import FileNotFoundError, SCMError
from reviewboard.site.urlresolvers import local_site_reverse
class GitHubPublicForm(HostingServiceForm):
    """Repository form for a public repository owned by the linked user."""

    github_public_repo_name = forms.CharField(
        label=_('Repository name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The name of the repository. This is the '
                    '<repo_name> in '
                    'http://github.com/<username>/<repo_name>/'))
class GitHubPrivateForm(HostingServiceForm):
    """Repository form for a private repository owned by the linked user."""

    github_private_repo_name = forms.CharField(
        label=_('Repository name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The name of the repository. This is the '
                    '<repo_name> in '
                    'http://github.com/<username>/<repo_name>/'))
class GitHubPublicOrgForm(HostingServiceForm):
    """Repository form for a public repository owned by an organization."""

    github_public_org_name = forms.CharField(
        label=_('Organization name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The name of the organization. This is the '
                    '<org_name> in '
                    'http://github.com/<org_name>/<repo_name>/'))

    github_public_org_repo_name = forms.CharField(
        label=_('Repository name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The name of the repository. This is the '
                    '<repo_name> in '
                    'http://github.com/<org_name>/<repo_name>/'))
class GitHubPrivateOrgForm(HostingServiceForm):
    """Repository form for a private repository owned by an organization."""

    github_private_org_name = forms.CharField(
        label=_('Organization name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The name of the organization. This is the '
                    '<org_name> in '
                    'http://github.com/<org_name>/<repo_name>/'))

    github_private_org_repo_name = forms.CharField(
        label=_('Repository name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('The name of the repository. This is the '
                    '<repo_name> in '
                    'http://github.com/<org_name>/<repo_name>/'))
class GitHubAPIPaginator(APIPaginator):
    """Paginates over GitHub API list resources.

    Returned by some GitHubClient functions so that callers can walk
    pages of results lazily, instead of fetching every page up front or
    baking pagination into each individual function.
    """
    start_query_param = 'page'
    per_page_query_param = 'per_page'

    LINK_RE = re.compile(r'\<(?P<url>[^>]+)\>; rel="(?P<rel>[^"]+)",? *')

    def fetch_url(self, url):
        """Fetch a single page of results from a URL.

        Parses the HTTP Link header to discover the neighboring pages,
        keyed by relation name ('prev', 'next', ...).
        """
        data, headers = self.client.api_get(url, return_headers=True)

        rel_urls = {
            match.group('rel'): match.group('url')
            for match in self.LINK_RE.finditer(headers.get('Link', ''))
        }

        return {
            'data': data,
            'headers': headers,
            'prev_url': rel_urls.get('prev'),
            'next_url': rel_urls.get('next'),
        }
class GitHubClient(HostingServiceClient):
    """Client for talking to the GitHub v3 API.

    Extends the generic HostingServiceClient with rate limit
    monitoring, translation of GitHub error payloads into hosting
    service errors, and convenience wrappers for the API endpoints
    Review Board uses.
    """

    RAW_MIMETYPE = 'application/vnd.github.v3.raw'

    def __init__(self, hosting_service):
        super(GitHubClient, self).__init__(hosting_service)
        self.account = hosting_service.account

    #
    # HTTP method overrides
    #
    # Each override inspects GitHub's rate limit headers so that an
    # account nearing its quota gets logged.
    #

    def http_delete(self, url, *args, **kwargs):
        data, headers = super(GitHubClient, self).http_delete(
            url, *args, **kwargs)
        self._check_rate_limits(headers)
        return data, headers

    def http_get(self, url, *args, **kwargs):
        data, headers = super(GitHubClient, self).http_get(
            url, *args, **kwargs)
        self._check_rate_limits(headers)
        return data, headers

    def http_post(self, url, *args, **kwargs):
        data, headers = super(GitHubClient, self).http_post(
            url, *args, **kwargs)
        self._check_rate_limits(headers)
        return data, headers

    #
    # API wrappers around HTTP/JSON methods
    #

    def api_delete(self, url, *args, **kwargs):
        try:
            data, headers = self.json_delete(url, *args, **kwargs)
            return data
        except (URLError, HTTPError) as e:
            self._check_api_error(e)

    def api_get(self, url, return_headers=False, *args, **kwargs):
        """Performs an HTTP GET to the GitHub API and returns the results.

        If `return_headers` is True, then the result of each call (or
        each generated set of data, if using pagination) will be a tuple
        of (data, headers). Otherwise, the result will just be the data.
        """
        try:
            data, headers = self.json_get(url, *args, **kwargs)

            if return_headers:
                return data, headers
            else:
                return data
        except (URLError, HTTPError) as e:
            self._check_api_error(e)

    def api_get_list(self, url, start=None, per_page=None, *args, **kwargs):
        """Performs an HTTP GET to a GitHub API and returns a paginator.

        This returns a GitHubAPIPaginator that's used to iterate over the
        pages of results. Each page contains information on the data and
        headers from that given page.

        The ``start`` and ``per_page`` parameters can be used to control
        where pagination begins and how many results are returned per page.
        ``start`` is a 0-based index representing a page number.
        """
        if start is not None:
            # GitHub uses 1-based indexing, so add one.
            start += 1

        return GitHubAPIPaginator(self, url, start=start, per_page=per_page)

    def api_post(self, url, *args, **kwargs):
        try:
            data, headers = self.json_post(url, *args, **kwargs)
            return data
        except (URLError, HTTPError) as e:
            self._check_api_error(e)

    #
    # Higher-level API methods
    #

    def api_get_blob(self, repo_api_url, path, sha):
        """Return a blob's raw contents, or raise FileNotFoundError."""
        url = self._build_api_url(repo_api_url, 'git/blobs/%s' % sha)

        try:
            return self.http_get(url, headers={
                'Accept': self.RAW_MIMETYPE,
            })[0]
        except (URLError, HTTPError):
            raise FileNotFoundError(path, sha)

    def api_get_commits(self, repo_api_url, start=None):
        """Return a page of commits, optionally starting at a SHA."""
        url = self._build_api_url(repo_api_url, 'commits')

        if start:
            url += '&sha=%s' % start

        try:
            return self.api_get(url)
        except Exception as e:
            logging.warning('Failed to fetch commits from %s: %s',
                            url, e, exc_info=1)
            raise SCMError(six.text_type(e))

    def api_get_compare_commits(self, repo_api_url, parent_revision, revision):
        """Return (files, tree_sha) for the change at ``revision``."""
        # If the commit has a parent commit, use GitHub's "compare two
        # commits" API to get the diff. Otherwise, fetch the commit itself.
        if parent_revision:
            url = self._build_api_url(
                repo_api_url,
                'compare/%s...%s' % (parent_revision, revision))
        else:
            url = self._build_api_url(repo_api_url, 'commits/%s' % revision)

        try:
            comparison = self.api_get(url)
        except Exception as e:
            logging.warning('Failed to fetch commit comparison from %s: %s',
                            url, e, exc_info=1)
            raise SCMError(six.text_type(e))

        # The two endpoints return differently-shaped payloads; dig the
        # base tree SHA out of whichever one we used.
        if parent_revision:
            tree_sha = comparison['base_commit']['commit']['tree']['sha']
        else:
            tree_sha = comparison['commit']['tree']['sha']

        return comparison['files'], tree_sha

    def api_get_heads(self, repo_api_url):
        """Return every branch head ref in the repository."""
        url = self._build_api_url(repo_api_url, 'git/refs/heads')

        try:
            rsp = self.api_get(url)
            return [ref for ref in rsp if ref['ref'].startswith('refs/heads/')]
        except Exception as e:
            # Fixed: this log message previously said "commits".
            logging.warning('Failed to fetch heads from %s: %s',
                            url, e, exc_info=1)
            raise SCMError(six.text_type(e))

    def api_get_issue(self, repo_api_url, issue_id):
        url = self._build_api_url(repo_api_url, 'issues/%s' % issue_id)

        try:
            return self.api_get(url)
        except Exception as e:
            logging.warning('GitHub: Failed to fetch issue from %s: %s',
                            url, e, exc_info=1)
            raise SCMError(six.text_type(e))

    def api_get_remote_repositories(self, api_url, owner, owner_type,
                                    filter_type=None, start=None,
                                    per_page=None):
        """Return a paginator over repositories for a user/organization."""
        url = api_url

        if owner_type == 'organization':
            url += 'orgs/%s/repos' % owner
        elif owner_type == 'user':
            if owner == self.account.username:
                # All repositories belonging to an authenticated user.
                url += 'user/repos'
            else:
                # Only public repositories for the user.
                url += 'users/%s/repos' % owner
        else:
            raise ValueError(
                "owner_type must be 'organization' or 'user', not %r"
                % owner_type)

        if filter_type:
            # (The old "filter_type or 'all'" fallback was dead code:
            # this branch is only reached when filter_type is truthy.)
            url += '?type=%s' % filter_type

        return self.api_get_list(self._build_api_url(url),
                                 start=start, per_page=per_page)

    def api_get_remote_repository(self, api_url, owner, repository_id):
        """Return repository info, or None if the repository is missing."""
        try:
            return self.api_get(self._build_api_url(
                '%srepos/%s/%s' % (api_url, owner, repository_id)))
        except HostingServiceError as e:
            if e.http_code == 404:
                return None
            else:
                raise

    def api_get_tree(self, repo_api_url, sha, recursive=False):
        url = self._build_api_url(repo_api_url, 'git/trees/%s' % sha)

        if recursive:
            url += '&recursive=1'

        try:
            return self.api_get(url)
        except Exception as e:
            logging.warning('Failed to fetch tree from %s: %s',
                            url, e, exc_info=1)
            raise SCMError(six.text_type(e))

    #
    # Internal utilities
    #

    def _build_api_url(self, *api_paths):
        """Join URL components and append the stored access token."""
        url = '/'.join(api_paths)

        if '?' in url:
            url += '&'
        else:
            url += '?'

        url += 'access_token=%s' % self.account.data['authorization']['token']

        return url

    def _check_rate_limits(self, headers):
        """Log a warning when the remaining API rate limit is low."""
        rate_limit_remaining = headers.get('X-RateLimit-Remaining', None)

        try:
            if (rate_limit_remaining is not None and
                int(rate_limit_remaining) <= 100):
                logging.warning('GitHub rate limit for %s is down to %s',
                                self.account.username, rate_limit_remaining)
        except ValueError:
            pass

    def _check_api_error(self, e):
        """Translate an urllib error into an appropriate service error."""
        data = e.read()

        try:
            rsp = json.loads(data)
        except ValueError:
            # Previously a bare except; the body simply wasn't JSON.
            rsp = None

        if rsp and 'message' in rsp:
            response_info = e.info()
            x_github_otp = response_info.get('X-GitHub-OTP', '')

            if x_github_otp.startswith('required;'):
                raise TwoFactorAuthCodeRequiredError(
                    _('Enter your two-factor authentication code. '
                      'This code will be sent to you by GitHub.'),
                    http_code=e.code)

            if e.code == 401:
                raise AuthorizationError(rsp['message'], http_code=e.code)

            raise HostingServiceError(rsp['message'], http_code=e.code)
        else:
            raise HostingServiceError(six.text_type(e), http_code=e.code)
class GitHub(HostingService, BugTracker):
name = _('GitHub')
plans = [
('public', {
'name': _('Public'),
'form': GitHubPublicForm,
'repository_fields': {
'Git': {
'path': 'git://github.com/%(hosting_account_username)s/'
'%(github_public_repo_name)s.git',
'mirror_path': 'git@github.com:'
'%(hosting_account_username)s/'
'%(github_public_repo_name)s.git',
}
},
'bug_tracker_field': 'http://github.com/'
'%(hosting_account_username)s/'
'%(github_public_repo_name)s/'
'issues#issue/%%s',
}),
('public-org', {
'name': _('Public Organization'),
'form': GitHubPublicOrgForm,
'repository_fields': {
'Git': {
'path': 'git://github.com/%(github_public_org_name)s/'
'%(github_public_org_repo_name)s.git',
'mirror_path': 'git@github.com:%(github_public_org_name)s/'
'%(github_public_org_repo_name)s.git',
}
},
'bug_tracker_field': 'http://github.com/'
'%(github_public_org_name)s/'
'%(github_public_org_repo_name)s/'
'issues#issue/%%s',
}),
('private', {
'name': _('Private'),
'form': GitHubPrivateForm,
'repository_fields': {
'Git': {
'path': 'git@github.com:%(hosting_account_username)s/'
'%(github_private_repo_name)s.git',
'mirror_path': '',
},
},
'bug_tracker_field': 'http://github.com/'
'%(hosting_account_username)s/'
'%(github_private_repo_name)s/'
'issues#issue/%%s',
}),
('private-org', {
'name': _('Private Organization'),
'form': GitHubPrivateOrgForm,
'repository_fields': {
'Git': {
'path': 'git@github.com:%(github_private_org_name)s/'
'%(github_private_org_repo_name)s.git',
'mirror_path': '',
},
},
'bug_tracker_field': 'http://github.com/'
'%(github_private_org_name)s/'
'%(github_private_org_repo_name)s/'
'issues#issue/%%s',
}),
]
needs_authorization = True
supports_bug_trackers = True
supports_post_commit = True
supports_repositories = True
supports_two_factor_auth = True
supports_list_remote_repositories = True
supported_scmtools = ['Git']
client_class = GitHubClient
repository_url_patterns = patterns(
'',
url(r'^hooks/close-submitted/$',
'reviewboard.hostingsvcs.github.post_receive_hook_close_submitted'),
)
# This should be the prefix for every field on the plan forms.
plan_field_prefix = 'github'
def get_api_url(self, hosting_url):
"""Returns the API URL for GitHub.
This can be overridden to provide more advanced lookup (intended
for the GitHub Enterprise support).
"""
assert not hosting_url
return 'https://api.github.com/'
def get_plan_field(self, plan, plan_data, name):
"""Returns the value of a field for plan-specific data.
This takes into account the plan type and hosting service ID.
"""
key = '%s_%s_%s' % (self.plan_field_prefix, plan.replace('-', '_'),
name)
return plan_data[key]
    def check_repository(self, plan=None, *args, **kwargs):
        """Checks the validity of a repository.

        This will perform an API request against GitHub to get
        information on the repository. This will throw an exception if
        the repository was not found, and return cleanly if it was found.
        """
        try:
            repo_info = self.client.api_get(
                self._build_api_url(
                    self._get_repo_api_url_raw(
                        self._get_repository_owner_raw(plan, kwargs),
                        self._get_repository_name_raw(plan, kwargs))))
        except HostingServiceError as e:
            # Map a 404 onto a plan-specific, user-readable error;
            # anything else propagates unchanged.
            if e.http_code == 404:
                if plan in ('public', 'private'):
                    raise RepositoryError(
                        _('A repository with this name was not found, or your '
                          'user may not own it.'))
                elif plan == 'public-org':
                    raise RepositoryError(
                        _('A repository with this organization or name was '
                          'not found.'))
                elif plan == 'private-org':
                    raise RepositoryError(
                        _('A repository with this organization or name was '
                          'not found, or your user may not have access to '
                          'it.'))

            raise

        if 'private' in repo_info:
            # Ensure the repository's actual visibility matches the
            # public/private plan the user selected.
            is_private = repo_info['private']

            if is_private and plan in ('public', 'public-org'):
                raise RepositoryError(
                    _('This is a private repository, but you have selected '
                      'a public plan.'))
            elif not is_private and plan in ('private', 'private-org'):
                raise RepositoryError(
                    _('This is a public repository, but you have selected '
                      'a private plan.'))
def authorize(self, username, password, hosting_url,
two_factor_auth_code=None, local_site_name=None,
*args, **kwargs):
site = Site.objects.get_current()
siteconfig = SiteConfiguration.objects.get_current()
site_base_url = '%s%s' % (
site.domain,
local_site_reverse('root', local_site_name=local_site_name))
site_url = '%s://%s' % (siteconfig.get('site_domain_method'),
site_base_url)
note = 'Access for Review Board (%s - %s)' % (
site_base_url,
uuid.uuid4().hex[:7])
try:
body = {
'scopes': [
'user',
'repo',
],
'note': note,
'note_url': site_url,
}
# If the site is using a registered GitHub application,
# send it in the requests. This will gain the benefits of
# a GitHub application, such as higher rate limits.
if (hasattr(settings, 'GITHUB_CLIENT_ID') and
hasattr(settings, 'GITHUB_CLIENT_SECRET')):
body.update({
'client_id': settings.GITHUB_CLIENT_ID,
'client_secret': settings.GITHUB_CLIENT_SECRET,
})
headers = {}
if two_factor_auth_code:
headers['X-GitHub-OTP'] = two_factor_auth_code
rsp, headers = self.client.json_post(
url=self.get_api_url(hosting_url) + 'authorizations',
username=username,
password=password,
headers=headers,
body=json.dumps(body))
except (HTTPError, URLError) as e:
data = e.read()
try:
rsp = json.loads(data)
except:
rsp = None
if rsp and 'message' in rsp:
response_info = e.info()
x_github_otp = response_info.get('X-GitHub-OTP', '')
if x_github_otp.startswith('required;'):
raise TwoFactorAuthCodeRequiredError(
_('Enter your two-factor authentication code '
'and re-enter your password to link your account. '
'This code will be sent to you by GitHub.'))
raise AuthorizationError(rsp['message'])
else:
raise AuthorizationError(six.text_type(e))
self._save_auth_data(rsp)
def is_authorized(self):
return ('authorization' in self.account.data and
'token' in self.account.data['authorization'])
def get_reset_auth_token_requires_password(self):
"""Returns whether or not resetting the auth token requires a password.
A password will be required if not using a GitHub client ID or
secret.
"""
if not self.is_authorized():
return True
app_info = self.account.data['authorization']['app']
client_id = app_info.get('client_id', '')
has_client = (client_id.strip('0') != '')
return (not has_client or
(not (hasattr(settings, 'GITHUB_CLIENT_ID') and
hasattr(settings, 'GITHUB_CLIENT_SECRET'))))
    def reset_auth_token(self, password=None, two_factor_auth_code=None):
        """Resets the authorization token for the linked account.

        This will attempt to reset the token in a few different ways,
        depending on how the token was granted.

        Tokens linked to a registered GitHub OAuth app can be reset without
        requiring any additional credentials.

        Tokens linked to a personal account (which is the case on most
        installations) require a password and possibly a two-factor auth
        code. Callers should call get_reset_auth_token_requires_password()
        before determining whether to pass a password, and should pass
        a two-factor auth code if this raises TwoFactorAuthCodeRequiredError.
        """
        if self.is_authorized():
            token = self.account.data['authorization']['token']
        else:
            token = None

        if self.get_reset_auth_token_requires_password():
            assert password

            if self.account.local_site:
                local_site_name = self.account.local_site.name
            else:
                local_site_name = None

            if token:
                # Delete the old authorization first, then clear it from
                # the account so a failed re-auth doesn't leave a stale
                # token around.
                try:
                    self._delete_auth_token(
                        self.account.data['authorization']['id'],
                        password=password,
                        two_factor_auth_code=two_factor_auth_code)
                except HostingServiceError as e:
                    # If we get a 404 Not Found, then the authorization was
                    # probably already deleted.
                    if e.http_code != 404:
                        raise

                self.account.data['authorization'] = ''
                self.account.save()

            # This may produce errors, which we want to bubble up.
            self.authorize(self.account.username, password,
                           self.account.hosting_url,
                           two_factor_auth_code=two_factor_auth_code,
                           local_site_name=local_site_name)
        else:
            # We can use the new API for resetting the token without
            # re-authenticating.
            auth_data = self._reset_authorization(
                settings.GITHUB_CLIENT_ID,
                settings.GITHUB_CLIENT_SECRET,
                token)
            self._save_auth_data(auth_data)
def get_file(self, repository, path, revision, *args, **kwargs):
repo_api_url = self._get_repo_api_url(repository)
return self.client.api_get_blob(repo_api_url, path, revision)
def get_file_exists(self, repository, path, revision, *args, **kwargs):
try:
repo_api_url = self._get_repo_api_url(repository)
self.client.api_get_blob(repo_api_url, path, revision)
return True
except FileNotFoundError:
return False
def get_branches(self, repository):
repo_api_url = self._get_repo_api_url(repository)
refs = self.client.api_get_heads(repo_api_url)
results = []
for ref in refs:
name = ref['ref'][len('refs/heads/'):]
results.append(Branch(id=name,
commit=ref['object']['sha'],
default=(name == 'master')))
return results
def get_commits(self, repository, branch=None, start=None):
repo_api_url = self._get_repo_api_url(repository)
commits = self.client.api_get_commits(repo_api_url, start=start)
results = []
for item in commits:
commit = Commit(
item['commit']['author']['name'],
item['sha'],
item['commit']['committer']['date'],
item['commit']['message'])
if item['parents']:
commit.parent = item['parents'][0]['sha']
results.append(commit)
return results
def get_change(self, repository, revision):
repo_api_url = self._get_repo_api_url(repository)
# Step 1: fetch the commit itself that we want to review, to get
# the parent SHA and the commit message. Hopefully this information
# is still in cache so we don't have to fetch it again.
commit = cache.get(repository.get_commit_cache_key(revision))
if commit:
author_name = commit.author_name
date = commit.date
parent_revision = commit.parent
message = commit.message
else:
commit = self.client.api_get_commits(repo_api_url, revision)[0]
author_name = commit['commit']['author']['name']
date = commit['commit']['committer']['date'],
parent_revision = commit['parents'][0]['sha']
message = commit['commit']['message']
# Step 2: Get the diff and tree from the "compare commits" API
files, tree_sha = self.client.api_get_compare_commits(
repo_api_url, parent_revision, revision)
# Step 3: fetch the tree for the original commit, so that we can get
# full blob SHAs for each of the files in the diff.
tree = self.client.api_get_tree(repo_api_url, tree_sha, recursive=True)
file_shas = {}
for file in tree['tree']:
file_shas[file['path']] = file['sha']
diff = []
for file in files:
filename = file['filename']
status = file['status']
try:
patch = file['patch']
except KeyError:
continue
diff.append('diff --git a/%s b/%s' % (filename, filename))
if status == 'modified':
old_sha = file_shas[filename]
new_sha = file['sha']
diff.append('index %s..%s 100644' % (old_sha, new_sha))
diff.append('--- a/%s' % filename)
diff.append('+++ b/%s' % filename)
elif status == 'added':
new_sha = file['sha']
diff.append('new file mode 100644')
diff.append('index %s..%s' % ('0' * 40, new_sha))
diff.append('--- /dev/null')
diff.append('+++ b/%s' % filename)
elif status == 'removed':
old_sha = file_shas[filename]
diff.append('deleted file mode 100644')
diff.append('index %s..%s' % (old_sha, '0' * 40))
diff.append('--- a/%s' % filename)
diff.append('+++ /dev/null')
diff.append(patch)
diff = '\n'.join(diff)
# Make sure there's a trailing newline
if not diff.endswith('\n'):
diff += '\n'
return Commit(author_name, revision, date, message, parent_revision,
diff=diff)
def get_remote_repositories(self, owner=None, owner_type='user',
                            filter_type=None, start=None, per_page=None):
    """Return a paginator over remote GitHub repositories.

    Looks up repositories on GitHub that the given owner either owns or
    is a member of. For an organization plan, ``owner`` is expected to be
    an organization name; for a public/private plan, ``owner`` is a user
    (defaulting to the linked account's username) and that user's
    accessible repositories are returned.

    Args:
        owner: The user or organization name to look up. Defaults to the
            linked account's username when ``owner_type`` is 'user'.
        owner_type: Either 'user' or 'organization'.
        filter_type: Optional repository filter passed to the API.
        start: Optional pagination start.
        per_page: Optional page size.

    Returns:
        A ProxyPaginator yielding RemoteRepository entries.
    """
    if owner is None and owner_type == 'user':
        owner = self.account.username

    assert owner

    api_root = self.get_api_url(self.account.hosting_url)
    base_paginator = self.client.api_get_remote_repositories(
        api_root, owner, owner_type, filter_type, start, per_page)

    def _page_to_remote_repos(page_data):
        # Wrap each raw GitHub repository payload in a RemoteRepository.
        remote_repos = []

        for repo in page_data:
            repo_owner = repo['owner']['login']
            remote_repos.append(RemoteRepository(
                self,
                repository_id='%s/%s' % (repo_owner, repo['name']),
                name=repo['name'],
                owner=repo_owner,
                scm_type='Git',
                path=repo['clone_url'],
                mirror_path=repo['mirror_url'],
                extra_data=repo))

        return remote_repos

    return ProxyPaginator(base_paginator,
                          normalize_page_data_func=_page_to_remote_repos)
def get_remote_repository(self, repository_id):
    """Return the remote repository matching the given ID.

    The ID is expected to be in the "owner/repo_id" form produced by
    get_remote_repositories().

    Raises:
        ObjectDoesNotExist: If the ID is malformed or the repository
            cannot be found.
    """
    owner, sep, repo_name = repository_id.partition('/')

    repo = None

    # Only "owner/name" (exactly two components) is a valid ID.
    if sep and '/' not in repo_name:
        repo = self.client.api_get_remote_repository(
            self.get_api_url(self.account.hosting_url),
            owner, repo_name)

    if not repo:
        raise ObjectDoesNotExist

    return RemoteRepository(self,
                            repository_id=repository_id,
                            name=repo['name'],
                            owner=repo['owner']['login'],
                            scm_type='Git',
                            path=repo['clone_url'],
                            mirror_path=repo['mirror_url'],
                            extra_data=repo)
def get_bug_info_uncached(self, repository, bug_id):
    """Get the bug info from the server.

    Fetches the GitHub issue matching ``bug_id`` and returns a dict with
    'summary', 'description' and 'status' keys. All values default to
    empty strings when the issue cannot be fetched.
    """
    result = {
        'summary': '',
        'description': '',
        'status': '',
    }

    repo_api_url = self._get_repo_api_url(repository)

    try:
        issue = self.client.api_get_issue(repo_api_url, bug_id)
        result = {
            'summary': issue['title'],
            'description': issue['body'],
            'status': issue['state'],
        }
    except Exception:
        # Fix: a bare "except:" here would also swallow SystemExit and
        # KeyboardInterrupt. The best-effort behavior is intentional:
        # errors in fetching are already logged in api_get_issue.
        pass

    return result
def _reset_authorization(self, client_id, client_secret, token):
    """Reset the authorization info for an OAuth app-linked token.

    For a token associated with a registered OAuth application, this
    asks GitHub to reset the token without further authentication.
    """
    api_root = self.get_api_url(self.account.hosting_url)
    url = '%sapplications/%s/tokens/%s' % (api_root, client_id, token)

    # Any HTTP errors are left to propagate to the caller.
    return self.client.api_post(url=url,
                                username=client_id,
                                password=client_secret)
def _delete_auth_token(self, auth_id, password, two_factor_auth_code=None):
    """Request deletion of an authorization token.

    Deletes the authorization token with the given ID. A password is
    required and, depending on the account settings, a two-factor
    authentication code as well.
    """
    if two_factor_auth_code:
        headers = {'X-GitHub-OTP': two_factor_auth_code}
    else:
        headers = {}

    url = self._build_api_url(
        '%sauthorizations/%s' % (
            self.get_api_url(self.account.hosting_url),
            auth_id))

    self.client.api_delete(url=url,
                           headers=headers,
                           username=self.account.username,
                           password=password)
def _save_auth_data(self, auth_data):
    """Saves authorization data sent from GitHub."""
    # Persist the raw authorization payload on the linked account so it
    # is available to later requests.
    self.account.data['authorization'] = auth_data
    self.account.save()
def _build_api_url(self, *api_paths):
    """Build a full API URL by delegating to the GitHub API client."""
    return self.client._build_api_url(*api_paths)
def _get_repo_api_url(self, repository):
    """Return the base API URL for the given repository."""
    extra_data = repository.extra_data
    plan = extra_data['repository_plan']

    owner = self._get_repository_owner_raw(plan, extra_data)
    repo_name = self._get_repository_name_raw(plan, extra_data)

    return self._get_repo_api_url_raw(owner, repo_name)
def _get_repo_api_url_raw(self, owner, repo_name):
    """Return the API URL for a repository given its owner and name."""
    api_root = self.get_api_url(self.account.hosting_url)
    return '%srepos/%s/%s' % (api_root, owner, repo_name)
def _get_repository_owner_raw(self, plan, extra_data):
    """Return the repository owner for the given hosting plan.

    Raises:
        InvalidPlanError: If the plan name is not recognized.
    """
    if plan in ('public', 'private'):
        # Personal plans: the owner is the linked account itself.
        return self.account.username

    if plan in ('public-org', 'private-org'):
        # Organization plans: the owner is the configured org name.
        return self.get_plan_field(plan, extra_data, 'name')

    raise InvalidPlanError(plan)
def _get_repository_name_raw(self, plan, extra_data):
    """Return the repository name configured for the given plan."""
    return self.get_plan_field(plan, extra_data, 'repo_name')
@require_POST
def post_receive_hook_close_submitted(request, *args, **kwargs):
    """Close review requests as submitted automatically after a push.

    Expects a GitHub push-event JSON payload; responds with 415 when the
    body is not valid JSON.
    """
    try:
        payload = json.loads(request.body)
    except ValueError as e:
        logging.error('The payload is not in JSON format: %s', e)
        return HttpResponse(status=415)

    review_id_to_commits = _get_review_id_to_commits_map(
        payload, get_server_url(request))

    if review_id_to_commits:
        close_all_review_requests(review_id_to_commits)

    return HttpResponse()
def _get_review_id_to_commits_map(payload, server_url):
    """Return a dict mapping review request IDs to commit descriptions.

    Commits whose message does not contain a review request ID are
    collected under the key ``None``. Returns ``None`` when the payload
    has no usable ref or branch name.
    """
    ref_name = payload.get('ref')
    if not ref_name:
        return None

    branch_name = get_git_branch_name(ref_name)
    if not branch_name:
        return None

    review_id_to_commits_map = defaultdict(list)

    for commit in payload.get('commits', []):
        commit_hash = commit.get('id')
        commit_message = commit.get('message')

        review_request_id = get_review_request_id(commit_message, server_url,
                                                  commit_hash)

        # Describe the commit as "branch (shorthash)".
        review_id_to_commits_map[review_request_id].append(
            '%s (%s)' % (branch_name, commit_hash[:7]))

    return review_id_to_commits_map
| {
"content_hash": "f89d5377c8ffa5ee45123cf0f56272bd",
"timestamp": "",
"source": "github",
"line_count": 1022,
"max_line_length": 80,
"avg_line_length": 37.31800391389432,
"alnum_prop": 0.5380581556936469,
"repo_name": "1tush/reviewboard",
"id": "179735f66ca6dfffb8ffa2b9a2b11860e8be0153",
"size": "38139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/hostingsvcs/github.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "685"
},
{
"name": "C#",
"bytes": "340"
},
{
"name": "CSS",
"bytes": "157867"
},
{
"name": "Java",
"bytes": "340"
},
{
"name": "JavaScript",
"bytes": "1256833"
},
{
"name": "Objective-C",
"bytes": "288"
},
{
"name": "PHP",
"bytes": "278"
},
{
"name": "Perl",
"bytes": "103"
},
{
"name": "Python",
"bytes": "3124372"
},
{
"name": "Ruby",
"bytes": "172"
},
{
"name": "Shell",
"bytes": "963"
}
],
"symlink_target": ""
} |
import unittest
import sys
sys.path.append('..')
from bs4 import BeautifulSoup
from app import app
from app.user import User
class TestLoginAndLogout(unittest.TestCase):
    """Integration tests for user login/logout against the Flask app."""

    def setUp(self):
        # Exercise the real CSRF machinery (WTF_CSRF_ENABLED=True) so the
        # tests also validate token handling, not just the views.
        app.config['TESTING'] = True
        app.debug = False
        app.config['WTF_CSRF_ENABLED'] = True
        self.baseURL = 'http://localhost:5000'
        self.client = app.test_client()

    def delete_test_user(self, username):
        """Delete the user from the database."""
        # The password is irrelevant for deletion; 'dummy' is a placeholder.
        user = User(username, 'dummy')
        user.delete_user()

    def add_test_user(self, username, password):
        """Add a user to the database so that we can use it for testing."""
        user = User(username, password)
        # Delete the user if it's already in the database, just to be sure
        # it's not left over from a previous unsuccessful run.
        user.delete_user()
        rv = user.add_user()
        assert rv

    def generate_shortURL(self):
        # Fixed value used to monkeypatch the shortener so the expected
        # short URL is deterministic.
        return 'tinyurl765'

    def getcsrf_value(self):
        """Return the CSRF token by sending a dummy request."""
        # Parsing the rendered form is a workaround: making csrf_token
        # invalidation work via configuration alone hit
        # https://github.com/lepture/flask-wtf/issues/208, so the easier
        # way is to scrape the token out of the page.
        rv = self.client.get('/')
        soup = BeautifulSoup(rv.data, 'html.parser')
        tag = soup.body.find('input', attrs={'name': 'csrf_token'})
        return tag['value']

    def login(self, username, password):
        """Log in a user given a username and password."""
        post_data = {'email': username,
                     'password': password,
                     'csrf_token': self.getcsrf_value(),
                     'submit': 'Login'}
        return self.client.post('/login', data=post_data,
                                follow_redirects=True)

    def logout(self):
        """Log out a logged-in user."""
        post_data = {'csrf_token': self.getcsrf_value(),
                     'submit': 'Logout'}
        return self.client.post('/logout', data=post_data,
                                follow_redirects=True)

    def test_user_login_logout(self):
        """Create a test user, then log in and out to verify both flows."""
        username = 'user@gmail.com'
        password = 'mypassword'
        self.add_test_user(username, password)
        rv = self.login(username, password)
        # A successful login greets the user and offers a logout link.
        assert b'Hi user@gmail.com!' in rv.data
        assert b'/logout' in rv.data
        rv = self.logout()
        assert b'Logout Successful' in rv.data
        assert b'/login' in rv.data
        self.delete_test_user(username)

    def test_invalid_login_logout(self):
        """Test logging in with a wrong username and password."""
        username = 'invaliduser@gmail.com'
        password = 'password'
        rv = self.login(username, password)
        assert rv.status_code == 200
        assert b'Invalid e-mail or password' in rv.data
        assert b'/login' in rv.data
        assert b'/logout' not in rv.data
        # Logging out without a session is rejected as unauthorized.
        rv = self.logout()
        assert rv.status_code == 401

    def test_invalid_login_password(self):
        """Test logging in with a valid username but an invalid password."""
        username = 'invaliduser@gmail.com'
        password = 'password'
        password2 = 'password2'
        self.add_test_user(username, password)
        rv = self.login(username, password2)
        assert rv.status_code == 200
        assert b'Invalid e-mail or password' in rv.data
        assert b'/login' in rv.data
        assert b'/logout' not in rv.data
        rv = self.logout()
        assert rv.status_code == 401
        self.delete_test_user(username)

    def test_url_shorten_when_user_logged_in(self):
        """Post a shorten request while the user is logged in."""
        # Monkeypatch generateShortUrl so we know the exact short URL to
        # expect and can validate accordingly.
        # NOTE(review): the patch is never restored, so it leaks into any
        # later test using urlShortener — consider saving/restoring it.
        from app.models import urlshortener
        urlshortener.urlShortener.generateShortUrl = self.generate_shortURL
        username = 'validuser@gmail.com'
        password = 'password'
        post_data = {'url': 'http://www.google.com/',
                     'submit': 'Shorten',
                     'csrf_token': self.getcsrf_value()}
        self.add_test_user(username, password)
        self.login(username, password)
        rv = self.client.post('/',
                              data=post_data,
                              follow_redirects=False)
        self.assertEqual(rv.status_code, 200)
        shorturl = self.baseURL + '/' + self.generate_shortURL()
        assert shorturl in str(rv.data)
        # Cleanup so the next run starts from a clean slate.
        urlshort = urlshortener.urlShortener()
        urlshort.removeUrl(self.generate_shortURL())
        self.logout()
        self.delete_test_user(username)
| {
"content_hash": "3fd9e0804d4d741450c0a2b7efaf7fc0",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 88,
"avg_line_length": 32.756410256410255,
"alnum_prop": 0.5874755381604697,
"repo_name": "PradheepShrinivasan/picourl",
"id": "c8adaf34def5338ff2585c9b9cd8779809e0701e",
"size": "5110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_login.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "309"
},
{
"name": "HTML",
"bytes": "7637"
},
{
"name": "Makefile",
"bytes": "455"
},
{
"name": "Python",
"bytes": "37033"
}
],
"symlink_target": ""
} |
"""A library that uses the recon service to map lat/lng to DC places.
See latlng_recon_service_test.py for usage example.
"""
import requests
_RECON_ROOT = 'https://api.datacommons.org/v1/recon/resolve/coordinate'
_RECON_COORD_BATCH_SIZE = 50
def _call_resolve_coordinates(id2latlon, filter_fn, verbose):
    """Resolve one batch of coordinates to places via the recon API.

    Args:
        id2latlon: Dict mapping an opaque ID to a (lat, lon) tuple.
        filter_fn: Optional callable applied to each list of place DCIDs.
        verbose: Print debug messages when True.

    Returns:
        Dict keyed by the input IDs, each value a (possibly filtered)
        list of place DCIDs.

    Raises:
        requests.HTTPError: If the recon API call fails.
    """
    # Reverse map from (lat, lon) to *all* IDs carrying that coordinate.
    # Fix: the old code mapped each coordinate to a single ID, so when two
    # different IDs shared identical coordinates one of them silently
    # disappeared from the result. Coordinates are also de-duplicated in
    # the request payload now.
    revmap = {}
    coords = []
    for dcid, (lat, lon) in id2latlon.items():
        key = (lat, lon)
        if key not in revmap:
            revmap[key] = []
            coords.append({'latitude': lat, 'longitude': lon})
        revmap[key].append(dcid)

    result = {}
    if verbose:
        print('Calling recon API with a lat/lon list of', len(id2latlon))
    resp = requests.post(_RECON_ROOT, json={'coordinates': coords})
    resp.raise_for_status()
    if verbose:
        print('Got successful recon API response')
    for coord in resp.json()['placeCoordinates']:
        # Zero lat/lons are missing
        # (https://github.com/datacommonsorg/mixer/issues/734)
        if 'latitude' not in coord:
            coord['latitude'] = 0.0
        if 'longitude' not in coord:
            coord['longitude'] = 0.0
        key = (coord['latitude'], coord['longitude'])
        assert key in revmap, key
        cips = coord.get('placeDcids', [])
        places = filter_fn(cips) if filter_fn else cips
        # Fan the resolved places out to every ID at this coordinate.
        for dcid in revmap[key]:
            result[dcid] = places
    return result
def latlng2places(id2latlon, filter_fn=None, verbose=False):
    """Resolve a mapping of ID -> (lat, lng) into lists of DC places.

    Calls the Recon service in batches of _RECON_COORD_BATCH_SIZE.

    Args:
        id2latlon: A dict from any distinct ID to (lat, lng). The response
            uses the same ID as key.
        filter_fn: An optional function taking a list of place DCIDs and
            returning a subset of them (e.g. to keep only countries).
        verbose: Print debug messages during execution.

    Returns:
        A dict keyed by the IDs from ``id2latlon``, each value a list of
        places.
    """
    result = {}
    pending = {}

    for dcid, latlon in id2latlon.items():
        pending[dcid] = latlon
        # Flush a full batch to the recon service.
        if len(pending) == _RECON_COORD_BATCH_SIZE:
            result.update(_call_resolve_coordinates(pending, filter_fn,
                                                    verbose))
            pending = {}

    # Flush the final, partial batch.
    if pending:
        result.update(_call_resolve_coordinates(pending, filter_fn, verbose))

    return result
| {
"content_hash": "21392038e25acb48b8c62183245f35cd",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.6091019910605445,
"repo_name": "datacommonsorg/data",
"id": "d49f990559d2b88c5c82b3daff5682b53e3731e0",
"size": "3036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/latlng_recon_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78"
},
{
"name": "Go",
"bytes": "51446"
},
{
"name": "HTML",
"bytes": "32842253"
},
{
"name": "JavaScript",
"bytes": "458"
},
{
"name": "Jupyter Notebook",
"bytes": "5088443"
},
{
"name": "Python",
"bytes": "3723204"
},
{
"name": "R",
"bytes": "28607"
},
{
"name": "Shell",
"bytes": "25468"
},
{
"name": "TypeScript",
"bytes": "13472"
}
],
"symlink_target": ""
} |
# Reference schema for a task record. The values are Python type objects
# (str/int/dict/bool) documenting the expected field types; the trailing
# comments mark which lifecycle phase writes each group of fields
# (new / change / restart / finish).
{
    'task': {
        'taskid': str,  # new, not change
        'project': str,  # new, not change
        'url': str,  # new, not change
        'status': int,  # change
        'schedule': {
            'priority': int,
            'retries': int,
            'retried': int,
            'exetime': int,
            'age': int,
            'itag': str,  #
            # 'recrawl': int
        },  # new and restart
        'fetch': {
            'method': str,
            'headers': dict,
            'data': str,
            'timeout': int,
        },  # new and restart
        'process': {
            'callback': str,
            'save': dict,
        },  # new and restart
        'track': {
            'fetch': {
                'ok': bool,
                'time': int,
                'status_code': int,
                'headers': dict,
                'encoding': str,
                'content': str,
            },
            'process': {
                'ok': bool,
                'time': int,
                'follows': int,
                'outputs': int,
                'logs': str,
                'exception': str,
            },
        },  # finish
        'lastcrawltime': int,  # keep between request
        'updatetime': int,  # keep between request
    }
}
class TaskDB(object):
    """Abstract base class for task storage back ends.

    Concrete back ends implement the methods below; every method here
    raises NotImplementedError. The class-level constants are the task
    status codes shared by all back ends.
    """

    ACTIVE = 1
    SUCCESS = 2
    FAILED = 3
    BAD = 4

    def load_tasks(self, status, project=None, fields=None):
        """Fetch tasks with the given status, optionally for one project."""
        raise NotImplementedError

    def get_task(self, project, taskid, fields=None):
        """Fetch a single task by project and task ID."""
        raise NotImplementedError

    def status_count(self, project):
        '''
        return a dict
        '''
        raise NotImplementedError

    # Fix: the defaults below were the mutable literal ``obj={}``, a
    # classic Python pitfall. ``None`` is backward-compatible: callers
    # passing no argument still work, and subclasses override entirely.
    def insert(self, project, taskid, obj=None):
        """Insert a new task record."""
        raise NotImplementedError

    def update(self, project, taskid, obj=None, **kwargs):
        """Update fields of an existing task record."""
        raise NotImplementedError
| {
"content_hash": "c38f5019cd9ff5f9dccde6af076421af",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 60,
"avg_line_length": 26,
"alnum_prop": 0.42735042735042733,
"repo_name": "wyrover/pyspider",
"id": "89f2e0a9e8c9a65c5b64240675a66df613cfa099",
"size": "2071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database/base/taskdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import web
class Index(object):
    """web.py request handler serving the application root."""

    def GET(self):
        """Handle HTTP GET by returning the home page body."""
        home_body = "home"
        return home_body
| {
"content_hash": "021fe56c3008feebc35f87137680ae95",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 20,
"avg_line_length": 12.5,
"alnum_prop": 0.6,
"repo_name": "coupain/cs-s01-e01",
"id": "f62124c7b93c9f7331b9f16eb9ba71b5b906a413",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www/app/controllers/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12917"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from math import ceil, floor, log
from abc import abstractmethod
from numbers import Integral
import numpy as np
from ._search import _check_param_grid
from ._search import BaseSearchCV
from . import ParameterGrid, ParameterSampler
from ..base import is_classifier
from ._split import check_cv, _yields_constant_splits
from ..utils import resample
from ..utils.multiclass import check_classification_targets
from ..utils.validation import _num_samples
__all__ = ["HalvingGridSearchCV", "HalvingRandomSearchCV"]
class _SubsampleMetaSplitter:
    """Splitter wrapping another CV object, subsampling a fixed fraction
    of each fold it produces."""

    def __init__(self, *, base_cv, fraction, subsample_test, random_state):
        self.base_cv = base_cv
        self.fraction = fraction
        self.subsample_test = subsample_test
        self.random_state = random_state

    def split(self, X, y, groups=None):
        """Yield (train, test) index pairs with ``fraction`` of the samples."""
        for full_train, full_test in self.base_cv.split(X, y, groups):
            sub_train = resample(
                full_train,
                replace=False,
                random_state=self.random_state,
                n_samples=int(self.fraction * full_train.shape[0]),
            )

            if not self.subsample_test:
                yield sub_train, full_test
                continue

            sub_test = resample(
                full_test,
                replace=False,
                random_state=self.random_state,
                n_samples=int(self.fraction * full_test.shape[0]),
            )
            yield sub_train, sub_test
def _top_k(results, k, itr):
# Return the best candidates of a given iteration
iteration, mean_test_score, params = (
np.asarray(a)
for a in (results["iter"], results["mean_test_score"], results["params"])
)
iter_indices = np.flatnonzero(iteration == itr)
sorted_indices = np.argsort(mean_test_score[iter_indices])
return np.array(params[iter_indices][sorted_indices[-k:]])
class BaseSuccessiveHalving(BaseSearchCV):
    """Implements successive halving.

    Candidates are first evaluated with a small amount of resource; at each
    iteration only the best ``1/factor`` of them survive and are re-evaluated
    with ``factor`` times more resource.

    Ref:
    Almost optimal exploration in multi-armed bandits, ICML 13
    Zohar Karnin, Tomer Koren, Oren Somekh
    """

    def __init__(
        self,
        estimator,
        *,
        scoring=None,
        n_jobs=None,
        refit=True,
        cv=5,
        verbose=0,
        random_state=None,
        error_score=np.nan,
        return_train_score=True,
        max_resources="auto",
        min_resources="exhaust",
        resource="n_samples",
        factor=3,
        aggressive_elimination=False,
    ):
        super().__init__(
            estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            error_score=error_score,
            return_train_score=return_train_score,
        )

        self.random_state = random_state
        self.max_resources = max_resources
        self.resource = resource
        self.factor = factor
        self.min_resources = min_resources
        self.aggressive_elimination = aggressive_elimination

    def _check_input_parameters(self, X, y, groups):
        """Validate constructor parameters against the data.

        Sets ``self.min_resources_`` and ``self.max_resources_`` as a side
        effect and raises ``ValueError`` for any inconsistent setting.
        """
        if self.scoring is not None and not (
            isinstance(self.scoring, str) or callable(self.scoring)
        ):
            raise ValueError(
                "scoring parameter must be a string, "
                "a callable or None. Multimetric scoring is not "
                "supported."
            )

        # We need to enforce that successive calls to cv.split() yield the same
        # splits: see https://github.com/scikit-learn/scikit-learn/issues/15149
        if not _yields_constant_splits(self._checked_cv_orig):
            raise ValueError(
                "The cv parameter must yield consistent folds across "
                "calls to split(). Set its random_state to an int, or set "
                "shuffle=False."
            )

        if (
            self.resource != "n_samples"
            and self.resource not in self.estimator.get_params()
        ):
            raise ValueError(
                f"Cannot use resource={self.resource} which is not supported "
                f"by estimator {self.estimator.__class__.__name__}"
            )

        if isinstance(self.max_resources, str) and self.max_resources != "auto":
            raise ValueError(
                "max_resources must be either 'auto' or a positive integer"
            )

        if self.max_resources != "auto" and (
            not isinstance(self.max_resources, Integral) or self.max_resources <= 0
        ):
            raise ValueError(
                "max_resources must be either 'auto' or a positive integer"
            )

        if self.min_resources not in ("smallest", "exhaust") and (
            not isinstance(self.min_resources, Integral) or self.min_resources <= 0
        ):
            raise ValueError(
                "min_resources must be either 'smallest', 'exhaust', "
                "or a positive integer "
                "no greater than max_resources."
            )

        if isinstance(self, HalvingRandomSearchCV):
            if self.min_resources == self.n_candidates == "exhaust":
                # for n_candidates=exhaust to work, we need to know what
                # min_resources is. Similarly min_resources=exhaust needs to
                # know the actual number of candidates.
                raise ValueError(
                    "n_candidates and min_resources cannot be both set to " "'exhaust'."
                )
            if self.n_candidates != "exhaust" and (
                not isinstance(self.n_candidates, Integral) or self.n_candidates <= 0
            ):
                raise ValueError(
                    "n_candidates must be either 'exhaust' " "or a positive integer"
                )

        self.min_resources_ = self.min_resources
        if self.min_resources_ in ("smallest", "exhaust"):
            if self.resource == "n_samples":
                n_splits = self._checked_cv_orig.get_n_splits(X, y, groups)
                # please see https://gph.is/1KjihQe for a justification
                magic_factor = 2
                self.min_resources_ = n_splits * magic_factor
                if is_classifier(self.estimator):
                    y = self._validate_data(X="no_validation", y=y)
                    check_classification_targets(y)
                    n_classes = np.unique(y).shape[0]
                    self.min_resources_ *= n_classes
            else:
                self.min_resources_ = 1
            # if 'exhaust', min_resources_ might be set to a higher value later
            # in _run_search

        self.max_resources_ = self.max_resources
        if self.max_resources_ == "auto":
            if not self.resource == "n_samples":
                raise ValueError(
                    "max_resources can only be 'auto' if resource='n_samples'"
                )
            self.max_resources_ = _num_samples(X)

        if self.min_resources_ > self.max_resources_:
            raise ValueError(
                f"min_resources_={self.min_resources_} is greater "
                f"than max_resources_={self.max_resources_}."
            )

        if self.min_resources_ == 0:
            raise ValueError(
                f"min_resources_={self.min_resources_}: you might have passed "
                f"an empty dataset X."
            )

        if not isinstance(self.refit, bool):
            raise ValueError(
                f"refit is expected to be a boolean. Got {type(self.refit)} "
                f"instead."
            )

    @staticmethod
    def _select_best_index(refit, refit_metric, results):
        """Custom refit callable to return the index of the best candidate.

        We want the best candidate out of the last iteration. By default
        BaseSearchCV would return the best candidate out of all iterations.

        Currently, we only support for a single metric thus `refit` and
        `refit_metric` are not required.
        """
        last_iter = np.max(results["iter"])
        last_iter_indices = np.flatnonzero(results["iter"] == last_iter)
        best_idx = np.argmax(results["mean_test_score"][last_iter_indices])
        return last_iter_indices[best_idx]

    def fit(self, X, y=None, groups=None, **fit_params):
        """Run fit with all sets of parameters.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,) or (n_samples, n_output), optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set. Only used in conjunction with a "Group" :term:`cv`
            instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).

        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of the estimator

        Returns
        -------
        self : object
            Instance of fitted estimator.
        """
        self._checked_cv_orig = check_cv(
            self.cv, y, classifier=is_classifier(self.estimator)
        )

        self._check_input_parameters(
            X=X,
            y=y,
            groups=groups,
        )

        self._n_samples_orig = _num_samples(X)

        super().fit(X, y=y, groups=groups, **fit_params)

        # Set best_score_: BaseSearchCV does not set it, as refit is a callable
        self.best_score_ = self.cv_results_["mean_test_score"][self.best_index_]

        return self

    def _run_search(self, evaluate_candidates):
        """Run the successive-halving iterations.

        Each iteration evaluates the surviving candidates with
        ``factor ** power * min_resources_`` resources and keeps the top
        ``1/factor`` fraction for the next round.
        """
        candidate_params = self._generate_candidate_params()

        if self.resource != "n_samples" and any(
            self.resource in candidate for candidate in candidate_params
        ):
            # Can only check this now since we need the candidates list
            raise ValueError(
                f"Cannot use parameter {self.resource} as the resource since "
                "it is part of the searched parameters."
            )

        # n_required_iterations is the number of iterations needed so that the
        # last iterations evaluates less than `factor` candidates.
        n_required_iterations = 1 + floor(log(len(candidate_params), self.factor))

        if self.min_resources == "exhaust":
            # To exhaust the resources, we want to start with the biggest
            # min_resources possible so that the last (required) iteration
            # uses as many resources as possible
            last_iteration = n_required_iterations - 1
            self.min_resources_ = max(
                self.min_resources_,
                self.max_resources_ // self.factor ** last_iteration,
            )

        # n_possible_iterations is the number of iterations that we can
        # actually do starting from min_resources and without exceeding
        # max_resources. Depending on max_resources and the number of
        # candidates, this may be higher or smaller than
        # n_required_iterations.
        n_possible_iterations = 1 + floor(
            log(self.max_resources_ // self.min_resources_, self.factor)
        )

        if self.aggressive_elimination:
            n_iterations = n_required_iterations
        else:
            n_iterations = min(n_possible_iterations, n_required_iterations)

        if self.verbose:
            print(f"n_iterations: {n_iterations}")
            print(f"n_required_iterations: {n_required_iterations}")
            print(f"n_possible_iterations: {n_possible_iterations}")
            print(f"min_resources_: {self.min_resources_}")
            print(f"max_resources_: {self.max_resources_}")
            print(f"aggressive_elimination: {self.aggressive_elimination}")
            print(f"factor: {self.factor}")

        self.n_resources_ = []
        self.n_candidates_ = []

        for itr in range(n_iterations):

            power = itr  # default
            if self.aggressive_elimination:
                # this will set n_resources to the initial value (i.e. the
                # value of n_resources at the first iteration) for as many
                # iterations as needed (while candidates are being
                # eliminated), and then go on as usual.
                power = max(0, itr - n_required_iterations + n_possible_iterations)

            n_resources = int(self.factor ** power * self.min_resources_)
            # guard, probably not needed
            n_resources = min(n_resources, self.max_resources_)
            self.n_resources_.append(n_resources)

            n_candidates = len(candidate_params)
            self.n_candidates_.append(n_candidates)

            if self.verbose:
                print("-" * 10)
                print(f"iter: {itr}")
                print(f"n_candidates: {n_candidates}")
                print(f"n_resources: {n_resources}")

            if self.resource == "n_samples":
                # subsampling will be done in cv.split()
                cv = _SubsampleMetaSplitter(
                    base_cv=self._checked_cv_orig,
                    fraction=n_resources / self._n_samples_orig,
                    subsample_test=True,
                    random_state=self.random_state,
                )

            else:
                # Need copy so that the n_resources of next iteration does
                # not overwrite
                candidate_params = [c.copy() for c in candidate_params]
                for candidate in candidate_params:
                    candidate[self.resource] = n_resources
                cv = self._checked_cv_orig

            more_results = {
                "iter": [itr] * n_candidates,
                "n_resources": [n_resources] * n_candidates,
            }

            results = evaluate_candidates(
                candidate_params, cv, more_results=more_results
            )

            n_candidates_to_keep = ceil(n_candidates / self.factor)
            candidate_params = _top_k(results, n_candidates_to_keep, itr)

        self.n_remaining_candidates_ = len(candidate_params)
        self.n_required_iterations_ = n_required_iterations
        self.n_possible_iterations_ = n_possible_iterations
        self.n_iterations_ = n_iterations

    @abstractmethod
    def _generate_candidate_params(self):
        """Return the list of candidate parameter settings to evaluate."""
        pass

    def _more_tags(self):
        """Extend the estimator tags with expected-failure common checks."""
        tags = deepcopy(super()._more_tags())
        tags["_xfail_checks"].update(
            {
                "check_fit2d_1sample": (
                    "Fail during parameter check since min/max resources requires"
                    " more samples"
                ),
            }
        )
        return tags
class HalvingGridSearchCV(BaseSuccessiveHalving):
"""Search over specified parameter values with successive halving.
The search strategy starts evaluating all the candidates with a small
amount of resources and iteratively selects the best candidates, using
more and more resources.
Read more in the :ref:`User guide <successive_halving_user_guide>`.
.. note::
This estimator is still **experimental** for now: the predictions
and the API might change without any deprecation cycle. To use it,
you need to explicitly import ``enable_halving_search_cv``::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingGridSearchCV
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
factor : int or float, default=3
The 'halving' parameter, which determines the proportion of candidates
that are selected for each subsequent iteration. For example,
``factor=3`` means that only one third of the candidates are selected.
resource : ``'n_samples'`` or str, default='n_samples'
Defines the resource that increases with each iteration. By default,
the resource is the number of samples. It can also be set to any
parameter of the base estimator that accepts positive integer
values, e.g. 'n_iterations' or 'n_estimators' for a gradient
boosting estimator. In this case ``max_resources`` cannot be 'auto'
and must be set explicitly.
max_resources : int, default='auto'
The maximum amount of resource that any candidate is allowed to use
for a given iteration. By default, this is set to ``n_samples`` when
``resource='n_samples'`` (default), else an error is raised.
min_resources : {'exhaust', 'smallest'} or int, default='exhaust'
The minimum amount of resource that any candidate is allowed to use
for a given iteration. Equivalently, this defines the amount of
resources `r0` that are allocated for each candidate at the first
iteration.
- 'smallest' is a heuristic that sets `r0` to a small value:
- ``n_splits * 2`` when ``resource='n_samples'`` for a regression
problem
- ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
classification problem
- ``1`` when ``resource != 'n_samples'``
- 'exhaust' will set `r0` such that the **last** iteration uses as
much resources as possible. Namely, the last iteration will use the
highest value smaller than ``max_resources`` that is a multiple of
both ``min_resources`` and ``factor``. In general, using 'exhaust'
leads to a more accurate estimator, but is slightly more time
consuming.
Note that the amount of resources used at each iteration is always a
multiple of ``min_resources``.
aggressive_elimination : bool, default=False
This is only relevant in cases where there isn't enough resources to
reduce the remaining candidates to at most `factor` after the last
iteration. If ``True``, then the search process will 'replay' the
first iteration for as long as needed until the number of candidates
is small enough. This is ``False`` by default, which means that the
last iteration may evaluate more than ``factor`` candidates. See
:ref:`aggressive_elimination` for more details.
cv : int, cross-validation generator or iterable, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. note::
Due to implementation details, the folds produced by `cv` must be
the same across multiple calls to `cv.split()`. For
built-in `scikit-learn` iterators, this can be achieved by
deactivating shuffling (`shuffle=False`), or by setting the
`cv`'s `random_state` parameter to an integer.
scoring : string, callable, or None, default=None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None, the estimator's score method is used.
refit : bool, default=True
If True, refit an estimator using the best found parameters on the
whole dataset.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``HalvingGridSearchCV`` instance.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
random_state : int, RandomState instance or None, default=None
Pseudo random number generator state used for subsampling the dataset
when `resources != 'n_samples'`. Ignored otherwise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int
Controls the verbosity: the higher, the more messages.
Attributes
----------
n_resources_ : list of int
The amount of resources used at each iteration.
n_candidates_ : list of int
The number of candidate parameters that were evaluated at each
iteration.
n_remaining_candidates_ : int
The number of candidate parameters that are left after the last
iteration. It corresponds to `ceil(n_candidates[-1] / factor)`
max_resources_ : int
The maximum number of resources that any candidate is allowed to use
for a given iteration. Note that since the number of resources used
at each iteration must be a multiple of ``min_resources_``, the
actual number of resources used at the last iteration may be smaller
than ``max_resources_``.
min_resources_ : int
The amount of resources that are allocated for each candidate at the
first iteration.
n_iterations_ : int
The actual number of iterations that were run. This is equal to
``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
Else, this is equal to ``min(n_possible_iterations_,
n_required_iterations_)``.
n_possible_iterations_ : int
The number of iterations that are possible starting with
``min_resources_`` resources and without exceeding
``max_resources_``.
n_required_iterations_ : int
The number of iterations that are required to end up with less than
``factor`` candidates at the last iteration, starting with
``min_resources_`` resources. This will be smaller than
``n_possible_iterations_`` when there isn't enough resources.
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``. It contains many informations for
analysing the results of a search.
Please refer to the :ref:`User guide<successive_halving_cv_results>`
for details.
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
best_score_ : float
Mean cross-validated score of the best_estimator.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
multimetric_ : bool
Whether or not the scorers compute several metrics.
classes_ : ndarray of shape (n_classes,)
The classes labels. This is present only if ``refit`` is specified and
the underlying estimator is a classifier.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
See Also
--------
:class:`HalvingRandomSearchCV`:
Random search over a set of parameters using successive halving.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> from sklearn.model_selection import HalvingGridSearchCV
...
>>> X, y = load_iris(return_X_y=True)
>>> clf = RandomForestClassifier(random_state=0)
...
>>> param_grid = {"max_depth": [3, None],
... "min_samples_split": [5, 10]}
>>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators',
... max_resources=10,
... random_state=0).fit(X, y)
>>> search.best_params_ # doctest: +SKIP
{'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
"""
_required_parameters = ["estimator", "param_grid"]

def __init__(
    self,
    estimator,
    param_grid,
    *,
    factor=3,
    resource="n_samples",
    max_resources="auto",
    min_resources="exhaust",
    aggressive_elimination=False,
    cv=5,
    scoring=None,
    refit=True,
    error_score=np.nan,
    return_train_score=True,
    random_state=None,
    n_jobs=None,
    verbose=0,
):
    """Store the grid and forward all shared successive-halving options
    to the base class."""
    # Everything except the grid itself is handled by the base class.
    shared_options = dict(
        scoring=scoring,
        n_jobs=n_jobs,
        refit=refit,
        verbose=verbose,
        cv=cv,
        random_state=random_state,
        error_score=error_score,
        return_train_score=return_train_score,
        max_resources=max_resources,
        resource=resource,
        factor=factor,
        min_resources=min_resources,
        aggressive_elimination=aggressive_elimination,
    )
    super().__init__(estimator, **shared_options)
    self.param_grid = param_grid
    _check_param_grid(self.param_grid)
def _generate_candidate_params(self):
    """Return every combination in ``param_grid`` as the candidate set."""
    grid = ParameterGrid(self.param_grid)
    return grid
class HalvingRandomSearchCV(BaseSuccessiveHalving):
"""Randomized search on hyper parameters.
The search strategy starts evaluating all the candidates with a small
amount of resources and iteratively selects the best candidates, using more
and more resources.
The candidates are sampled at random from the parameter space and the
number of sampled candidates is determined by ``n_candidates``.
Read more in the :ref:`User guide<successive_halving_user_guide>`.
.. note::
This estimator is still **experimental** for now: the predictions
and the API might change without any deprecation cycle. To use it,
you need to explicitly import ``enable_halving_search_cv``::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingRandomSearchCV
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_candidates : int, default='exhaust'
The number of candidate parameters to sample, at the first
iteration. Using 'exhaust' will sample enough candidates so that the
last iteration uses as many resources as possible, based on
`min_resources`, `max_resources` and `factor`. In this case,
`min_resources` cannot be 'exhaust'.
factor : int or float, default=3
The 'halving' parameter, which determines the proportion of candidates
that are selected for each subsequent iteration. For example,
``factor=3`` means that only one third of the candidates are selected.
resource : ``'n_samples'`` or str, default='n_samples'
Defines the resource that increases with each iteration. By default,
the resource is the number of samples. It can also be set to any
parameter of the base estimator that accepts positive integer
values, e.g. 'n_iterations' or 'n_estimators' for a gradient
boosting estimator. In this case ``max_resources`` cannot be 'auto'
and must be set explicitly.
max_resources : int, default='auto'
The maximum number of resources that any candidate is allowed to use
for a given iteration. By default, this is set to ``n_samples`` when
``resource='n_samples'`` (default), else an error is raised.
min_resources : {'exhaust', 'smallest'} or int, default='smallest'
The minimum amount of resource that any candidate is allowed to use
for a given iteration. Equivalently, this defines the amount of
resources `r0` that are allocated for each candidate at the first
iteration.
- 'smallest' is a heuristic that sets `r0` to a small value:
- ``n_splits * 2`` when ``resource='n_samples'`` for a regression
problem
- ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
classification problem
- ``1`` when ``resource != 'n_samples'``
- 'exhaust' will set `r0` such that the **last** iteration uses as
much resources as possible. Namely, the last iteration will use the
highest value smaller than ``max_resources`` that is a multiple of
both ``min_resources`` and ``factor``. In general, using 'exhaust'
leads to a more accurate estimator, but is slightly more time
consuming. 'exhaust' isn't available when `n_candidates='exhaust'`.
Note that the amount of resources used at each iteration is always a
multiple of ``min_resources``.
aggressive_elimination : bool, default=False
This is only relevant in cases where there isn't enough resources to
reduce the remaining candidates to at most `factor` after the last
iteration. If ``True``, then the search process will 'replay' the
first iteration for as long as needed until the number of candidates
is small enough. This is ``False`` by default, which means that the
last iteration may evaluate more than ``factor`` candidates. See
:ref:`aggressive_elimination` for more details.
cv : int, cross-validation generator or an iterable, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. note::
Due to implementation details, the folds produced by `cv` must be
the same across multiple calls to `cv.split()`. For
built-in `scikit-learn` iterators, this can be achieved by
deactivating shuffling (`shuffle=False`), or by setting the
`cv`'s `random_state` parameter to an integer.
scoring : string, callable, or None, default=None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None, the estimator's score method is used.
refit : bool, default=True
If True, refit an estimator using the best found parameters on the
whole dataset.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``HalvingRandomSearchCV`` instance.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
random_state : int, RandomState instance or None, default=None
Pseudo random number generator state used for subsampling the dataset
when `resources != 'n_samples'`. Also used for random uniform
sampling from lists of possible values instead of scipy.stats
distributions.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int
Controls the verbosity: the higher, the more messages.
Attributes
----------
n_resources_ : list of int
The amount of resources used at each iteration.
n_candidates_ : list of int
The number of candidate parameters that were evaluated at each
iteration.
n_remaining_candidates_ : int
The number of candidate parameters that are left after the last
iteration. It corresponds to `ceil(n_candidates[-1] / factor)`
max_resources_ : int
The maximum number of resources that any candidate is allowed to use
for a given iteration. Note that since the number of resources used at
each iteration must be a multiple of ``min_resources_``, the actual
number of resources used at the last iteration may be smaller than
``max_resources_``.
min_resources_ : int
The amount of resources that are allocated for each candidate at the
first iteration.
n_iterations_ : int
The actual number of iterations that were run. This is equal to
``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
Else, this is equal to ``min(n_possible_iterations_,
n_required_iterations_)``.
n_possible_iterations_ : int
The number of iterations that are possible starting with
``min_resources_`` resources and without exceeding
``max_resources_``.
n_required_iterations_ : int
The number of iterations that are required to end up with less than
``factor`` candidates at the last iteration, starting with
``min_resources_`` resources. This will be smaller than
``n_possible_iterations_`` when there isn't enough resources.
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``. It contains many informations for
analysing the results of a search.
Please refer to the :ref:`User guide<successive_halving_cv_results>`
for details.
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
best_score_ : float
Mean cross-validated score of the best_estimator.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
multimetric_ : bool
Whether or not the scorers compute several metrics.
classes_ : ndarray of shape (n_classes,)
The classes labels. This is present only if ``refit`` is specified and
the underlying estimator is a classifier.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
See Also
--------
:class:`HalvingGridSearchCV`:
Search over a grid of parameters using successive halving.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> from sklearn.model_selection import HalvingRandomSearchCV
>>> from scipy.stats import randint
...
>>> X, y = load_iris(return_X_y=True)
>>> clf = RandomForestClassifier(random_state=0)
>>> np.random.seed(0)
...
>>> param_distributions = {"max_depth": [3, None],
... "min_samples_split": randint(2, 11)}
>>> search = HalvingRandomSearchCV(clf, param_distributions,
... resource='n_estimators',
... max_resources=10,
... random_state=0).fit(X, y)
>>> search.best_params_ # doctest: +SKIP
{'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
"""
_required_parameters = ["estimator", "param_distributions"]

def __init__(
    self,
    estimator,
    param_distributions,
    *,
    n_candidates="exhaust",
    factor=3,
    resource="n_samples",
    max_resources="auto",
    min_resources="smallest",
    aggressive_elimination=False,
    cv=5,
    scoring=None,
    refit=True,
    error_score=np.nan,
    return_train_score=True,
    random_state=None,
    n_jobs=None,
    verbose=0,
):
    """Store the sampling configuration and forward all shared
    successive-halving options to the base class."""
    # Everything except the distributions and the candidate count is
    # handled by the base class.
    shared_options = dict(
        scoring=scoring,
        n_jobs=n_jobs,
        refit=refit,
        verbose=verbose,
        cv=cv,
        random_state=random_state,
        error_score=error_score,
        return_train_score=return_train_score,
        max_resources=max_resources,
        resource=resource,
        factor=factor,
        min_resources=min_resources,
        aggressive_elimination=aggressive_elimination,
    )
    super().__init__(estimator, **shared_options)
    self.param_distributions = param_distributions
    self.n_candidates = n_candidates
def _generate_candidate_params(self):
    """Sample the candidate parameter settings for the first iteration."""
    if self.n_candidates == "exhaust":
        # Sample enough candidates that the last iteration can consume
        # as much of the resource budget as possible.
        first_iter_count = self.max_resources_ // self.min_resources_
    else:
        first_iter_count = self.n_candidates
    return ParameterSampler(
        self.param_distributions,
        first_iter_count,
        random_state=self.random_state,
    )
| {
"content_hash": "55f09d71329879d84c68736bab6f0bdf",
"timestamp": "",
"source": "github",
"line_count": 1047,
"max_line_length": 88,
"avg_line_length": 40.69149952244508,
"alnum_prop": 0.6218195474603324,
"repo_name": "amueller/scikit-learn",
"id": "1271691d05b7ba1f97b3798178df1d985a9ceaa9",
"size": "42604",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sklearn/model_selection/_search_successive_halving.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41206"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "9958394"
},
{
"name": "Shell",
"bytes": "44588"
}
],
"symlink_target": ""
} |
import csv
import os
from purchases import Purchase
def fibonacci_blocking(limit):
    """Return the first ``limit - 1`` Fibonacci numbers as a list.

    Eager ("blocking") counterpart of the ``fibonacci`` generator: the
    whole series is materialized before anything is returned.
    """
    series = []
    a, b = 0, 1
    # range(1, limit) in the original runs limit - 1 times.
    for _ in range(limit - 1):
        a, b = b, a + b
        series.append(a)
    return series
def fibonacci():
    """Yield Fibonacci numbers indefinitely: 1, 1, 2, 3, 5, 8, ..."""
    prev, cur = 1, 1
    yield prev
    while True:
        yield cur
        prev, cur = cur, prev + cur
def main():
    """Demo of generator-based pipelines.

    First consumes the infinite ``fibonacci`` generator until a value
    exceeds 100, then builds a lazy filter over Sacramento real-estate
    records and prints the first few matches.
    """
    # The generator is infinite; we stop explicitly once a value passes 100.
    for n in fibonacci():
        print(n, end=', ')
        if n > 100:
            break
    print()

    # Lazy pipeline: no row is examined until the loop below pulls items.
    data = get_data()
    two_bed_100k_homes = (
        home
        for home in data
        if home.beds >= 2 and home.price > 100_000
    )
    two_bed_tups = (
        (h.price, h.beds)
        for h in two_bed_100k_homes
    )
    # Printing a generator shows its repr, not its items — intentional here.
    print(two_bed_tups)

    # Show only the first six matching (price, beds) pairs.
    count = 0
    for p, b in two_bed_tups:
        count += 1
        print(p, b)
        if count > 5:
            break
def get_data():
    """Read the Sacramento real-estate CSV and return a list of Purchase
    objects, one per row."""
    base_folder = os.path.dirname(__file__)
    filename = os.path.join(base_folder, 'data',
                            'SacramentoRealEstateTransactions2008.csv')
    with open(filename, 'r', encoding='utf-8') as fin:
        reader = csv.DictReader(fin)
        return [Purchase.create_from_dict(row) for row in reader]
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| {
"content_hash": "2da135131b9f5beb279551b74e1b5e93",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 71,
"avg_line_length": 19.063291139240505,
"alnum_prop": 0.5272244355909694,
"repo_name": "Wintellect/WintellectWebinars",
"id": "a6f540a2b4b02a1e13ec399b8ca55d39e0adf3a7",
"size": "1506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2017-04-06 - Pythonic Code Through 5 Examples/tip4_generators/gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "47583"
},
{
"name": "CSS",
"bytes": "39803"
},
{
"name": "HTML",
"bytes": "87870"
},
{
"name": "JavaScript",
"bytes": "4383753"
},
{
"name": "Jupyter Notebook",
"bytes": "234737"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "208421"
},
{
"name": "SCSS",
"bytes": "152"
},
{
"name": "Shell",
"bytes": "4251"
},
{
"name": "TypeScript",
"bytes": "142946"
}
],
"symlink_target": ""
} |
"""Setup the MySQL-compatible database.
If run directly, this module will setup the ClashCallerBot database with
tables and display their format and contents. Additionally,
this module provides a class with various methods for managing the
MySQL-compatible database:
* Create database and tables.
* View table data and properties.
* Lock tables for reading and writing.
* Grant user permissions (if logged into database as root).
* Add rows to tables.
* Delete tables and rows.
* Convert python datetime to MySQL datetime.
"""
import mysql.connector as mysql
import logging.config
import datetime
from clashcallerbotreddit import LOGGING, config
# Logger
logging.config.dictConfig(LOGGING)
# FIXME: logging.raiseExceptions = False crashes during exception. Maybe remove console handler?
logging.raiseExceptions = True # Production mode if False (no console sys.stderr output)
logger = logging.getLogger('database')
class ClashCallerDatabase(object):
"""Implements a class for a ClashCaller Database.
Acts as an object-relational mapper for mysql.connector specific to ClashCallerBot.
Attributes:
config_file (configparser.ConfigParser): A configparser object with database.ini file pre-read.
section (str): Section heading containing bot information. Defaults to 'bot'.
root_user (bool): Specifies whether the database will be setup as root user.
mysql_connection (mysql.connector.connect): A mysql.connector.connect object.
cursor (mysql.connector.connect.cursor): A mysql.connector.connect.cursor object.
"""
def __init__(self, config_file=None, section='bot', root_user=None):
    """Validate arguments, read credentials from the config, and open the
    database connection.

    Args:
        config_file: Pre-read ConfigParser with a database.ini file.
        section: Section heading with the bot's credentials ('bot').
        root_user: Whether to connect with root credentials.

    Raises:
        ValueError: If ``root_user`` or ``config_file`` is not given.
    """
    if root_user is None:
        raise ValueError('root_user must be given.')
    if config_file is None:
        raise ValueError('A ConfigParser object must be given.')

    self._root_user = root_user
    # A root login also keeps the bot's credentials at hand (e.g. so
    # permissions can later be granted to the bot account).
    credentials_section = 'root' if self._root_user else section
    self._db_user = config_file[credentials_section]['user']
    self._db_pass = config_file[credentials_section]['password']
    if self._root_user:
        self._bot_name = config_file[section]['user']
        self._bot_passwd = config_file[section]['password']

    self._db_name = config_file[section]['database']
    self._message_table = config_file[section]['message_table']

    # Connections start unset; open_connections() populates both.
    self.mysql_connection = None
    self.cursor = None
    self.open_connections()
def __repr__(self):
return f'ClashCallerDatabase(configparser.ConfigParser(\'database.ini\'), {self._root_user})'
def __str__(self):
return f'Logged into database: {self._db_name} as: {self._db_user}'
def close_connections(self) -> None:
    """Close the database cursor and connection.

    Driver errors are logged rather than propagated.
    """
    try:
        # Close the cursor first, then the underlying connection.
        for resource in (self.cursor, self.mysql_connection):
            resource.close()
    except mysql.Error as err:
        logger.exception(f'close_connections: {err}')
@staticmethod
def convert_datetime(dt: datetime) -> str:
"""Converts python datetime to MySQL datetime.
Method converts given python datetime object to MySQL datetime format.
Args:
dt: Datetime object in default format.
Returns:
Datetime string in MySQL format.
"""
return dt.strftime('%Y-%m-%d %H:%M:%S') # Convert to MySQL datetime
def create_database(self) -> None:
    """Create the bot database named in the config file.

    Driver errors (e.g. database already exists) are logged, not raised.
    """
    try:
        self.cursor.execute('CREATE DATABASE {};'.format(self._db_name))
    except mysql.Error as err:
        logger.exception(f'create_database: {err}')
def create_table(self, tbl_name: str, cols: str) -> None:
    """Create a table in the bot database.

    Args:
        tbl_name: Name to give the table.
        cols: Column definitions, as raw SQL (e.g.
            'id INT UNSIGNED NOT NULL AUTO_INCREMENT, ..., PRIMARY KEY(id)').
    """
    try:
        self.select_database()
        # InnoDB engine, matching the rest of the schema.
        self.cursor.execute(f'CREATE TABLE {tbl_name} ({cols}) ENGINE=InnoDB;')
    except mysql.Error as err:
        logger.exception(f'create_table: {err}')
def delete_row(self, tid: str) -> None:
    """Delete the row with the given id from the message table.

    Args:
        tid: Table id from the id column of the message table.
    """
    try:
        self.lock_write(self._message_table)
        # Pass tid as a query parameter so the driver escapes it instead
        # of interpolating it into the SQL string (prevents SQL
        # injection). Only the table name, which comes from our own
        # config file, is interpolated.
        delete_row = f'DELETE FROM {self._message_table} WHERE id = %s;'
        self.cursor.execute(delete_row, (tid,))
        self.mysql_connection.commit()
        self.unlock_tables()
    except mysql.Error as err:
        logger.exception(f'delete_row: {err}')
def describe_table(self, tbl_name: str) -> list:
    """Return the structure (DESCRIBE output) of the given table.

    Args:
        tbl_name: Name of the table to describe.

    Returns:
        List of rows describing the table's columns; empty list on error.
    """
    rows = []
    try:
        self.lock_read(tbl_name)
        self.cursor.execute(f'DESCRIBE {tbl_name};')
        rows = self.cursor.fetchall()
        self.unlock_tables()
    except mysql.Error as err:
        logger.exception(f'describe_table: {err}')
    return rows
def drop_table(self, tbl_name: str) -> None:
    """Drop the given table from the bot database.

    Args:
        tbl_name: Table to drop. If it does not exist, the error is
            logged and nothing is dropped.
    """
    try:
        self.select_database()
        if tbl_name not in self.get_tables():
            # Raised and caught locally so the miss is logged uniformly.
            raise mysql.ProgrammingError('Table does not exist.')
        self.lock_write(tbl_name)
        self.cursor.execute(f'DROP TABLE IF EXISTS {tbl_name};')
        self.unlock_tables()
    except (mysql.Error, mysql.ProgrammingError) as err:
        logger.exception(f'drop_table: {err}')
def get_removable_messages(self, usr_name: str, link: str) -> list:
"""Retrieves list of messages that match the username and permalink.
Checks the message table for rows containing the given user name and given link.
Args:
usr_name: Reddit username wanting to delete saved calls.
link: Comment permalink of saved call (without domain prefix)
Returns:
List of messages matching query. Empty list if none found.
"""
messages = []
try:
self.lock_read(self._message_table)
find_messages = f'SELECT * FROM {self._message_table} WHERE (username = \'{usr_name}\') AND (permalink ' \
f'= \'{link}\') GROUP BY id;'
self.cursor.execute(find_messages)
messages = self.cursor.fetchall()
self.unlock_tables()
except mysql.Error as err:
logger.exception(f'get_removable_messages: {err}')
return messages
def get_expired_messages(self, time_now: datetime.datetime) -> list:
"""Retrieves list of messages that have expired.
Method returns list of messages whose expiration times are before current datetime.
Args:
time_now: Current datetime.
Returns:
List containing results of query.
"""
messages = []
time_now = self.convert_datetime(time_now)
try:
self.lock_read(self._message_table)
find_messages = f'SELECT * FROM {self._message_table} WHERE new_date < \'{time_now}\' GROUP BY id;'
self.cursor.execute(find_messages)
messages = self.cursor.fetchall()
self.unlock_tables()
except mysql.Error as err:
logger.exception(f'get_expired_messages: {err}')
return messages
def get_tables(self) -> list:
"""Return table list of database.
Method returns a list with the names of the tables.
Returns:
List of table names.
"""
table_names = []
try:
self.select_database()
self.cursor.execute('SHOW TABLES;')
tables = self.cursor.fetchall()
for table in tables:
table_names.append(str(table[0]))
except mysql.Error as err:
logger.exception(f'get_tables: {err}')
return table_names
def get_rows(self, tbl_name: str) -> tuple:
"""Fetch table rows.
Method gets rows of given table by order of id in a tuple.
Args:
tbl_name: Name of table to get rows from.
Returns:
Tuple containing each row's data, empty tuple otherwise.
"""
rows = ()
try:
self.lock_read(tbl_name)
self.cursor.execute(f'SELECT * FROM {tbl_name} GROUP BY id;')
rows = tuple(self.cursor.fetchall())
self.unlock_tables()
except mysql.Error as err:
logger.exception(f'get_rows: {err}')
return rows
def get_user_messages(self, usr_name: str) -> list:
"""Retrieves list of messages that match the username.
Checks the message table for rows containing the given user name.
Args:
usr_name: Reddit username wanting to list saved calls.
Returns:
List of messages matching query. Empty list if none found.
"""
messages = []
try:
self.lock_read(self._message_table)
find_messages = f'SELECT * FROM {self._message_table} WHERE (username = \'{usr_name}\') GROUP BY id;'
self.cursor.execute(find_messages)
messages = self.cursor.fetchall()
self.unlock_tables()
except mysql.Error as err:
logger.exception(f'get_user_messages: {err}')
return messages
def grant_permissions(self) -> None:
"""Grants bot user permissions to database.
Method grants bot user permissions to database.
Notes:
Only database root user can grant database permissions.
"""
if not self._root_user:
msg = 'Only root user can grant database permissions.'
logger.error(msg)
raise RuntimeError(msg)
try:
cmd = f'GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, INDEX, ALTER, ' \
f'CREATE TEMPORARY TABLES, LOCK TABLES ON {self._db_name}.* TO \'{self._bot_name}\'@localhost ' \
f'IDENTIFIED BY \'{self._bot_passwd}\';'
self.cursor.execute(cmd)
except (mysql.Error, RuntimeError) as err:
logger.exception(f'grant_permissions: {err}')
def lock_read(self, tbl_name: str) -> None:
"""Locks table for reading.
Method locks a given table for read access.
Args:
tbl_name: Name of table to lock.
Returns:
True if successful, False otherwise.
Notes:
* Any previous locks are `implicitly released`_.
* Read locks have lower priority than write locks.
.. _implicitly released:
https://dev.mysql.com/doc/refman/8.0/en/lock-tables.html
"""
try:
lock = f'LOCK TABLE {tbl_name} READ;'
self.cursor.execute(lock)
except mysql.Error as err:
logger.exception(f'lock_read: {err}')
def lock_write(self, tbl_name: str) -> None:
"""Locks table for writing.
Method locks a given table for write access.
Args:
tbl_name: Name of table to lock.
Notes:
* Any previous locks are `implicitly released`_.
* Write locks have higher priority than read locks.
.. _implicitly released:
https://dev.mysql.com/doc/refman/8.0/en/lock-tables.html
"""
try:
lock = f'LOCK TABLE {tbl_name} WRITE;'
self.cursor.execute(lock)
except mysql.Error as err:
logger.exception(f'lock_write: {err}')
    def open_connections(self) -> None:
        """Open database connections.

        Creates the MySQL connection and cursor and stores them on
        `self.mysql_connection` and `self.cursor`. Connection errors are
        logged rather than raised.
        """
        try:
            self.mysql_connection = mysql.connect(user=self._db_user, password=self._db_pass, database=self._db_name)
            self.cursor = self.mysql_connection.cursor()
        except mysql.Error as err:
            logger.exception(f'open_connections: {err}')
def save_message(self, link: str, msg: str, exp: datetime, usr_name: str) -> None:
"""Saves given comment data into message_data table.
Method saves given inputs in message_date table as a row.
Args:
link: Comment permalink.
msg: Comment message.
exp: Expiration datetime object.
usr_name: Comment author username.
"""
exp_mysql = self.convert_datetime(exp)
if "'" in msg:
msg = msg.replace("'", "") # Remove apostrophes in message
if ";" in msg:
msg = msg.replace(";", "") # Remove semicolons in message
try:
self.lock_write(self._message_table)
add_row = f'INSERT INTO {self._message_table} (permalink, message, new_date, username) ' \
f'VALUES (\'{link}\', \'{msg}\', \'{exp_mysql}\', \'{usr_name}\');'
self.cursor.execute(add_row)
self.mysql_connection.commit()
self.unlock_tables()
except mysql.Error as err:
logger.exception(f'save_message: {err}')
    def select_database(self) -> None:
        """Select database for command execution.

        Issues `USE <db>` so subsequent statements run against
        `self._db_name`. Errors are logged rather than raised.
        """
        try:
            self.cursor.execute(f'USE {self._db_name};')
        except mysql.Error as err:
            logger.exception(f'select_database: {err}')
def unlock_tables(self) -> None:
"""Unlocks tables to allow access.
Method unlocks tables to allow read/write access.
"""
try:
unlock = 'UNLOCK TABLES;'
self.cursor.execute(unlock)
except mysql.Error as err:
logger.exception(f'unlock_tables: {err}')
def main():
    """Exercise the clashcaller database: ensure the message table exists,
    print its schema and rows, and grant bot permissions when run as root."""
    db = ClashCallerDatabase(config_file=config, root_user=False)

    db.select_database()

    # Show the tables.
    tables = db.get_tables()
    print(tables)

    # Create the message table on first run.
    if db._message_table not in tables:
        columns = ('id INT UNSIGNED NOT NULL AUTO_INCREMENT, '
                   'permalink VARCHAR(100), message VARCHAR(100), new_date DATETIME, '
                   'username VARCHAR(20), PRIMARY KEY(id)')
        db.create_table(db._message_table, columns)
        tables = db.get_tables()

    # Describe the message table, then dump its rows.
    print(db.describe_table(db._message_table))
    print(db.get_rows(db._message_table))

    # Direct access of protected member, but only to read. Should be okay...?
    if db._root_user:
        db.grant_permissions()

    db.close_connections()


# If run directly, instead of imported as a module, run main():
if __name__ == '__main__':
    main()
| {
"content_hash": "4fc8911a926654c2001eaa62aeb10f06",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 118,
"avg_line_length": 33.58316221765914,
"alnum_prop": 0.5939468052583308,
"repo_name": "JoseALermaIII/clashcallerbot-reddit",
"id": "a908d2f81a1ad9b982b337a337d3ef14bb5cbe35",
"size": "16390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clashcallerbotreddit/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51223"
},
{
"name": "Shell",
"bytes": "4473"
}
],
"symlink_target": ""
} |
def capitalize_first_letter(s):
    """Return *s* with its first character upper-cased.

    Args:
        s: String to capitalize; may be empty or None.

    Returns:
        The string with its first character upper-cased, or '' when *s*
        is falsy (None or empty).
    """
    # Bug fix: the old guard `len(s) > 1` made single-character strings
    # fall through and return '' instead of being capitalized.
    if s:
        return s[0].upper() + s[1:]
    return ''
| {
"content_hash": "50207ef731bd04ee0f0a0aa568db4f25",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 28.2,
"alnum_prop": 0.5602836879432624,
"repo_name": "talbor49/Poet",
"id": "cbb8c80cee7a2a03de37f3c023e1c467bdb686f5",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1506854"
},
{
"name": "HTML",
"bytes": "6399"
},
{
"name": "JavaScript",
"bytes": "1513122"
},
{
"name": "Python",
"bytes": "19558"
}
],
"symlink_target": ""
} |
from framework.models.common import NdbModel
from mcfw.rpc import serialize_complex_value, parse_complex_value
def convert_to_unicode(v):
    """Return *v* as unicode: UTF-8 byte strings are decoded, None stays
    None, and every other value is passed through unchanged."""
    if v is None:
        return None
    if not isinstance(v, str):
        return v
    return v.decode('utf-8')
class TO(object):
    """Base transfer object: wraps keyword arguments as attributes and
    (de)serializes via mcfw's complex-value helpers."""

    def __str__(self):
        # Useful when debugging. Can be evaluated to get an object with the same properties back.
        props = ', '.join('%s=%r' % (key, getattr(self, key))
                          for key in self.to_dict())
        return '%s(%s)' % (self.__class__.__name__, props)

    __repr__ = __str__

    def __init__(self, **kwargs):
        if 'type' in kwargs and isinstance(kwargs['type'], basestring):
            # Fix for creating objects with subtype_mapping via constructor
            setattr(self, 'type', convert_to_unicode(kwargs['type']))
        for key, value in kwargs.iteritems():
            if isinstance(value, str):
                value = value.decode('utf-8')
            setattr(self, key, value)

    def to_dict(self, include=None, exclude=None):
        # type: (list[basestring], list[basestring]) -> dict
        """Serialize to a dict, optionally whitelisting or blacklisting keys."""
        serialized = serialize_complex_value(
            self, type(self), False, skip_missing=True)
        if include:
            keys = include if isinstance(include, list) else [include]
            return {key: serialized[key] for key in keys if key in serialized}
        if exclude:
            keys = exclude if isinstance(exclude, list) else [exclude]
            return {key: serialized[key]
                    for key in set(serialized.keys()) - set(keys)
                    if key in serialized}
        return serialized

    @classmethod
    def from_model(cls, model):
        """Build an instance from an NdbModel's dict representation."""
        assert isinstance(model, NdbModel)
        return cls.from_dict(model.to_dict())

    @classmethod
    def from_dict(cls, data):
        # type: (dict) -> cls
        return parse_complex_value(cls, data, False)

    @classmethod
    def from_list(cls, data):
        # type: (list[dict]) -> list[cls]
        return parse_complex_value(cls, data, True)
| {
"content_hash": "20ec3d785409065394e14dd792419a1d",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 99,
"avg_line_length": 35.21052631578947,
"alnum_prop": 0.5700049825610364,
"repo_name": "rogerthat-platform/gae-plugin-framework",
"id": "d61f45f9cd85cdd8806e51e33421f9d95737bf78",
"size": "2641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/framework/server/framework/to/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2821"
},
{
"name": "HTML",
"bytes": "9064"
},
{
"name": "JavaScript",
"bytes": "7755"
},
{
"name": "Python",
"bytes": "84984"
},
{
"name": "Shell",
"bytes": "787"
},
{
"name": "TypeScript",
"bytes": "120599"
}
],
"symlink_target": ""
} |
from zeit.cms.i18n import MessageFactory as _
import datetime
import pytz
import re
import zope.app.container.interfaces
import zope.i18nmessageid
import zope.interface
import zope.interface.common.sequence
import zope.schema
import zope.security
# XML namespace URIs for CMS document properties and legacy QPS attributes.
DOCUMENT_SCHEMA_NS = u"http://namespaces.zeit.de/CMS/document"
QPS_SCHEMA_NS = u"http://namespaces.zeit.de/QPS/attributes"
# Prefix of CMS unique ids (cf. ICMSContent.uniqueId below).
ID_NAMESPACE = u'http://xml.zeit.de/'
TEASER_NAMESPACE = u'http://xml.zeit.de/CMS/Teaser'
PRINT_NAMESPACE = u"http://namespaces.zeit.de/CMS/print"
# lovely.remotetask stores times as 32 bit leading to an overflow after 2030.
MAX_PUBLISH_DATE = datetime.datetime(2030, 1, 1, tzinfo=pytz.UTC)
# Backward compatibility imports: these names used to live here and are
# re-exported so old import sites keep working.
from zeit.connector.interfaces import (  # noqa
    DeleteProperty, LockingError, IConnector, IResource,
    IWebDAVReadProperties, IWebDAVWriteProperties, IWebDAVProperties)
# Meta-interface: extends IInterface, so its providers are themselves
# interface objects that describe a CMS content type.
class ICMSContentType(zope.interface.interfaces.IInterface):
    """Interface for content types."""
class InvalidName(zope.schema.ValidationError):
    # __doc__ carries the translated, user-facing message for this error;
    # raised by `valid_name` below.
    __doc__ = _('Name contains invalid characters')
class ValidationError(zope.schema.ValidationError):
    # Generic validation error that carries its message as the first
    # positional argument instead of in the class docstring.

    def doc(self):
        # Overrides the base doc() to return the message passed at raise
        # time (self.args[0]).
        return self.args[0]
valid_name_regex = re.compile(r'^[A-Za-z0-9\.\,\-_*()~]+$').match


def valid_name(value):
    """Validate *value* as a CMS object name.

    Raises InvalidName when the value contains characters outside the
    allowed set; returns True otherwise.
    """
    match = valid_name_regex(value)
    if match is None:
        raise InvalidName(value)
    return True
class ICMSContent(zope.interface.Interface):
    """Interface for all CMS content being loaded from the repository.
    """

    # Unique id of the content object; presumably prefixed with
    # ID_NAMESPACE (http://xml.zeit.de/) — TODO confirm against resolvers.
    uniqueId = zope.interface.Attribute("Unique Id")

    # Name within the parent container; constrained to the character set
    # accepted by `valid_name` and read-only through this schema field.
    __name__ = zope.schema.TextLine(
        title=_("File name"),
        readonly=True,
        constraint=valid_name)
# Adaptation marker: declares no attributes of its own.
class ICMSWCContent(zope.interface.Interface):
    """Adapting to this yields ICMSContent from the workingcopy if present,
    else from the repository."""
# Marker interface: adds no attributes beyond ICMSContent.
class IEditorialContent(ICMSContent):
    """Editorial content.

    Editorial content is content which actually *is* content. That is in
    contrast to for example folders which are used for structuring.
    """
# Marker interface: adds no attributes beyond ICMSContent.
class IAsset(ICMSContent):
    """Assets are special, usually simple, content objects.

    Assets are useles themselves but are integrated into other objects.
    An example is the image.
    """
# Permission marker: declares no attributes beyond IPermission.
class IEditPermission(zope.security.interfaces.IPermission):
    """A permission which is always forbidden in the repository."""
class ITypeDeclaration(zope.interface.Interface):
    """Declares the stable string identifier of a content type."""

    type_identifier = zope.schema.TextLine(
        title=u'Unique identifier for this type')
    # XXX add other attributes
class IResult(zope.interface.common.sequence.IReadSequence):
    """A list of dicts, with info about the total number of entries."""

    # May exceed len() of the sequence when only a page of results is held.
    hits = zope.interface.Attribute(
        'Number of total available entries (for pagination)')
class Result(list):
    """A list with additional property ``hits``."""

    zope.interface.implements(IResult)

    # Total number of matching entries; callers set this after construction
    # (defaults to 0).
    hits = 0
def normalize_filename(filename):
    """Normalize *filename* to a lowercase ASCII CMS-safe name.

    Lowercases, transliterates German umlauts, strips special characters
    from the ends, replaces the rest with dashes, and keeps only the dot of
    a known filename extension.
    """
    # NOTE: The master version of the algorithm is implemented in JS in
    # zeit.cms.browser.js:filename.js, keep in sync!
    name = filename.strip().lower()
    # Transliterate German umlauts and sharp s.
    for umlaut, ascii_equivalent in (
            (u'ä', 'ae'), (u'ö', 'oe'), (u'ü', 'ue'), (u'ß', 'ss')):
        name = name.replace(umlaut, ascii_equivalent)
    # Remove special characters at beginning and end.
    # XXX It's unclear why this doesn't work as a single regexp.
    name = re.sub('^([^a-z0-9]+)(.*?)$', r'\2', name)
    name = re.sub('^(.*?)([^a-z0-9]+)$', r'\1', name)
    # Replace special characters, but keep dots for special treatment.
    name = re.sub('[^a-z0-9.]', '-', name)
    # Protect the dot of a known filename extension by swapping it for '_'.
    name = re.sub(
        r'^(.*)\.(jpg|jpeg|png|pdf|mp3|swf|rtf|gif|svg|bmp)$', r'\1_\2', name)
    name = name.replace('.', '-')   # every other dot becomes a dash
    name = name.replace('_', '.')   # restore the protected extension dot
    name = re.sub('-+', '-', name)  # collapse runs of dashes
    name = name.replace('-.', '.')  # no dash directly before the extension
    return name
| {
"content_hash": "2d261a2ba9ffa934668bb05b1abd22bf",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 77,
"avg_line_length": 27.386206896551723,
"alnum_prop": 0.6789221858473936,
"repo_name": "ZeitOnline/zeit.cms",
"id": "df87e292281d391bb01019fa6347c9261ab1b75f",
"size": "3990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/cms/interfaces.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "45467"
},
{
"name": "HTML",
"bytes": "10561"
},
{
"name": "JavaScript",
"bytes": "152481"
},
{
"name": "Python",
"bytes": "920274"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the ``Producto`` inventory table."""

    # First migration of this app: depends on nothing.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Producto',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nombre', models.CharField(max_length=100)),
                ('cantidad', models.IntegerField(default=10)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "aa25996c47b45052015ffd2a458d3b7d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 25.956521739130434,
"alnum_prop": 0.541038525963149,
"repo_name": "DJMora/Djember",
"id": "f8d2cdbc8815bbb26fc6424215d1cf54f0350e2d",
"size": "621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "339"
},
{
"name": "HTML",
"bytes": "6091"
},
{
"name": "JavaScript",
"bytes": "717400"
},
{
"name": "Python",
"bytes": "8577"
}
],
"symlink_target": ""
} |
"""Example: plot the 3D response of a hypercardioid microphone pattern."""
import numpy as np
import matplotlib.pyplot as plt

from pyroomacoustics.directivities import (
    DirectivityPattern,
    DirectionVector,
    CardioidFamily,
)

# Hypercardioid pattern, oriented at azimuth 0°, colatitude 45°.
pattern = DirectivityPattern.HYPERCARDIOID
orientation = DirectionVector(azimuth=0, colatitude=45, degrees=True)

# create cardioid object
dir_obj = CardioidFamily(orientation=orientation, pattern_enum=pattern)

# plot over the full sphere: all azimuths and all colatitudes, in degrees
azimuth = np.linspace(start=0, stop=360, num=361, endpoint=True)
colatitude = np.linspace(start=0, stop=180, num=180, endpoint=True)
# colatitude = None  # for 2D plot
dir_obj.plot_response(azimuth=azimuth, colatitude=colatitude, degrees=True)
plt.show()
| {
"content_hash": "b99efa0fd8f4de4c06d195e4d8a93253",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.778125,
"repo_name": "LCAV/pyroomacoustics",
"id": "fe10bbae8500e5f437097d84861dd3d6653cc386",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/directivities/plot_directivity_3D.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "96552"
},
{
"name": "Cython",
"bytes": "2700"
},
{
"name": "Dockerfile",
"bytes": "735"
},
{
"name": "Python",
"bytes": "941773"
}
],
"symlink_target": ""
} |
import os
import re
import codecs
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
def find_version(*file_paths):
    """Extract ``__version__`` from a source file under this directory.

    Reads the file instead of importing it (see the pypa-dev thread linked
    above) so setup.py works before dependencies are installed.

    Args:
        *file_paths: Path components relative to this setup.py's directory.

    Returns:
        The version string.

    Raises:
        RuntimeError: If the file cannot be read or contains no
            ``__version__ = '...'`` line.
    """
    # Open in Latin-1 so that we avoid encoding errors.
    # Use codecs.open for Python 2 compatibility
    try:
        with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
            version_file = f.read()
    except (IOError, OSError):
        # Narrowed from a bare `except:` so unrelated bugs (e.g. NameError)
        # are no longer masked as a missing-version error; `with` closes the
        # file even if read() fails.
        raise RuntimeError("Unable to find version string.")
    # The version line must have the form
    # __version__ = 'ver'
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Get the long description from the relevant file. Best effort: a missing or
# unreadable README must not break installation.
try:
    with codecs.open('README.rst', encoding='utf-8') as f:
        long_description = f.read()
except Exception:
    # `except Exception` (not a bare except) so Ctrl-C/SystemExit still work;
    # `with` closes the file even if read() fails.
    long_description = ''

# Read install requirements; fall back to none if the file is absent.
try:
    with codecs.open('requirements.txt', encoding='utf-8') as f:
        requirements = f.read().splitlines()
except Exception:
    requirements = []
# Package metadata and console entry point; the version string is read from
# mu/__init__.py by find_version() rather than imported.
setup(
    name='python-mu',
    version=find_version('mu/__init__.py'),
    description=('Python module and CLI to package and upload python lambda '
                 'functions to AWS Lambda.'),
    long_description=long_description,
    keywords='aws amazon lambda',
    author='Matt Martz',
    author_email='matt@sivel.net',
    url='https://github.com/sivel/mu',
    license='Apache License, Version 2.0',
    packages=find_packages(exclude=['tests', 'tests.*']),
    install_requires=requirements,
    # Installs a `mu` console script that dispatches to mu.main().
    entry_points={
        'console_scripts': [
            'mu=mu:main',
        ]
    },
    package_data={
        '': ['tox.ini'],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Environment :: Console',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Topic :: Utilities',
        'License :: OSI Approved :: Apache Software License',
    ]
)
| {
"content_hash": "6ca2d83131d5c3e813d8341f61644248",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 77,
"avg_line_length": 29.402439024390244,
"alnum_prop": 0.60348403152219,
"repo_name": "sivel/mu",
"id": "f259c9e3e51eae3ec164317288387befc95b16cf",
"size": "3083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17015"
}
],
"symlink_target": ""
} |
r"""Memoizing decorators
memoize is for functions with args only (no kw args)
Memoize is for instance methods
"""
from functools import partial
def memoize(f):
    """ Memoization decorator for functions taking one or more arguments. """

    class Memodict(dict):
        r"""Callable dict: keys are argument tuples, values are f's results."""

        def __init__(self, wrapped):
            super(Memodict, self).__init__()
            self.f = wrapped

        def __call__(self, *args):
            # Plain dict lookup; __missing__ fills the cache on first use.
            return self[args]

        def __missing__(self, key):
            result = self.f(*key)
            self[key] = result
            return result

    return Memodict(f)
class Memoize(object):
    """Cache the return value of a method on the instance it is invoked on.

    Meant to be used as a decorator of methods. Results are stored per
    instance, keyed by the method, its positional arguments, and its keyword
    arguments — so every argument passed to a memoized method must be
    hashable.

    Invoking a memoized method directly on its class bypasses the cache and
    behaves like a static call:

        class Obj(object):
            @memoize
            def add_to(self, arg):
                return self + arg
        Obj.add_to(1) # not enough arguments
        Obj.add_to(1, 2) # returns 3, result is not cached

    From:
        http://code.activestate.com/recipes/
        577452-a-memoize-decorator-for-instance-methods/
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: behave like the plain function.
        if obj is None:
            return self.func
        # Accessed on an instance: bind the instance as first argument.
        return partial(self, obj)

    def __call__(self, *args, **kw):
        instance = args[0]
        try:
            cache = instance.__cache
        except AttributeError:
            # First memoized call on this instance: create its cache dict.
            cache = instance.__cache = {}
        key = (self.func, args[1:], frozenset(kw.items()))
        if key not in cache:
            cache[key] = self.func(*args, **kw)
        return cache[key]
| {
"content_hash": "a2c2880dea8f68eddc84102ff57c58f3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 28.34285714285714,
"alnum_prop": 0.5952620967741935,
"repo_name": "fullmar/corelib",
"id": "5b86a5c0ebd94a9ac312a64a3d02fafebb3df00a",
"size": "2009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corelib/core/memoize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47676"
}
],
"symlink_target": ""
} |
"""Shared utils for Federated Reconstruction training and evaluation."""
from collections.abc import Callable
from typing import Optional
import tensorflow as tf
from tensorflow_federated.python.learning.models import model_weights
from tensorflow_federated.python.learning.reconstruction import model as model_lib
# Type alias for a function that takes in a TF dataset and produces two TF
# datasets. This is consumed by training and evaluation computation builders.
# The first is iterated over during reconstruction and the second is iterated
# over post-reconstruction, for both training and evaluation. This can be useful
# for e.g. splitting the dataset into disjoint halves for each stage, doing
# multiple local epochs of reconstruction/training, skipping reconstruction
# entirely, etc. See `build_dataset_split_fn` for a builder, although users can
# also specify their own `DatasetSplitFn`s (see `simple_dataset_split_fn` for an
# example).
DatasetSplitFn = Callable[[tf.data.Dataset, tf.Tensor], tuple[tf.data.Dataset,
tf.data.Dataset]]
def simple_dataset_split_fn(
    client_dataset: tf.data.Dataset) -> tuple[tf.data.Dataset, tf.data.Dataset]:
  """An example of a `DatasetSplitFn` that returns the original client data.

  Both the reconstruction data and post-reconstruction data will result from
  iterating over the same tf.data.Dataset. Note that depending on any
  preprocessing steps applied to client tf.data.Datasets, this may not produce
  exactly the same data in the same order for both reconstruction and
  post-reconstruction. For example, if
  `client_dataset.shuffle(reshuffle_each_iteration=True)` was applied,
  post-reconstruction data will be in a different order than reconstruction
  data.

  Args:
    client_dataset: `tf.data.Dataset` representing client data.

  Returns:
    A tuple of two `tf.data.Datasets`, the first to be used for reconstruction,
    the second to be used post-reconstruction.
  """
  # The same dataset object is handed back for both stages; see the caveat
  # about nondeterministic preprocessing (e.g. reshuffling) above.
  return client_dataset, client_dataset
def build_dataset_split_fn(recon_epochs: int = 1,
                           recon_steps_max: Optional[int] = None,
                           post_recon_epochs: int = 1,
                           post_recon_steps_max: Optional[int] = None,
                           split_dataset: bool = False) -> DatasetSplitFn:
  """Builds a `DatasetSplitFn` for Federated Reconstruction training/evaluation.

  The returned `DatasetSplitFn` parameterizes training and evaluation
  computations, enabling multiple local epochs of reconstruction, multiple
  epochs of post-reconstruction training, a cap on the number of steps for
  either stage, and splitting client datasets into disjoint halves per stage.

  Note that the returned function is used during both training and evaluation:
  during training, "post-reconstruction" refers to training of global
  variables, and during evaluation, it refers to calculation of metrics using
  reconstructed local variables and fixed global variables.

  Args:
    recon_epochs: The integer number of iterations over the dataset to make
      during reconstruction.
    recon_steps_max: If not None, the integer maximum number of steps (batches)
      to iterate through during reconstruction. This maximum number of steps is
      across all reconstruction iterations, i.e. it is applied after
      `recon_epochs`. If None, this has no effect.
    post_recon_epochs: The integer constant number of iterations to make over
      client data after reconstruction.
    post_recon_steps_max: If not None, the integer maximum number of steps
      (batches) to iterate through after reconstruction. This maximum number of
      steps is across all post-reconstruction iterations, i.e. it is applied
      after `post_recon_epochs`. If None, this has no effect.
    split_dataset: If True, splits `client_dataset` in half for each user,
      using even-indexed entries in reconstruction and odd-indexed entries
      after reconstruction. If False, `client_dataset` is used for both stages,
      with the above arguments applied. If True, splitting requires that
      multiple iterations through the dataset yield the same ordering (e.g.
      with `reshuffle_each_iteration=True` the two halves may overlap), and the
      dataset should have more than one batch for reasonable results, since
      the splitting does not occur within batches.

  Returns:
    A `SplitDatasetFn`.
  """

  # Helpers for the even/odd split of enumerated dataset entries.
  def _is_even(index, entry):
    return tf.equal(tf.math.floormod(index, 2), 0)

  def _is_odd(index, entry):
    return tf.greater(tf.math.floormod(index, 2), 0)

  def _drop_index(index, entry):
    return entry

  def dataset_split_fn(
      client_dataset: tf.data.Dataset
  ) -> tuple[tf.data.Dataset, tf.data.Dataset]:
    """A `DatasetSplitFn` built with the given arguments.

    Args:
      client_dataset: `tf.data.Dataset` representing client data.

    Returns:
      A tuple of two `tf.data.Datasets`, the first to be used for
      reconstruction, the second to be used post-reconstruction.
    """
    # Split dataset if needed. This assumes the dataset has a consistent
    # order across iterations.
    if split_dataset:
      recon_dataset = (
          client_dataset.enumerate().filter(_is_even).map(_drop_index))
      post_recon_dataset = (
          client_dataset.enumerate().filter(_is_odd).map(_drop_index))
    else:
      recon_dataset = client_dataset
      post_recon_dataset = client_dataset

    # Apply `recon_epochs` before limiting to a maximum number of batches.
    recon_dataset = recon_dataset.repeat(recon_epochs)
    if recon_steps_max is not None:
      recon_dataset = recon_dataset.take(recon_steps_max)

    # Do the same for post-reconstruction.
    post_recon_dataset = post_recon_dataset.repeat(post_recon_epochs)
    if post_recon_steps_max is not None:
      post_recon_dataset = post_recon_dataset.take(post_recon_steps_max)

    return recon_dataset, post_recon_dataset

  return dataset_split_fn
def get_global_variables(model: model_lib.Model) -> model_weights.ModelWeights:
  """Gets global variables from a `Model` as `ModelWeights`."""
  # Package the model's global trainable/non-trainable variable lists in the
  # standard ModelWeights container.
  return model_weights.ModelWeights(
      trainable=model.global_trainable_variables,
      non_trainable=model.global_non_trainable_variables)
def get_local_variables(model: model_lib.Model) -> model_weights.ModelWeights:
  """Gets local variables from a `Model` as `ModelWeights`."""
  # Package the model's local trainable/non-trainable variable lists in the
  # standard ModelWeights container.
  return model_weights.ModelWeights(
      trainable=model.local_trainable_variables,
      non_trainable=model.local_non_trainable_variables)
def has_only_global_variables(model: model_lib.Model) -> bool:
  """Returns `True` if the model has no local variables."""
  local_variables = list(model.local_trainable_variables)
  local_variables.extend(model.local_non_trainable_variables)
  # Empty list means no local variables of either kind.
  return not local_variables
| {
"content_hash": "bf903cd6e806386c82e564bdadba0be0",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 82,
"avg_line_length": 45.38461538461539,
"alnum_prop": 0.7275423728813559,
"repo_name": "tensorflow/federated",
"id": "d8ae62439bb7fa8dc115843af12a1009523ead52",
"size": "7841",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_federated/python/learning/reconstruction/reconstruction_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "729470"
},
{
"name": "Dockerfile",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6700736"
},
{
"name": "Shell",
"bytes": "7123"
},
{
"name": "Starlark",
"bytes": "387382"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from gum.indexer import MappingType, indexer
from gum.tests.test_app.models import Post, CommentThread, Comment
class PostMappingType(MappingType):
    """Mapping for ``Post``: indexes title, content, and a combined
    full-text field that also folds in the post's tag labels."""

    def document(self, instance):
        tag_labels = " ".join(tag.label for tag in instance.tags.all())
        return {
            "title": instance.title,
            "content": instance.content,
            "text": "{} {} {}".format(instance.title, instance.content,
                                      tag_labels),
        }

    def mapping(self):
        stored_string = {"type": "string", "store": True}
        return {
            self.get_type(): {
                "properties": {
                    "title": dict(stored_string),
                    "content": dict(stored_string),
                    "text": dict(stored_string),
                }
            }
        }


indexer.register(Post, PostMappingType)
class CommentThreadMappingType(MappingType):
    """Mapping for ``CommentThread``: each comment in the thread becomes
    its own document carrying the thread id and the comment body."""

    def document(self, instance):
        documents = []
        for comment in instance.comments.all():
            documents.append({
                "_id": comment.pk,
                "thread_id": instance.pk,
                "content": comment.content,
            })
        return documents

    def mapping(self):
        return {
            self.get_type(): {
                "properties": {
                    "thread_id": {"type": "long", "store": True},
                    "content": {"type": "string", "store": True},
                }
            }
        }


indexer.register(CommentThread, CommentThreadMappingType)
| {
"content_hash": "f6fd3e6ea352495d0241f33f511fe0d4",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 82,
"avg_line_length": 28.09090909090909,
"alnum_prop": 0.40722761596548,
"repo_name": "marcosgabarda/django-gum",
"id": "8be27482869541756afa7b6abc340e9f414a7717",
"size": "1878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gum/tests/test_app/index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36561"
}
],
"symlink_target": ""
} |
import argparse
import os
import re
def basename(path):
    """Return *path*'s final component without its extension."""
    stem, _extension = os.path.splitext(os.path.basename(path))
    return stem
def char_escape(c):
    # Escaping suitable for Protobuf text format, which is C-like.
    if c == "\n":
        return "\\n"
    if c in "'\"\\":
        return "\\" + c
    return c
def main():
    """Combine every HTML sample with every config sample into seed files.

    Writes one protobuf-text seed per (html, config) pair into --outdir and
    optionally emits a fuzzing dictionary of element/attribute names pulled
    from the HTML inputs.
    """
    parser = argparse.ArgumentParser(
        description="Generate sanitizer_api_fuzzer seed corpus.")
    parser.add_argument("--outdir", required=True)
    parser.add_argument("--dictionary")
    parser.add_argument("inputs", nargs="+")
    args = parser.parse_args()

    # Slurp every input up front, keyed by its path.
    inputs = {}
    for path in args.inputs:
        with open(path, "r") as handle:
            inputs[path] = handle.read()

    # File extensions distinguish HTML samples from config samples.
    htmls = [name for name in inputs if name.endswith(".html")]
    configs = [name for name in inputs if name.endswith(".txt")]

    # Emit one seed file per (html, config) combination.
    for html in htmls:
        # The escaped HTML is the same for every config, so build it once.
        escaped_html = "".join(char_escape(c) for c in inputs[html])
        for config in configs:
            out_path = "%s/%s-%s.txt" % (args.outdir, basename(html),
                                         basename(config))
            with open(out_path, "w") as out:
                out.write("html_string: \"%s\"\n%s\n" %
                          (escaped_html, inputs[config]))

    # Optionally write a dictionary of element and attribute names.
    # Simple regexps suffice: the dictionary only needs to be mostly
    # sensible, not perfectly parsed.
    if args.dictionary:
        words = set()
        for html in htmls:
            words.update(re.findall(r'(?<=<)\w+\b', inputs[html]))
            words.update(re.findall(r'\b\w+(?==)', inputs[html]))
        with open(args.dictionary, "w") as out:
            for word in words:
                out.write("\"%s\"\n" % word)


if __name__ == '__main__':
    main()
| {
"content_hash": "ef5603dbdff0b4456be0dba7e4a50ae9",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 33.3968253968254,
"alnum_prop": 0.5850760456273765,
"repo_name": "chromium/chromium",
"id": "49792626d9efca908e4a6331aab59493919fa47d",
"size": "2267",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/blink/renderer/modules/sanitizer_api/build_corpus.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the boards app.

    Creates the Board, Topic and Post tables, then wires up the Post ->
    Topic and Post -> updated_by foreign keys.
    """

    initial = True

    dependencies = [
        # Topics and posts reference the configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Board',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('description', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(max_length=4000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                # updated_at stays NULL until the post is first edited.
                ('updated_at', models.DateTimeField(null=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=255)),
                ('last_updated', models.DateTimeField(auto_now_add=True)),
                ('board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='topics', to='boards.Board')),
                ('starter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='topics', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after Topic exists so the FK target is defined.
        migrations.AddField(
            model_name='post',
            name='topic',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='boards.Topic'),
        ),
        migrations.AddField(
            model_name='post',
            name='updated_by',
            # related_name='+' disables the reverse accessor on the user.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
    ]
| {
"content_hash": "fa1a15e643b184da2a8a87cfb5ffcd50",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 146,
"avg_line_length": 42.81818181818182,
"alnum_prop": 0.5910828025477707,
"repo_name": "he305/google-analytics-site",
"id": "17735d3978cfa07a3f95d07efd849ef7457b3f78",
"size": "2428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boards/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155118"
},
{
"name": "HTML",
"bytes": "11771"
},
{
"name": "JavaScript",
"bytes": "202413"
},
{
"name": "Python",
"bytes": "19780"
}
],
"symlink_target": ""
} |
import sys, os, turtle
def turtledraw():
    """Draw with the default turtle: pen down, then turn left 100 degrees."""
    # NOTE(review): this Turtle instance is created but never used; the
    # module-level turtle functions below drive the default turtle. Kept
    # to preserve the original on-screen behavior (an extra turtle appears).
    turtle.Turtle()
    turtle.pendown()
    turtle.left(100)


def main():
    """Run the drawing and block until the turtle window is closed."""
    turtledraw()
    turtle.done()


main()
print("Hello")
| {
"content_hash": "6882ad00c230620dea9c166c5dd4a466",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 22,
"avg_line_length": 9.45,
"alnum_prop": 0.5767195767195767,
"repo_name": "defunSM/code",
"id": "7e762533201d6d66376340fdefb64cf46822f7d8",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyth/turtle1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25797"
},
{
"name": "C++",
"bytes": "69493"
},
{
"name": "Clojure",
"bytes": "57991"
},
{
"name": "Common Lisp",
"bytes": "62557"
},
{
"name": "IDL",
"bytes": "198"
},
{
"name": "Java",
"bytes": "8566"
},
{
"name": "Makefile",
"bytes": "567303"
},
{
"name": "NewLisp",
"bytes": "739"
},
{
"name": "Perl",
"bytes": "855"
},
{
"name": "Python",
"bytes": "47890"
},
{
"name": "QMake",
"bytes": "3710"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.redirects.models import Redirect
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse
from django.db.models import QuerySet, F
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from generic_admin import filters, inlines
class PermissionsControl(admin.ModelAdmin):
    """Roughly defines user rights.

    Subclasses toggle the ``add``/``change``/``delete`` class attributes to
    grant or revoke the corresponding admin permission wholesale, bypassing
    Django's per-user permission checks.
    """

    # Class-level switches consulted by the has_*_permission hooks below.
    add = True
    change = True
    delete = True

    def has_add_permission(self, request):
        # Ignores the request; permission is a static class-level flag.
        return self.add

    def has_change_permission(self, request, obj=None):
        return self.change

    def has_delete_permission(self, request, obj=None):
        return self.delete
class ChangeItemsStateActions(admin.ModelAdmin):
    """Admin actions that flip the ``is_active`` flag on selected items."""

    @staticmethod
    def after_action_message(updated_rows):
        """Return a localized '<count> item(s) was/were' message prefix."""
        if updated_rows == 1:
            return '1 {}'.format(_('item was'))
        return '{} {}'.format(updated_rows, _('items were'))

    def make_items_active(self, request, queryset):
        """Admin action: mark every selected item as active."""
        count = queryset.update(is_active=1)
        prefix = self.after_action_message(count)
        self.message_user(request, '{} {}.'.format(prefix, _('marked as active')))
    make_items_active.short_description = _('Make active')

    def make_items_non_active(self, request, queryset):
        """Admin action: mark every selected item as inactive."""
        count = queryset.update(is_active=0)
        prefix = self.after_action_message(count)
        self.message_user(request, '{} {}.'.format(prefix, _('marked as non-active')))
    make_items_non_active.short_description = _('Make inactive')
class AutoCreateRedirects(admin.ModelAdmin):
    """Create new redirect link after slug field change."""
    def save_model(self, request, obj, form, change):
        # Only act on edits (not creation) where the slug was modified.
        # NOTE(review): obj.slug already holds the NEW value here; the check
        # `obj.slug in obj.url` presumably verifies the URL is slug-derived —
        # confirm against the page model's url property.
        if change and 'slug' in form.changed_data and obj.slug in obj.url:
            # Pre-save state, re-fetched from the DB.
            old_obj = type(obj).objects.get(id=obj.id)
            redirect, created = Redirect.objects.get_or_create(
                site=get_current_site(request),
                old_path=old_obj.url,
                defaults={'new_path': obj.url},
            )
            if not created:
                # The old path already redirects somewhere else: revert the
                # slug change and explain to the admin user why.
                obj.slug = old_obj.slug
                self.message_user(
                    request,
                    _(
                        'The slug field wasn\'t saved. Can\'t create a redirect'
                        ' for new slug, because it is already occupied by:'
                    ) + f' {redirect.old_path} -> {redirect.new_path}',
                )
        super().save_model(request, obj, form, change)
class AbstractPage(ChangeItemsStateActions):
    """Generic class for each page."""

    actions = ['make_items_active', 'make_items_non_active']
    list_display_links = ['name']
    list_filter = [
        'is_active',
        filters.HasContent,
        filters.HasImages,
    ]
    save_on_top = True
    search_fields = ['id', 'name', 'slug']

    def get_queryset(self, request):
        """Fetch pages together with their parent in one query."""
        qs = super(AbstractPage, self).get_queryset(request)
        return qs.select_related('parent')

    def custom_parent(self, obj, urlconf=None):
        """Admin column: an <a> link to the parent page's change form."""
        parent = obj.parent
        if not parent:
            return
        # Default to this admin's own change view for the current model.
        urlconf = urlconf or '{}:{}_{}_change'.format(
            self.admin_site.name,
            self.model._meta.app_label,
            self.model._meta.model_name
        )
        change_url = reverse(urlconf, args=(parent.id,))
        return format_html(
            '<a href="{url}">{parent}</a>',
            parent=parent,
            url=change_url
        )
    custom_parent.admin_order_field = 'parent__name'
    custom_parent.short_description = _('Parent')
class PageWithoutModels(AbstractPage):
    """Admin for plain pages that are not backed by a related model."""
    list_display = ['id', 'name', 'slug', 'date_published', 'custom_parent', 'is_active']
    readonly_fields = ['id', 'correct_parent_id']
    inlines = [inlines.ImageInline]
    list_filter = ['is_active', filters.HasContent, filters.HasImages]

    def correct_parent_id(self, obj):
        """Needed for correct short_description attr"""
        # Read-only admin field exposing the raw parent FK id.
        return obj.parent_id
    correct_parent_id.short_description = _('Parent ID')
class PageWithModels(AbstractPage, PermissionsControl, AutoCreateRedirects):
    """Admin for pages that proxy exactly one related model.

    Each concrete subclass must be a proxy over a single related model;
    ``assert_is_proxy`` enforces this invariant at runtime.
    """
    readonly_fields = ['id', 'model_id']
    fieldsets = (
        ('Дополнительные характеристики', {
            'classes': ('seo-chars',),
            'fields': (
                'id',
                'is_active',
                ('name', 'slug'),
                'date_published',
                'menu_title',
                'seo_text',
                'template',
                'position',
            )
        }),
        ('Параметры страницы', {
            'classes': ('secondary-chars',),
            'fields': (
                'h1',
                'title',
                'keywords',
                'description',
                'content'
            )
        })
    )

    @staticmethod
    def assert_is_proxy(qs: QuerySet):
        """Is it proxy for only one related model?"""
        count = qs.order_by('related_model_name').distinct('related_model_name').count()
        assert count <= 1, 'You should split your model pages by proxy, before register it.'

    @classmethod
    def add_reference_to_field_on_related_model(cls, qs: QuerySet, **kwargs):
        # Annotate each page with fields pulled from its related model,
        # e.g. {'alias': 'field'} -> alias=F('<related>__field').
        # NOTE(review): re-runs the same distinct() query assert_is_proxy
        # just executed — could be hoisted; left as-is to preserve behavior.
        cls.assert_is_proxy(qs)
        modified_qs = qs.all()
        if qs.order_by('related_model_name').distinct('related_model_name').count() == 1:
            related_model_name = qs.first().related_model_name
            modified_qs = modified_qs.annotate(**{
                key: F('{}__{}'.format(related_model_name, value))
                for key, value in kwargs.items()
            })
        return modified_qs

    def model_id(self, obj):
        # Read-only admin field: id of the underlying related model instance.
        return obj.model.id

    def get_search_fields(self, request):
        """https://goo.gl/4jVdgn"""
        # Lazily derive search fields from the related model's name when the
        # subclass did not set any explicitly.
        self.assert_is_proxy(self.model.objects.all())
        if not self.search_fields:
            model_related_name = self.model.objects.first().related_model_name
            self.search_fields = ['{}__id'.format(model_related_name), 'name', 'parent__name']
        return self.search_fields
| {
"content_hash": "b69d0b2aad7b376a7b83ed711506d27c",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 94,
"avg_line_length": 32.45026178010471,
"alnum_prop": 0.5792191029364311,
"repo_name": "fidals/refarm-site",
"id": "88d628048aa550d946333ac7c13fa997cbe4e526",
"size": "6243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generic_admin/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64305"
},
{
"name": "HTML",
"bytes": "28723"
},
{
"name": "JavaScript",
"bytes": "31422"
},
{
"name": "Python",
"bytes": "296885"
}
],
"symlink_target": ""
} |
import sys
import aleph as al
try:
import numpy as np
except ImportError:
sys.exit(0)
import unittest
class TestSimplexMethods(unittest.TestCase):
    """NumPy integration tests for aleph persistence diagrams."""

    def test_construction(self):
        """A diagram converts to a NumPy array with matching point coords."""
        # The stray `pass` that preceded this body was removed — it was a
        # leftover no-op before the real assertions.
        M = al.SimplicialComplex([[0], [1], [2], [1, 0], [2, 0]])
        diagram = al.calculatePersistenceDiagrams(M)[0]
        numpy_diagram = np.array(diagram)
        for point, np_point in zip(diagram, numpy_diagram):
            assert point.x == np_point[0]
            assert point.y == np_point[1]
| {
"content_hash": "503662228a73d5a86ea2cf7904735a5d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 57,
"avg_line_length": 21.227272727272727,
"alnum_prop": 0.6809421841541756,
"repo_name": "Submanifold/Aleph",
"id": "45f4f5029984ffdc0e9a9d59ee3a19a5cb880909",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_python_numpy_integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1148700"
},
{
"name": "CMake",
"bytes": "37102"
},
{
"name": "Dockerfile",
"bytes": "1010"
},
{
"name": "Python",
"bytes": "44767"
},
{
"name": "Shell",
"bytes": "954"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys
def plotWaveform(audio):
    """Plot the waveform of a mono WAV file.

    ``audio`` is a filename (or file object) accepted by ``wave.open``.
    Exits the process for stereo input; otherwise shows a time/amplitude
    plot of the 16-bit samples.
    """
    spf = wave.open(audio, 'r')
    signal = spf.readframes(-1)
    # np.fromstring is deprecated/removed; frombuffer is the supported
    # zero-copy equivalent for binary sample data.
    signal = np.frombuffer(signal, dtype=np.int16)
    fs = spf.getframerate()

    # Mono only; NOTE(review): exits with status 0 even though this is an
    # error path — consider a nonzero code or raising instead.
    if spf.getnchannels() == 2:
        print('Just mono files')
        sys.exit(0)

    Time = np.linspace(0, len(signal) / fs, num=len(signal))

    plt.figure(1)
    plt.title('Waveform of an audio')
    plt.plot(Time, signal)
    plt.show()
| {
"content_hash": "310ff0ca7896a3fe5830f8e39a72bfee",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 56,
"avg_line_length": 25.238095238095237,
"alnum_prop": 0.630188679245283,
"repo_name": "zzw922cn/Automatic_Speech_Recognition",
"id": "aabb3332a1e21581168d5ef30132b73b286ac258",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "speechvalley/utils/visualization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "455"
},
{
"name": "Python",
"bytes": "165504"
},
{
"name": "Shell",
"bytes": "1703"
}
],
"symlink_target": ""
} |
import os
import sys
from business_theme import __version__ as version
# Prefer setuptools; fall back to distutils on very old environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

install_requires = [
    'mezzanine >= 3.1',
]

# `python setup.py publish` shortcut: upload an sdist, remind about tagging.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    print("You probably want to also tag the version now:")
    print("  git tag -a %s -m 'version %s'" % (version, version))
    print("  git push --tags")
    sys.exit()

# Read the long description with a context manager so the file handle is
# closed promptly (the original leaked it via open(...).read()).
with open('README.rst') as readme_file:
    readme = readme_file.read()

setup(
    name='mezzanine-business-theme',
    version=version,
    description="""Starter business theme for Mezzanine CMS""",
    long_description=readme,
    author='Dmitry Falk',
    author_email='dfalk5@gmail.com',
    url='https://github.com/dfalk/mezzanine-business-theme',
    packages=[
        'business_theme',
        'business_theme.templatetags',
    ],
    include_package_data=True,
    install_requires=install_requires,
    license="BSD",
    zip_safe=False,
    keywords='mezzanine business theme',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
)
"content_hash": "6509c2151e797831458e54d3b7cfb06f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 65,
"avg_line_length": 28.74074074074074,
"alnum_prop": 0.601159793814433,
"repo_name": "dfalk/mezzanine-business-theme",
"id": "1ca9a67a1ce6f9cfb58fc369d13cd28325626a72",
"size": "1600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "152"
},
{
"name": "Python",
"bytes": "6229"
},
{
"name": "Shell",
"bytes": "92"
}
],
"symlink_target": ""
} |
import urllib2, json, os, sys
HEADER = ''' Redirect = {
image: function(board, filename) {
switch (board) {
'''
POST = ''' }
},
post: function(board, postID) {
switch (board) {
'''
TO = ''' }
},
to: function(data) {
var board, threadID, url;
if (!data.isSearch) {
threadID = data.threadID;
}
board = data.board;
switch (board) {
'''
BOTTOM = ''' default:
if (threadID) {
url = "//boards.4chan.org/" + board + "/";
}
}
return url || null;
},
'''
CASE = " case '%s':\n"
RETURN_IMAGE = ' return "%s/" + board + "/full_image/" + filename;\n'
RETURN_POST = ' return "%s/_/api/chan/post/?board=" + board + "&num=" + postID;\n'
RETURN_REDIRECT = """ url = Redirect.path('%s', '%s', data);
break;
"""
ARCHIVES_URL = "https://4chenz.github.io/archives.json/archives.json"
ARCHIVES_JSON = os.path.join(os.path.dirname(os.path.abspath(__file__)), "archives.json")
PRIORITIES_JSON = os.path.join(os.path.dirname(os.path.abspath(__file__)), "priorities.json")
ARCHIVE_HIDDEN = [29,32,35]
def jsonloadf(filename):
    # Load a JSON file decoding as UTF-8 (Python 2 json.load signature).
    with open(filename) as f:
        data = json.load(f, 'utf-8')
    return data

def jsonsavef(filename, data):
    # Pretty-print `data` to `filename` with sorted keys (Python 2 json).
    with open(filename, 'wb') as f:
        json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '), encoding='utf-8')

def http_protocol(a):
    # Build a scheme prefix for archive dict `a`: protocol-relative when it
    # supports both schemes, otherwise the one it supports.
    # NOTE(review): returns None implicitly when neither flag is set.
    dom = a['domain']
    if a['https'] and a['http']:
        return '//' + dom
    elif a['https']:
        return 'https://' + dom
    elif a['http']:
        return 'http://' + dom
class Build:
    """Generates the JS board/file redirector tables from archives.json.

    Python 2 only (print-chevron statements, dict.iteritems). The generated
    JavaScript is written to `out`; diagnostics go to `msg`.
    """
    def __init__(self, outstream=sys.stdout, msgstream=sys.stderr):
        self.out = outstream
        self.msg = msgstream
        # board/file name -> index into self.data of the chosen archive.
        self.files = {}
        self.boards = {}
        self.data = None
        self.priorities = jsonloadf(PRIORITIES_JSON)

    def page_dl(self):
        # Download and parse the remote archives.json listing.
        request = urllib2.Request(ARCHIVES_URL)
        response = urllib2.urlopen(request)
        data = response.read()
        response.close()
        self.data = json.loads(data)

    def boards_list(self):
        # Deduplicated union of all archived files and boards.
        f = []
        b = []
        for a in self.data:
            f += a['files']
            b += a['boards']
        self.archivedfiles = list(set(f))
        self.archivedboards = list(set(b))

    def find_redundant(self):
        # Map each board/file to the list of archive indices serving it,
        # then split into single-archive vs multi-archive (redundant) sets.
        f = {}
        b = {}
        for n, a in enumerate(self.data):
            for e in a['files']:
                if e in f:
                    f[e].append(n)
                else:
                    f[e] = [n]
            for e in a['boards']:
                if e in b:
                    b[e].append(n)
                else:
                    b[e] = [n]
        def filterhidden(value):
            # Drop hidden archives unless they are the only candidate.
            return filter(lambda x: not (self.data[x]['uid'] in ARCHIVE_HIDDEN and len(value) > 1), value)
        self.singleboards = {}
        self.redundantboards = {}
        for k, v in b.iteritems():
            v2 = filterhidden(v)
            if len(v2) == 1:
                self.singleboards[k] = v2[0]
            if len(v2) > 1:
                self.redundantboards[k] = v2
        self.singlefiles = {}
        self.redundantfiles = {}
        for k, v in f.iteritems():
            v2 = filterhidden(v)
            if len(v2) == 1:
                self.singlefiles[k] = v2[0]
            if len(v2) > 1:
                self.redundantfiles[k] = v2

    def pprint(self, t):
        # Report redundant entries of kind t ('files' or 'boards'), resolve
        # each via the priorities table (selected archive shown in {braces}),
        # and record the selection in self.files / self.boards.
        print >>self.msg, "%s:" % t
        if t == 'files':
            it = self.redundantfiles.iteritems()
        else:
            it = self.redundantboards.iteritems()
        for k, v in it:
            print >>self.msg, "%s --> " % k,
            sel = None
            selfound = None
            if k in self.priorities[t]:
                sel = self.priorities[t][k]
            for x in v:
                if self.data[x]['uid'] == sel:
                    forstr = "{%s}"
                    selfound = x
                else:
                    forstr = '"%s"'
                print >>self.msg, forstr % self.data[x]['name'],
            if sel == None or selfound == None:
                print >>self.msg, "NOT SELECTED!"
            else:
                print >>self.msg
            if t == 'files':
                self.files[k] = selfound
            else:
                self.boards[k] = selfound

    def prioprint(self):
        # Print the archive list and both redundancy reports.
        self.separator()
        print >>self.msg, "archives:"
        for a in self.data:
            if a['uid'] in ARCHIVE_HIDDEN:
                print >>self.msg, "HIDDEN:",
            print >>self.msg, a['uid'], a['name']
        self.separator()
        self.pprint('boards')
        self.separator()
        self.pprint('files')
        self.separator()

    def merge(self):
        # Uncontested single-archive entries win automatically.
        self.boards.update(self.singleboards)
        self.files.update(self.singlefiles)

    def separator(self):
        # Visual divider, only when writing diagnostics to a terminal stream.
        if self.msg == sys.stderr:
            print >>self.msg, "-" * 80

    def build(self):
        # Emit the complete Redirect JS object to self.out.
        if not self.data:
            self.page_dl()
        #add empty "files" if missing
        for d in self.data:
            if not "files" in d:
                d.update({"files" : []})
        #do stuff
        self.boards_list()
        self.find_redundant()
        self.prioprint()
        self.merge()
        #image
        self.out.write(HEADER)
        for n, a in enumerate(self.data):
            filefound = False
            for b in a['files']:
                if b in self.files and n == self.files[b]:
                    filefound = True
                    self.out.write(CASE % b)
            if filefound:
                self.out.write(RETURN_IMAGE % http_protocol(a))
        self.out.write(POST)
        #post (only foolfuuka archives expose the post API)
        for n, a in enumerate(self.data):
            if a['software'] != 'foolfuuka':
                continue
            boardfound = False
            for b in a['boards']:
                if b in self.boards and n == self.boards[b]:
                    boardfound = True
                    self.out.write(CASE % b)
            if boardfound:
                self.out.write(RETURN_POST % http_protocol(a))
        self.out.write(TO)
        #redirect
        for n, a in enumerate(self.data):
            boardfound = False
            for b in a['boards']:
                if b in self.boards and n == self.boards[b]:
                    boardfound = True
                    self.out.write(CASE % b)
            if boardfound:
                self.out.write(RETURN_REDIRECT % (http_protocol(a), a['software']))
        self.out.write(BOTTOM)
if __name__ == "__main__":
    builder = Build()
    # Optional argument: read archives.json from disk instead of the network.
    if len(sys.argv) == 2:
        builder.data = jsonloadf(sys.argv[1])
    builder.build()
| {
"content_hash": "c2ad99911d1be0c9bf128bbef04d42de",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 97,
"avg_line_length": 24.96818181818182,
"alnum_prop": 0.5851083196795922,
"repo_name": "loadletter/4chan-x",
"id": "670c0318dc6d86fd269bad928dc7ecad0cf6902b",
"size": "5516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archives/generate_redirector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "230246"
},
{
"name": "Python",
"bytes": "5516"
},
{
"name": "Shell",
"bytes": "949"
}
],
"symlink_target": ""
} |
from game_object import GameObject
class Trait(GameObject):
    """A named character trait with a human-readable description."""

    def __init__(self, name, description):
        """Store the trait's name and description."""
        self.name = name
        self.description = description

    def __str__(self):
        """Render as ``name: description``."""
        return '{0}: {1}'.format(self.name, self.description)
| {
"content_hash": "331ee90fadbba7e5bee85efb83e21b20",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 59,
"avg_line_length": 25.3,
"alnum_prop": 0.6284584980237155,
"repo_name": "chris-statzer/knuckle-python",
"id": "39a54146118dc7293f9cc9d3c4d6da0b697f4691",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/trait.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "19375"
},
{
"name": "CMake",
"bytes": "11531"
},
{
"name": "Python",
"bytes": "285006"
}
],
"symlink_target": ""
} |
"""8-bit string definition for Python 2/3 compatibility"""
# Pick an 8-bit string strategy based on what this Python 2 build provides.
try:
    bytes = bytes
    try:
        _NULL_8_BYTE = bytes( '\000','latin1' )
    except TypeError, err:
        # 2.6 or 2.7, where bytes is the old str object
        _NULL_8_BYTE = bytes( '\000' )
    def as_8_bit( x, encoding='utf-8' ):
        # Encode unicode with the given encoding; stringify-then-encode others.
        if isinstance( x,unicode ):
            return x.encode(encoding)
        return str(x).encode( encoding )
except NameError, err:
    # No `bytes` builtin at all (pre-2.6): alias it to str.
    bytes = str
    _NULL_8_BYTE = '\000'
    def as_8_bit( x, encoding='utf-8' ):
        if isinstance( x, unicode ):
            return x.encode( encoding )
        return bytes( x )
| {
"content_hash": "086790896d9a93525f4135b5be6c6ef0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 33.05263157894737,
"alnum_prop": 0.5589171974522293,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "1e29029f9e63c153e4d63f31e035685643056d4d",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/_bytes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from __future__ import annotations
| {
"content_hash": "5de1980dee48c44fc3e850f3a8b673f6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 35,
"alnum_prop": 0.7714285714285715,
"repo_name": "edgedb/edgedb",
"id": "9d48db4f9f85e1752cf424c49ee18a6907c3f160",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edb/lib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "372837"
},
{
"name": "JavaScript",
"bytes": "7481"
},
{
"name": "Makefile",
"bytes": "1159"
},
{
"name": "Python",
"bytes": "9860929"
},
{
"name": "Rust",
"bytes": "238373"
}
],
"symlink_target": ""
} |
import sys
import random
from nose.tools import *
from mock import patch
from docopt import docopt, DocoptExit
from schema import Schema, SchemaError, And, Or, Use
import randfilter
def make_docdict(f, n, p, u, i, h, v):
    """Build a docopt-style argument dict from positional values."""
    keys = ('<files>', '-n', '-p', '--unorder', '--ignore-empty',
            '--help', '--version')
    return dict(zip(keys, (f, n, p, u, i, h, v)))
class TestIterFiles(object):
    """Tests for randfilter.iter_files."""

    def test_iter_files(self):
        # NOTE(review): readlines() exhausts both file handles BEFORE
        # iter_files consumes them, so this loop body may never execute and
        # the test could be passing vacuously — confirm against iter_files.
        files = [open("testfiles/testfile1"), open("testfiles/testfile2")]
        lines = files[0].readlines() + files[1].readlines()
        for i, line in enumerate(randfilter.iter_files(files)):
            eq_(line, lines[i])

    def test_iter_files_ignore_empty(self):
        # With ignore-empty enabled, exactly the 20 numbered lines from the
        # two fixture files should come through, in order.
        files = [open("testfiles/testfile1"), open("testfiles/testfile2")]
        lines = []
        correct_length = 0
        for i in range(20):
            lines.append(str(i+1)+"\n")
            correct_length += 1
        length = 0
        for i, line in enumerate(randfilter.iter_files(files, True)):
            length += 1
            eq_(line, lines[i])
        eq_(correct_length, length)
class TestCommandlineArgs(object):
    """docopt parsing tests for randfilter's usage string."""

    def parse_args(self, argv):
        return docopt(randfilter.__doc__, argv)

    def test_normal_case(self):
        # -n plus positional files.
        argv = ["-n", "10", "LICENSE", "README.rst"]
        args = self.parse_args(argv)
        correct_dict = make_docdict(["LICENSE", "README.rst"], "10", None, False, False, False, False)
        eq_(args, correct_dict)

    def test_normal_case2(self):
        # -n with -u and no files (stdin mode).
        argv = ["-n", "2", "-u"]
        args = self.parse_args(argv)
        correct_dict = make_docdict([], "2", None, True, False, False, False)
        eq_(args, correct_dict)

    def test_normal_case3(self):
        # -p with both boolean flags.
        argv = ["-p", "0.2", "-u", "-i"]
        args = self.parse_args(argv)
        correct_dict = make_docdict([], None, "0.2", True, True, False, False)
        eq_(args, correct_dict)

    @raises(DocoptExit)
    def test_no_options(self):
        # One of -n / -p is required.
        argv = ["LICENSE", "README.rst"]
        args = self.parse_args(argv)

    @raises(DocoptExit)
    def test_duplicate_options(self):
        # NOTE(review): '"-n" "8"' is adjacent-string concatenation ("-n8"),
        # presumably a missing comma; the test still expects DocoptExit.
        argv = ["-n", "10", "-n" "8", "LICENSE", "README.rst"]
        args = self.parse_args(argv)

    @raises(DocoptExit)
    def test_exclusive_options(self):
        # NOTE(review): same probable missing comma in '"-p" "0.8"' ("-p0.8").
        argv = ["-n", "10", "-p" "0.8", "LICENSE", "README.rst"]
        args = self.parse_args(argv)
class TestValidateArgValues(object):
    """Validation tests for ``randfilter.validate_args``.

    Fix: the class previously defined ``test_validate_p_range2`` TWICE, so
    the second definition shadowed the first and the below-lower-bound case
    (-0.1) never ran; the duplicate is renamed ``test_validate_p_range3``.
    """

    def validate(self, *k):
        """Build a docopt dict from *k* and run it through validate_args."""
        args = make_docdict(*k)
        return randfilter.validate_args(args)

    def test_type(self):
        """validate_args coerces each option to its expected Python type."""
        args = make_docdict([], "1", "0.5", False, False, False, False)
        args = randfilter.validate_args(args)
        eq_(type(args["<files>"]), list)
        eq_(type(args["-n"]), int)
        eq_(type(args["-p"]), float)
        eq_(type(args["--unorder"]), bool)
        eq_(type(args["--ignore-empty"]), bool)
        eq_(type(args["--help"]), bool)
        eq_(type(args["--version"]), bool)

    @raises(SchemaError)
    def test_validate_n_type(self):
        """-n should be an integer"""
        args = self.validate([], "0.1", None, False, False, False, False)

    def test_validate_n_range(self):
        """-n should be a non-negative integer"""
        args = self.validate([], "0", None, False, False, False, False)
        eq_(args["-n"], 0)
        args = self.validate([], "1000000000000", "0.5", False, False, False, False)
        eq_(args["-n"], 1000000000000)

    @raises(SchemaError)
    def test_validate_n_range2(self):
        # Negative counts are rejected.
        args = self.validate([], "-1", None, False, False, False, False)

    def test_validate_p_type(self):
        """-p should be a float"""
        args = self.validate([], None, "1", False, False, False, False)
        eq_(args["-p"], 1.0)

    @raises(SchemaError)
    def test_validate_p_type2(self):
        args = self.validate([], None, "hoge", False, False, False, False)

    def test_validate_p_range(self):
        """-p should be a float with 0 <= N <= 1.0"""
        args = self.validate([], None, "0.0", False, False, False, False)
        eq_(args["-p"], 0.0)
        args = self.validate([], None, "1.0", False, False, False, False)
        eq_(args["-p"], 1.0)

    @raises(SchemaError)
    def test_validate_p_range2(self):
        # Below the lower bound.
        args = self.validate([], None, "-0.1", False, False, False, False)

    @raises(SchemaError)
    def test_validate_p_range3(self):
        # Above the upper bound (renamed from the duplicate name).
        args = self.validate([], None, "1.1", False, False, False, False)

    def test_validate_files(self):
        # No files -> stdin; named files -> open file objects.
        args = self.validate([], None, "0.1", False, False, False, False)
        eq_(args["<files>"], [sys.stdin])
        args = self.validate(["LICENSE"], None, "0.1", False, False, False, False)
        import io
        # Fix: the original asserted ok_(type(... is ...)), which is always
        # truthy; assert the isinstance check itself.
        ok_(isinstance(args["<files>"][0], io.IOBase))

    @raises(SchemaError)
    def test_dummy_files(self):
        args = self.validate(["HOGE"], None, "0.1", False, False, False, False)
        print(args)
class TestChooseRandomLinesProbability(object):
    """Tests for choose_random_lines_probability with random.random patched."""

    @patch("random.random", lambda: 0)
    def test_all(self):
        # random() always below p: every line is kept, in file order.
        filename = "testfiles/testfile1"
        f = [open(filename)]
        it = randfilter.iter_files(f, False)
        l = randfilter.choose_random_lines_probability(it, 0.5, False)
        lines = open(filename).readlines()
        for i, item in enumerate(l):
            eq_(item, lines[i])

    @patch("random.random", lambda: 0.5)
    def test_border(self):
        # random() == p keeps nothing — presumably a strict less-than
        # comparison inside randfilter; confirm there.
        f = [open("testfiles/testfile1")]
        it = randfilter.iter_files(f, False)
        l = randfilter.choose_random_lines_probability(it, 0.5, False)
        eq_(l, [])

    @patch("random.random", lambda: 0)
    def test_order(self):
        # With ignore-empty iteration, numbered lines still come out in order.
        f = [open("testfiles/testfile1")]
        it = randfilter.iter_files(f, True)
        l = randfilter.choose_random_lines_probability(it, 0.5, False)
        for i, line in enumerate(l):
            eq_(line, str(i+1)+"\n")

    def test_unorder(self):
        # unorder=True with p=1: all lines, shuffled. NOTE(review): could
        # flake if a shuffle happens to return the identity permutation.
        f = [open("testfiles/testfile1")]
        it = randfilter.iter_files(f, False)
        l = randfilter.choose_random_lines_probability(it, 1, True)
        correct_lines= open("testfiles/testfile1").readlines()
        ok_(l != correct_lines)
class TestChooseRandomLinesNum(object):
    """Tests for choose_random_lines_num (pick at most N lines)."""

    def test_all(self):
        # Requesting more lines than exist returns every line in order.
        filename = "testfiles/testfile1"
        f = [open(filename)]
        it = randfilter.iter_files(f, False)
        l = randfilter.choose_random_lines_num(it, 100, False)
        lines = open(filename).readlines()
        for i, item in enumerate(l):
            eq_(item, lines[i])

    def test_zero(self):
        # N == 0 selects nothing.
        f = [open("testfiles/testfile1")]
        it = randfilter.iter_files(f, False)
        l = randfilter.choose_random_lines_num(it, 0, False)
        eq_(l, [])

    def test_order(self):
        # ignore-empty iteration preserves the numbered line order.
        f = [open("testfiles/testfile1")]
        it = randfilter.iter_files(f, True)
        l = randfilter.choose_random_lines_num(it, 100, False)
        for i, line in enumerate(l):
            eq_(line, str(i+1)+"\n")

    def test_unorder(self):
        # unorder=True shuffles the output. NOTE(review): could flake if a
        # shuffle happens to return the identity permutation.
        f = [open("testfiles/testfile1")]
        it = randfilter.iter_files(f, False)
        l = randfilter.choose_random_lines_num(it, 100, True)
        correct_lines= open("testfiles/testfile1").readlines()
        ok_(l != correct_lines)
| {
"content_hash": "af072910e668dd1b5ed54ea13ce84e55",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 102,
"avg_line_length": 30.347280334728033,
"alnum_prop": 0.5683165586653799,
"repo_name": "ton1517/randfilter",
"id": "20d0029dada9de290eeff34b02ca38f4a381b0fe",
"size": "7278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_randfilter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13402"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
@login_not_required
@no_csrf
def index(nome='paulo', sobrenome='henrique'):
    """Render the default template with the given first and last name."""
    # Alternative: TemplateResponse(template_path='/home.html') would render
    # templates/home.html instead of the route's default template.
    context = {'name': nome, 'lastname': sobrenome}
    return TemplateResponse(context)
"content_hash": "80ba0a94658f2c2703f8d113e1672e40",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 103,
"avg_line_length": 42.27272727272727,
"alnum_prop": 0.7849462365591398,
"repo_name": "HenriquePaulo/projeto",
"id": "70d9cd7b95dca292d3f7768eabd1379ca58ddeb9",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/appengine/routes/rota/home.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "CSS",
"bytes": "543"
},
{
"name": "HTML",
"bytes": "70089"
},
{
"name": "JavaScript",
"bytes": "6638"
},
{
"name": "Python",
"bytes": "100433"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
} |
class GtkSharp212ReleasePackage (Package):
	# Bockbuild package definition for the gtk-sharp 2.12.21 release tarball.
	def __init__ (self):
		Package.__init__ (self, 'gtk-sharp', '2.12.21', sources = ['http://download.mono-project.com/sources/gtk-sharp212/gtk-sharp-2.12.21.tar.gz'])
#		self.configure = './bootstrap-2.12 --prefix="%{prefix}"'
		# Build with the gmcs compiler explicitly.
		self.make = 'make CSC=gmcs'

GtkSharp212ReleasePackage ()
| {
"content_hash": "4e2efd693bd7dd92057d2667aa5b3b91",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 143,
"avg_line_length": 47.142857142857146,
"alnum_prop": 0.6848484848484848,
"repo_name": "BansheeMediaPlayer/bockbuild",
"id": "f4c2f9b9433259df7b779fa51f203ace860d0d4d",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/gtk-sharp-2.12-release.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "35710"
},
{
"name": "Makefile",
"bytes": "2017"
},
{
"name": "Python",
"bytes": "200837"
},
{
"name": "Shell",
"bytes": "36817"
}
],
"symlink_target": ""
} |
"""Convert Numbers to Words.
This package is mostly meant for financial purposes, where you need to
convert a number into words.
For instance :
1. (1001 - One Thousand And One)
2. (2032 - Two Thousand And Thirty Two)
3. (10,001 - Ten Thousand And One)
"""
import math
class NumbersBaseWrapper(object):
    """Shared vocabulary and separator tokens for number-to-word conversion."""

    hyphen = '-'
    conjunction = ' and '
    separator = ', '
    negative = 'negative '
    decimal = ' point '
    space = ' '

    # Word forms for digits, teens, tens and the powers of one thousand.
    dictionary = {
        0: 'zero',
        1: 'one',
        2: 'two',
        3: 'three',
        4: 'four',
        5: 'five',
        6: 'six',
        7: 'seven',
        8: 'eight',
        9: 'nine',
        10: 'ten',
        11: 'eleven',
        12: 'twelve',
        13: 'thirteen',
        14: 'fourteen',
        15: 'fifteen',
        16: 'sixteen',
        17: 'seventeen',
        18: 'eighteen',
        19: 'nineteen',
        20: 'twenty',
        30: 'thirty',
        40: 'forty',  # fixed spelling (was 'fourty')
        50: 'fifty',
        60: 'sixty',
        70: 'seventy',
        80: 'eighty',
        90: 'ninety',
        100: 'hundred',
        1000: 'thousand',
        1000000: 'million',
        1000000000: 'billion',
        1000000000000: 'trillion',
        1000000000000000: 'quadrillion',
        1000000000000000000: 'quintillion'
    }


class NumbersToWord(NumbersBaseWrapper):
    """Convert numbers into their English word representation."""

    def title_case(self, string):
        """Return *string* with every whitespace-separated word capitalized.

        Raises AssertionError if *string* is not a str.  Unlike the previous
        version, the result carries no trailing space.
        """
        assert isinstance(string, str), ('Ensure you pass a string.')
        # str.split() collapses runs of whitespace, so no double or trailing
        # spaces can survive the join.
        return self.space.join(word.capitalize() for word in string.split())

    def number_to_words(self, number):
        """Convert *number* (int, float or numeric string) into words.

        Examples:
            1001  -> 'One Thousand And One'
            -12   -> 'Negative Twelve'
            1.05  -> 'One Point Zero Five'

        Raises ValueError for strings that are not integers.
        """
        if not isinstance(number, (int, float)):
            # Coerce numeric strings; int() raises ValueError otherwise.
            number = int(number)

        if number < 0:
            return (self.title_case(self.negative.strip()) + self.space +
                    self.number_to_words(abs(number)))

        # Split off the fractional digits, kept as a *string* so leading
        # zeros are preserved (previously int('05') -> 5 lost the zero).
        fraction_digits = ''
        if '.' in str(number):
            whole, _, fraction_digits = str(number).partition('.')
            number = int(whole)

        if number < 21:
            string = self.dictionary[number]
        elif number < 100:
            # Tens plus optional hyphenated units, e.g. 'forty-five'.
            tens, units = divmod(number, 10)
            string = self.dictionary[tens * 10]
            if units:
                string += self.hyphen + self.dictionary[units]
        elif number < 1000:
            hundreds, remainder = divmod(number, 100)
            string = self.dictionary[hundreds] + self.space + \
                self.dictionary[100]
            if remainder:
                # Recursive call handles the sub-hundred part.
                string += self.conjunction + self.number_to_words(remainder)
        else:
            # Pure integer arithmetic: math.pow/float modulo previously made
            # the remainder a float, which produced a spurious 'Point Zero'
            # suffix for any number >= 1000 with a nonzero remainder.
            magnitude = (len(str(number)) - 1) // 3
            base_unit = 1000 ** magnitude
            num_base_units, remainder = divmod(number, base_unit)
            string = (self.number_to_words(num_base_units) + self.space +
                      self.dictionary[base_unit])
            if remainder:
                string += self.conjunction if remainder < 100 else \
                    self.separator
                string += self.number_to_words(remainder)

        if fraction_digits:
            # Spell each fractional digit individually, e.g. '05' -> 'zero five'.
            string += self.decimal + self.space.join(
                self.dictionary[int(digit)] for digit in fraction_digits)

        return self.title_case(string)
| {
"content_hash": "cbfdf392d07fdcc4a2a5229c48fbb59b",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 30.193103448275863,
"alnum_prop": 0.5166742804933759,
"repo_name": "yoda-yoda/numbers-to-words",
"id": "3071a3369958446bfadaa94ed106cef6d578498b",
"size": "4378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wordsapp/number_to_words.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8648"
}
],
"symlink_target": ""
} |
import time
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import test_lib
from grr.lib.aff4_objects import cronjobs
class FakeCronJob(flow.GRRFlow):
  """A Cron job which does nothing."""
  # Maximum run time before the cron manager kills the flow
  # (exercised by testKillOldFlows).
  lifetime = rdfvalue.Duration("1d")

  @flow.StateHandler(next_state="End")
  def Start(self):
    # Transition straight to the End state; the flow does no work.
    self.CallState(next_state="End")
class FailingFakeCronJob(flow.GRRFlow):
  """A Cron job that only fails."""

  @flow.StateHandler(next_state="End")
  def Start(self):
    # Always raise so every run is recorded as a failure.
    raise RuntimeError("Oh, no!")
class OccasionallyFailingFakeCronJob(flow.GRRFlow):
  """A Cron job that fails only when the clock is past 30 seconds."""

  @flow.StateHandler(next_state="End")
  def Start(self):
    # Under test_lib.FakeTime, runs at fake_time <= 30 succeed and later
    # ones fail -- see testLastRunStatusGetsUpdatedOnEveryRun.
    if time.time() > 30:
      raise RuntimeError("Oh, no!")
class DummySystemCronJob(cronjobs.SystemCronFlow):
  """Dummy system cron job."""
  # Distinctive non-default values so tests can verify propagation.
  lifetime = rdfvalue.Duration("42h")
  frequency = rdfvalue.Duration("42d")

  @flow.StateHandler(next_state="End")
  def Start(self):
    # Complete immediately.
    self.CallState(next_state="End")
class DummyStatefulSystemCronJob(cronjobs.StatefulSystemCronFlow):
  """Dummy stateful system cron job."""

  # Records the value read from the persisted cron state on each run, so
  # tests can observe that state survives across flow invocations.
  VALUES = []

  @flow.StateHandler()
  def Start(self):
    state = self.ReadCronState()
    value = state.get("value", default=0)

    DummyStatefulSystemCronJob.VALUES.append(value)

    # Increment and persist so the next run reads the updated value.
    state.Register("value", value + 1)
    self.WriteCronState(state)
class CronTest(test_lib.AFF4ObjectTest):
  """Tests for cron functionality.

  Covers job scheduling, enabling/disabling, overrun handling, lifetime
  enforcement and the statistics recorded by the cron manager.
  """

  def testCronJobPreservesFlowNameAndArguments(self):
    """Testing initialization of a ConfigManager."""
    pathspec = rdfvalue.PathSpec(path="/foo",
                                 pathtype=rdfvalue.PathSpec.PathType.TSK)

    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(periodicity="1d",
                                               allow_overruns=False)
    cron_args.flow_runner_args.flow_name = "GetFile"
    cron_args.flow_args.pathspec = pathspec

    cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                             token=self.token)

    # Check that CronJob definition is saved properly
    cron_root = aff4.FACTORY.Open(cron_manager.CRON_JOBS_PATH, token=self.token)
    cron_jobs = list(cron_root.ListChildren())
    self.assertEqual(len(cron_jobs), 1)
    self.assertEqual(cron_jobs[0], cron_job_urn)

    # The stored CRON_ARGS must round-trip the flow name and arguments.
    cron_job = aff4.FACTORY.Open(cron_jobs[0], token=self.token)
    cron_args = cron_job.Get(cron_job.Schema.CRON_ARGS)
    self.assertEqual(cron_args.flow_runner_args.flow_name, "GetFile")
    self.assertEqual(cron_args.flow_args.pathspec, pathspec)

    self.assertEqual(cron_args.periodicity, rdfvalue.Duration("1d"))
    self.assertEqual(cron_args.allow_overruns, False)

  def testCronJobStartsFlowAndCreatesSymlinkOnRun(self):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs()
    cron_args.flow_runner_args.flow_name = "FakeCronJob"

    cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                             token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertFalse(cron_job.IsRunning())
    # The job never ran, so DueToRun() should return true.
    self.assertTrue(cron_job.DueToRun())

    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertTrue(cron_job.IsRunning())

    # Check that a link to the flow is created under job object.
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 1)

    # Check that the link points to the correct flow.
    cron_job_flow = aff4.FACTORY.Open(cron_job_flows[0], token=self.token)
    self.assertEqual(cron_job_flow.state.context.args.flow_name, "FakeCronJob")

  def testDisabledCronJobDoesNotScheduleFlows(self):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs()
    cron_args.flow_runner_args.flow_name = "FakeCronJob"

    cron_job_urn1 = cron_manager.ScheduleFlow(cron_args, token=self.token)
    cron_job_urn2 = cron_manager.ScheduleFlow(cron_args, token=self.token)

    # Disable only the first of the two scheduled jobs.
    cron_job1 = aff4.FACTORY.Open(cron_job_urn1, aff4_type="CronJob",
                                  mode="rw", token=self.token)
    cron_job1.Set(cron_job1.Schema.DISABLED(1))
    cron_job1.Close()

    cron_manager.RunOnce(token=self.token)

    cron_job1 = aff4.FACTORY.Open(cron_job_urn1, aff4_type="CronJob",
                                  token=self.token)
    cron_job2 = aff4.FACTORY.Open(cron_job_urn2, aff4_type="CronJob",
                                  token=self.token)

    # Disabled flow shouldn't be running, while not-disabled flow should run
    # as usual.
    self.assertFalse(cron_job1.IsRunning())
    self.assertTrue(cron_job2.IsRunning())

  def testCronJobRunMonitorsRunningFlowState(self):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(allow_overruns=False,
                                               periodicity="1d")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"

    cron_job_urn = cron_manager.ScheduleFlow(cron_args, token=self.token)

    # Run() wasn't called, so nothing is supposed to be running
    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertFalse(cron_job.IsRunning())

    cron_manager.RunOnce(token=self.token)

    # Run() was called and flow was started, so the job should be
    # considered running.
    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertTrue(cron_job.IsRunning())

    # Find the flow that is currently running for the job and terminate it.
    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertTrue(cron_job.IsRunning())
    cron_job_flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
    self.assertTrue(cron_job_flow_urn is not None)
    flow.GRRFlow.TerminateFlow(cron_job_flow_urn, token=self.token)

    # Check we're dead
    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertFalse(cron_job.IsRunning())

    # This will understand that current flow has terminated. New flow won't be
    # started, because iterations are supposed to be started once per day
    # (frequency=1d).
    cron_manager.RunOnce(token=self.token)

    # Still dead
    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertFalse(cron_job.IsRunning())

  def testCronJobRunDoesNothingIfCurrentFlowIsRunning(self):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(allow_overruns=False,
                                               periodicity="1d")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"
    cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                             token=self.token)
    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 1)

    # A second RunOnce must not start another flow while one is running.
    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 1)

  def testCronJobRunDoesNothingIfDueTimeHasNotComeYet(self):
    with test_lib.FakeTime(0):
      cron_manager = cronjobs.CronManager()
      cron_args = rdfvalue.CreateCronJobFlowArgs(
          allow_overruns=False, periodicity="1h")
      cron_args.flow_runner_args.flow_name = "FakeCronJob"
      cron_job_urn = cron_manager.ScheduleFlow(
          cron_args=cron_args, token=self.token)

      cron_manager.RunOnce(token=self.token)

      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      cron_job_flows = list(cron_job.ListChildren())
      self.assertEqual(len(cron_job_flows), 1)

      # Let 59 minutes pass. Frequency is 1 hour, so new flow is not
      # supposed to start.
      time.time = lambda: 59 * 60

      cron_manager.RunOnce(token=self.token)

      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      cron_job_flows = list(cron_job.ListChildren())
      self.assertEqual(len(cron_job_flows), 1)

  def testCronJobRunPreventsOverrunsWhenAllowOverrunsIsFalse(self):
    with test_lib.FakeTime(0):
      cron_manager = cronjobs.CronManager()
      cron_args = rdfvalue.CreateCronJobFlowArgs(
          allow_overruns=False, periodicity="1h")
      cron_args.flow_runner_args.flow_name = "FakeCronJob"
      cron_job_urn = cron_manager.ScheduleFlow(
          cron_args=cron_args, token=self.token)

      cron_manager.RunOnce(token=self.token)

      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      cron_job_flows = list(cron_job.ListChildren())
      self.assertEqual(len(cron_job_flows), 1)

      # Let an hour pass. Frequency is 1h (i.e. cron job iterations are
      # supposed to be started every hour), so the new flow should be started
      # by RunOnce(). However, as allow_overruns is False, and previous
      # iteration flow hasn't finished yet, no flow will be started.
      time.time = lambda: 60*60 + 1

      cron_manager.RunOnce(token=self.token)

      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      cron_job_flows = list(cron_job.ListChildren())
      self.assertEqual(len(cron_job_flows), 1)

  def testCronJobRunAllowsOverrunsWhenAllowOverrunsIsTrue(self):
    with test_lib.FakeTime(0):
      cron_manager = cronjobs.CronManager()
      cron_args = rdfvalue.CreateCronJobFlowArgs(
          allow_overruns=True, periodicity="1h")
      cron_args.flow_runner_args.flow_name = "FakeCronJob"
      cron_job_urn = cron_manager.ScheduleFlow(
          cron_args=cron_args, token=self.token)

      cron_manager.RunOnce(token=self.token)

      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      cron_job_flows = list(cron_job.ListChildren())
      self.assertEqual(len(cron_job_flows), 1)

      # Let an hour pass. Frequency is 1h (i.e. cron job iterations are
      # supposed to be started every hour), so the new flow should be started
      # by RunOnce(). Previous iteration flow hasn't finished yet, but
      # allow_overruns is True, so it's ok to start new iteration.
      time.time = lambda: 60*60 + 1

      cron_manager.RunOnce(token=self.token)

      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      cron_job_flows = list(cron_job.ListChildren())
      self.assertEqual(len(cron_job_flows), 2)

  def testCronManagerListJobsDoesNotListDeletedJobs(self):
    cron_manager = cronjobs.CronManager()

    cron_args = rdfvalue.CreateCronJobFlowArgs(
        allow_overruns=True, periodicity="1d")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"

    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token)

    cron_jobs = list(cron_manager.ListJobs(token=self.token))
    self.assertEqual(len(cron_jobs), 1)

    cron_manager.DeleteJob(cron_job_urn, token=self.token)

    cron_jobs = list(cron_manager.ListJobs(token=self.token))
    self.assertEqual(len(cron_jobs), 0)

  def testKillOldFlows(self):
    with test_lib.FakeTime(0):
      cron_manager = cronjobs.CronManager()
      cron_args = rdfvalue.CreateCronJobFlowArgs()
      cron_args.flow_runner_args.flow_name = "FakeCronJob"
      cron_args.periodicity = "1w"
      cron_args.lifetime = FakeCronJob.lifetime
      cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                               token=self.token)
      cron_manager.RunOnce(token=self.token)

      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      self.assertTrue(cron_job.IsRunning())
      # Within the lifetime nothing should be killed yet.
      self.assertFalse(cron_job.KillOldFlows())

    prev_timeout_value = stats.STATS.GetMetricValue(
        "cron_job_timeout", fields=[cron_job_urn.Basename()])
    prev_latency_value = stats.STATS.GetMetricValue(
        "cron_job_latency", fields=[cron_job_urn.Basename()])

    # Fast foward one day
    with test_lib.FakeTime(24*60*60 + 1):
      flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)

      cron_manager.RunOnce(token=self.token)

      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      self.assertFalse(cron_job.IsRunning())

      # Check the termination log
      log_collection = aff4.FACTORY.Open(urn=flow_urn.Add("Logs"),
                                         token=self.token, mode="r")

      for line in log_collection:
        if line.urn == flow_urn:
          self.assertTrue("lifetime exceeded" in str(line.log_message))

      # Check that timeout counter got updated.
      current_timeout_value = stats.STATS.GetMetricValue(
          "cron_job_timeout", fields=[cron_job_urn.Basename()])
      self.assertEqual(current_timeout_value - prev_timeout_value, 1)

      # Check that latency stat got updated.
      current_latency_value = stats.STATS.GetMetricValue(
          "cron_job_latency", fields=[cron_job_urn.Basename()])
      self.assertEqual(current_latency_value.count - prev_latency_value.count,
                       1)
      self.assertEqual(current_latency_value.sum - prev_latency_value.sum,
                       24*60*60 + 1)

  def testFailedFlowUpdatesStats(self):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(allow_overruns=False,
                                               periodicity="1d")
    cron_args.flow_runner_args.flow_name = "FailingFakeCronJob"
    cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                             token=self.token)

    prev_metric_value = stats.STATS.GetMetricValue(
        "cron_job_failure", fields=[cron_job_urn.Basename()])

    cron_manager.RunOnce(token=self.token)
    cron_job = aff4.FACTORY.Open(cron_job_urn, token=self.token)
    cron_flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
    # Drive the (failing) flow to completion.
    for _ in test_lib.TestFlowHelper(cron_flow_urn,
                                     check_flow_errors=False,
                                     token=self.token):
      pass

    # This RunOnce call should determine that the flow has failed
    cron_manager.RunOnce(token=self.token)

    # Check that stats got updated
    current_metric_value = stats.STATS.GetMetricValue(
        "cron_job_failure", fields=[cron_job_urn.Basename()])
    self.assertEqual(current_metric_value - prev_metric_value, 1)

  def testLatencyStatsAreCorrectlyRecorded(self):
    with test_lib.FakeTime(0):
      cron_manager = cronjobs.CronManager()
      cron_args = rdfvalue.CreateCronJobFlowArgs()
      cron_args.flow_runner_args.flow_name = "FakeCronJob"
      cron_args.periodicity = "1w"

      cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                               token=self.token)
      cron_manager.RunOnce(token=self.token)

    prev_metric_value = stats.STATS.GetMetricValue(
        "cron_job_latency", fields=[cron_job_urn.Basename()])

    # Fast foward one minute
    with test_lib.FakeTime(60):
      cron_manager.RunOnce(token=self.token)
      cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                   token=self.token)
      cron_flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
      for _ in test_lib.TestFlowHelper(cron_flow_urn,
                                       check_flow_errors=False,
                                       token=self.token):
        pass

      # This RunOnce call should determine that the flow has finished
      cron_manager.RunOnce(token=self.token)

    # Check that stats got updated
    current_metric_value = stats.STATS.GetMetricValue(
        "cron_job_latency", fields=[cron_job_urn.Basename()])
    self.assertEqual(current_metric_value.count - prev_metric_value.count, 1)
    self.assertEqual(current_metric_value.sum - prev_metric_value.sum, 60)

  def testSchedulingJobWithFixedNamePreservesTheName(self):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(
        allow_overruns=True, periodicity="1d")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"

    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token, job_name="TheJob")
    self.assertEqual("TheJob", cron_job_urn.Basename())

  def testReschedulingJobWithFixedNameDoesNotCreateNewObjectVersion(self):
    cron_manager = cronjobs.CronManager()

    cron_args = rdfvalue.CreateCronJobFlowArgs(
        allow_overruns=True, periodicity="1d")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"

    # Schedule cron job with a fixed name. Check that we have 1 version
    # of "TYPE" attribute.
    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token, job_name="TheJob")
    cron_job = aff4.FACTORY.Open(cron_job_urn, age=aff4.ALL_TIMES,
                                 token=self.token)
    attr_values = list(cron_job.GetValuesForAttribute(cron_job.Schema.TYPE))
    self.assertTrue(len(attr_values) == 1)

    # Reschedule the job. Check that we still have only one "TYPE" version.
    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token, job_name="TheJob")
    cron_job = aff4.FACTORY.Open(cron_job_urn, age=aff4.ALL_TIMES,
                                 token=self.token)
    attr_values = list(cron_job.GetValuesForAttribute(cron_job.Schema.TYPE))
    self.assertTrue(len(attr_values) == 1)

  def testLastRunStatusGetsUpdatedOnEveryRun(self):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs()
    cron_args.flow_runner_args.flow_name = "OccasionallyFailingFakeCronJob"
    cron_args.periodicity = "30s"

    cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                             token=self.token)

    # First run (t=0) succeeds, second run (t=60 > 30) fails -- see
    # OccasionallyFailingFakeCronJob.
    for fake_time in [0, 60]:
      with test_lib.FakeTime(fake_time):
        # This call should start a new cron job flow
        cron_manager.RunOnce(token=self.token)
        cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                     token=self.token)
        cron_flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
        for _ in test_lib.TestFlowHelper(cron_flow_urn,
                                         check_flow_errors=False,
                                         token=self.token):
          pass
        # This RunOnce call should determine that the flow has finished
        cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, age=aff4.ALL_TIMES,
                                 token=self.token)
    statuses = list(cron_job.GetValuesForAttribute(
        cron_job.Schema.LAST_RUN_STATUS))

    statuses = sorted(statuses, key=lambda x: x.age)
    self.assertEqual(len(statuses), 2)

    self.assertEqual(statuses[0].age,
                     rdfvalue.RDFDatetime().FromSecondsFromEpoch(0))
    self.assertEqual(statuses[1].age,
                     rdfvalue.RDFDatetime().FromSecondsFromEpoch(60))
    self.assertEqual(statuses[0].status, rdfvalue.CronJobRunStatus.Status.OK)
    self.assertEqual(statuses[1].status, rdfvalue.CronJobRunStatus.Status.ERROR)

  def testSystemCronFlowsGetScheduledAutomatically(self):
    config_lib.CONFIG.Set("Cron.enabled_system_jobs", ["DummySystemCronJob"])
    cronjobs.ScheduleSystemCronFlows(token=self.token)

    jobs = cronjobs.CRON_MANAGER.ListJobs(token=self.token)
    dummy_jobs = [j for j in jobs
                  if j.Basename() == "DummySystemCronJob"]
    self.assertTrue(dummy_jobs)

    # System cron job should be enabled by default.
    job = aff4.FACTORY.Open(dummy_jobs[0], aff4_type="CronJob",
                            token=self.token)
    self.assertFalse(job.Get(job.Schema.DISABLED))

  def testSystemCronFlowsMayBeDisabledViaConfig(self):
    config_lib.CONFIG.Set("Cron.enabled_system_jobs", ["DummySystemCronJob"])
    cronjobs.ScheduleSystemCronFlows(token=self.token)

    jobs = cronjobs.CRON_MANAGER.ListJobs(token=self.token)
    dummy_jobs = [j for j in jobs
                  if j.Basename() == "DummySystemCronJob"]
    self.assertTrue(dummy_jobs)

    # System cron job should be enabled.
    job = aff4.FACTORY.Open(dummy_jobs[0], aff4_type="CronJob",
                            token=self.token)
    self.assertFalse(job.Get(job.Schema.DISABLED))

    # Now remove the cron job from the list and check that it gets disabled
    # after next ScheduleSystemCronFlows() call.
    config_lib.CONFIG.Set("Cron.enabled_system_jobs", [])
    cronjobs.ScheduleSystemCronFlows(token=self.token)

    # This cron job should be disabled, because it's listed in
    # Cron.disabled_system_jobs config variable.
    job = aff4.FACTORY.Open(dummy_jobs[0], aff4_type="CronJob",
                            token=self.token)
    self.assertTrue(job.Get(job.Schema.DISABLED))

  def testScheduleSystemCronFlowsRaisesWhenFlowCanNotBeFound(self):
    config_lib.CONFIG.Set("Cron.enabled_system_jobs", ["NonExistent"])
    self.assertRaises(KeyError, cronjobs.ScheduleSystemCronFlows,
                      token=self.token)

  def testStatefulSystemCronFlowRaisesWhenRunningWithoutCronJob(self):
    # Stateful flows need a cron job object to persist their state in.
    self.assertRaises(cronjobs.StateReadError, flow.GRRFlow.StartFlow,
                      flow_name="DummyStatefulSystemCronJob",
                      token=self.token)

  def testStatefulSystemCronFlowMaintainsState(self):
    DummyStatefulSystemCronJob.VALUES = []

    config_lib.CONFIG.Set("Cron.enabled_system_jobs",
                          ["DummyStatefulSystemCronJob"])
    cronjobs.ScheduleSystemCronFlows(token=self.token)

    # Each run should read the value written by the previous run.
    flow.GRRFlow.StartFlow(flow_name="DummyStatefulSystemCronJob",
                           token=self.token)
    flow.GRRFlow.StartFlow(flow_name="DummyStatefulSystemCronJob",
                           token=self.token)
    flow.GRRFlow.StartFlow(flow_name="DummyStatefulSystemCronJob",
                           token=self.token)

    self.assertListEqual(DummyStatefulSystemCronJob.VALUES, [0, 1, 2])
def main(argv):
  """Entry point: run this module's tests via the GRR test program."""
  test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
  # Allow running this test module directly from the command line.
  flags.StartMain(main)
| {
"content_hash": "a34c47e813cc90839d271610919812c7",
"timestamp": "",
"source": "github",
"line_count": 596,
"max_line_length": 80,
"avg_line_length": 39.46308724832215,
"alnum_prop": 0.6507227891156463,
"repo_name": "defaultnamehere/grr",
"id": "c47f5bd724e9ac89f08032a9d1d6e5f48e02b8d4",
"size": "23542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/aff4_objects/cronjobs_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36345"
},
{
"name": "JavaScript",
"bytes": "831633"
},
{
"name": "Makefile",
"bytes": "5939"
},
{
"name": "Python",
"bytes": "4541648"
},
{
"name": "Shell",
"bytes": "31077"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2014,掌阅科技
All rights reserved.
摘 要: agent.py
创 建 者: zhuangshixiong
创建日期: 2015-08-26
"""
import urllib
import operator
import json
from difflib import Differ
from tornado.web import authenticated
from peewee import OperationalError
from kazoo.exceptions import NoNodeError
from handler.bases import CommonBaseHandler
from handler.bases import ArgsMap
from lib import route
from lib.excel import ExcelWorkBook
from model.db.zd_qconf_agent import ZdQconfAgent
from model.db.zd_zookeeper import ZdZookeeper
from service import zookeeper as ZookeeperService
from conf import log
from functools import reduce
@route(r'/config/agent/index', '查看')
class ZdQconfAgentIndexHandler(CommonBaseHandler):
    '''Index page: paged, sortable listing of QConf agent records.'''

    # Query-string arguments mapped onto handler attributes (with defaults).
    args_list = [
        ArgsMap('pageSize', 'page_size', default=30),
        ArgsMap('pageCurrent', 'current_page', default=1),
        ArgsMap('orderDirection', 'order_direction', default="asc"),
        ArgsMap('orderField', 'order_field', default="id"),
    ]

    @authenticated
    def response(self):
        '''Render the agent index page with the filtered record set.'''
        # Build WHERE clauses from the request filters.
        # NOTE(review): assumes parse_query returns a non-empty sequence --
        # reduce() over an empty sequence would raise TypeError; confirm.
        clauses = self.parse_query(ZdQconfAgent)
        order = getattr(ZdQconfAgent, self.order_field)
        records = ZdQconfAgent.select().order_by(
            getattr(order, self.order_direction)()
        ).where(reduce(operator.and_, clauses))
        self.render('config/agent/index.html',
                    action='/config/agent/index',
                    total=records.count(),
                    current_page=self.current_page,
                    page_size=self.page_size,
                    records=records.paginate(self.current_page, self.page_size))
@route(r'/config/agent/watch', '观察')
class WsAgentWatchHandler(CommonBaseHandler):
    '''Render the page for watching agents registered on a cluster.'''

    args_list = [
        # ZooKeeper path under which qconf agents register themselves.
        ArgsMap('agent_register_prefix', default="/qconf/__qconf_register_hosts")
    ]

    @authenticated
    def response(self):
        '''Show the watch page listing all non-deleted ZooKeeper clusters.'''
        clusters = ZdZookeeper.select().where(ZdZookeeper.deleted == "0")
        self.render('config/agent/watch.html',
                    clusters=clusters,
                    agent_register_prefix=self.agent_register_prefix)
@route(r'/config/agent/checkagents', '检查agents')
class WsAgentCheckAgentsHandler(CommonBaseHandler):
    '''Compare agents recorded in MySQL with those registered in ZooKeeper.'''

    args_list = [
        ArgsMap('cluster_name', required=True),
        ArgsMap('agent_register_prefix', default="/qconf/__qconf_register_hosts")
    ]

    @authenticated
    def response(self):
        '''Return [mysql_name, zk_name] pairs; '无' marks a missing side.'''
        zoo_client = ZookeeperService.get_zoo_client(self.cluster_name)
        if not zoo_client:
            return self.ajax_popup(code=300, msg="连接zookeeper出错!")
        try:
            zk_agents = zoo_client.get_children(self.agent_register_prefix)
        except NoNodeError:
            return self.ajax_popup(code=300, msg="节点路径不存在!")
        records = ZdQconfAgent.select().where(
            (ZdQconfAgent.cluster_name == self.cluster_name) &
            (ZdQconfAgent.deleted == '0')
        )
        mysql_agents = [record.hostname for record in records]
        # agent在mysql上的统计信息和在zookeeper上注册信息的对比
        agents_stat = []
        for diff_info in Differ().compare(mysql_agents, zk_agents):
            marker = diff_info[0]
            if marker == "?":
                # Differ emits '? ' intraline-hint lines for similar entries;
                # they are annotations, not hostnames. Previously they fell
                # into the else branch and were reported as present on both
                # sides.
                continue
            agent_name = diff_info[2:]
            if marker == "+":
                cmp_res = ['无', agent_name]
            elif marker == "-":
                cmp_res = [agent_name, '无']
            else:
                cmp_res = [agent_name, agent_name]
            agents_stat.append(cmp_res)
        return agents_stat
@route(r'/config/agent/search')
class ZdQconfAgentSearchHandler(CommonBaseHandler):
    '''Search endpoint: same query as the index page, rendered as a grid.'''

    args_list = [
        ArgsMap('pageSize', 'page_size', default=30),
        ArgsMap('pageCurrent', 'current_page', default=1),
        ArgsMap('orderDirection', 'order_direction', default="asc"),
        ArgsMap('orderField', 'order_field', default="id"),
    ]

    @authenticated
    def response(self):
        '''Render only the datagrid fragment for the filtered record set.'''
        # NOTE(review): same empty-clauses caveat as the index handler.
        clauses = self.parse_query(ZdQconfAgent)
        order = getattr(ZdQconfAgent, self.order_field)
        records = ZdQconfAgent.select().order_by(
            getattr(order, self.order_direction)()
        ).where(reduce(operator.and_, clauses))
        self.render('config/agent/datagrid.html',
                    total=records.count(),
                    current_page=self.current_page,
                    page_size=self.page_size,
                    records=records.paginate(self.current_page, self.page_size))
@route(r'/config/agent/save')
class ZdQconfAgentSaveHandler(CommonBaseHandler):
    """Create a new agent record or update an existing one."""

    args_list = [
        ArgsMap('id', default=''),
        ArgsMap('ip', default=''),
        ArgsMap('hostname', default=''),
        ArgsMap('cluster_name', default=''),
        ArgsMap('notes', default=''),
        ArgsMap('create_user', default=''),
        ArgsMap('create_time', default=''),
        ArgsMap('update_user', default=''),
        ArgsMap('update_time', default=''),
        ArgsMap('deleted', default=''),
    ]

    @authenticated
    def response(self):
        """Persist the submitted fields and redirect to the index page."""
        # A non-empty id means editing an existing row; otherwise create one.
        record = ZdQconfAgent.one(id=self.id) if self.id else ZdQconfAgent()
        # Copy every non-empty submitted field onto the model, preserving
        # the original behaviour of skipping empty-string values.
        for field in ('id', 'ip', 'hostname', 'cluster_name', 'notes',
                      'create_user', 'create_time', 'update_user',
                      'update_time', 'deleted'):
            value = getattr(self, field)
            if value:
                setattr(record, field, value)
        record.save()
        return self.ajax_ok(forward="/config/agent/index")
@route(r'/config/agent/add', '新增')
class ZdQconfAgentAddHandler(CommonBaseHandler):
    '''Render the form for creating a new agent record.'''

    @authenticated
    def response(self):
        '''Show the add-agent page with all non-deleted clusters.'''
        active_clusters = ZdZookeeper.select().where(ZdZookeeper.deleted == "0")
        return self.render('config/agent/add.html',
                           action='config/agent/save',
                           clusters=active_clusters)
@route(r'/config/agent/edit', '修改')
class ZdQconfAgentEditHandler(CommonBaseHandler):
    """Render the edit form for exactly one selected agent record."""

    args_list = [
        ArgsMap('info_ids', default=''),
    ]

    def response(self):
        """Validate the selection and show the edit page."""
        # Guard clauses: nothing selected, then more than one selected.
        if not self.info_ids:
            return self.ajax_popup(close_current=False, code=300, msg="请选择某条记录进行修改")
        selected_ids = self.info_ids.split(',')
        if len(selected_ids) != 1:
            return self.ajax_popup(close_current=False, code=300, msg="请选择单条记录进行修改")
        clusters = ZdZookeeper.select().where(ZdZookeeper.deleted == "0")
        record = ZdQconfAgent.one(id=selected_ids[0])
        return self.render('config/agent/edit.html',
                           action='/config/agent/save',
                           clusters=clusters,
                           record=record)
@route(r'/config/agent/delete', '删除')
class ZdQconfAgentDeleteHandler(CommonBaseHandler):
    """Delete the selected agent records."""

    args_list = [
        ArgsMap('info_ids', default=''),
    ]

    def response(self):
        """Hard-delete every record whose id was submitted."""
        if not self.info_ids:
            return self.ajax_popup(close_current=False, code=300, msg="请选择某条记录进行删除")
        ids = self.info_ids.split(',')
        try:
            ZdQconfAgent.delete().where(ZdQconfAgent.id << ids).execute()
        except OperationalError as exc:
            # Log the database failure and report it to the UI instead of
            # surfacing an unhandled error.
            log.error("error occurred while delete agents, ids: %s\n%s", ids, str(exc))
            return self.ajax_popup(close_current=False, code=300, msg="删除失败!")
        return self.ajax_ok(close_current=False)
@route(r'/config/agent/export', '导出')
class ZdQconfAgentExportHandler(CommonBaseHandler):
    """Export the selected agent records to an Excel (.xls) attachment."""

    args_list = [
        ArgsMap('info_ids', default=''),
    ]

    def response(self):
        '''Build the workbook from the selected rows and stream it back.'''
        id_li = self.info_ids.split(',')
        sheet_text = ZdQconfAgent.select().where(ZdQconfAgent.id << id_li)
        # Column headers of the generated sheet.
        sheet_title = [
            {'name': 'ip'},
            {'name': '主机名'},
            {'name': '说明'},
        ]
        # Model attributes bound to the columns above, in order.
        bind_attr = (
            'ip',
            'hostname',
            'notes',
        )
        ewb = ExcelWorkBook()
        sheet_name = ZdQconfAgent._meta.db_table
        ewb.add_sheet(sheet_name)
        ewb.add_title(sheet_name, sheet_title)
        ewb.add_text(sheet_name, sheet_text, bind=bind_attr)
        filename = '{}.xls'.format(sheet_name)
        # NOTE(review): urllib.urlencode is Python 2 only; under Python 3
        # this needs urllib.parse.urlencode -- confirm the target runtime.
        filename = urllib.urlencode({'filename': filename})
        self.set_header('Content-Disposition', 'attachment;{}'.format(filename))
        self.finish(ewb.get_stream())
| {
"content_hash": "0a66402c710eefabb24dde346594279e",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 91,
"avg_line_length": 31.091216216216218,
"alnum_prop": 0.5747039009018798,
"repo_name": "wolfelee/zkdash",
"id": "546ac3b2e5eeefb18738dee46e79f3a1fe88e878",
"size": "9520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handler/config/agent.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "202395"
},
{
"name": "HTML",
"bytes": "78547"
},
{
"name": "JavaScript",
"bytes": "1737634"
},
{
"name": "PHP",
"bytes": "44496"
},
{
"name": "Python",
"bytes": "114463"
},
{
"name": "Shell",
"bytes": "404"
}
],
"symlink_target": ""
} |
'''
config constants for training neural net
'''
# fraction of examples used for training; the remainder is allocated as test data
PERCENT_FOR_TRAINING_DATA = .90
# the patch size for both the 32 and 64 feature convolutions
# used with an NxN tile, where N has usually been 64
CONVOLUTION_PATCH_SIZE = 5
# where training data gets cached from bin/create_training_data.py
CACHE_PATH = '/data/cache/'
| {
"content_hash": "ee5c65c21aeb736af623bc1f935873ac",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 28.307692307692307,
"alnum_prop": 0.7391304347826086,
"repo_name": "silberman/Deep-OSM",
"id": "0a64f846e20f1ffc7fe69b0d1db607074fa9ca13",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "13293"
},
{
"name": "Makefile",
"bytes": "1011"
},
{
"name": "Python",
"bytes": "45804"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import absolute_import
import uuid
__author__ = "czerwin@scalyr.com"
import platform
import re
import socket
import sys
import time
import io
import ssl
import os
import six
from six.moves import map
from six.moves import range
import six.moves.http_client
from scalyr_agent.util import verify_and_get_compress_func
from scalyr_agent.configuration import Configuration
from scalyr_agent import __scalyr__
import scalyr_agent.scalyr_logging as scalyr_logging
import scalyr_agent.util as scalyr_util
from scalyr_agent.connection import ConnectionFactory
# Maximum request body size (in characters / bytes) we log under DEBUG 5 log level. If the body is
# larger than this value, we truncate it. This way we prevent debug log file from growing too large
# and we avoid memory leak under Python 2.7 when logging very large bodies with unicode data.
MAX_REQUEST_BODY_SIZE_LOG_MSG_LIMIT = 2048
log = scalyr_logging.getLogger(__name__)
# The timestamp of the previous message - only used when `enforce_monotonic_timestamps` is True.
# This is used to support an old requirement on the `addEvents` API. It used to be the
# server required that all events uploaded in the same session must have monotonically
# increasing event timestamps. It was illegal to add an event with a lower timestamp than
# any event previously uploaded to the session. This is no longer a requirement, but we are
# protecting the enforcement behind a flag in case any issues arise. We can delete this
# code once it has been out for a few months with no problems.
__last_time_stamp__ = None
def _set_last_timestamp(val):
    """Overwrite the module-level ``__last_time_stamp__`` value.
    exposed for testing
    """
    global __last_time_stamp__
    __last_time_stamp__ = val
def create_new_client(config, api_key=None):
    """Return a NewScalyrClientSession when the new ingestion path is enabled.

    @param config: agent configuration; only ``use_new_ingestion`` is consulted here.
    @param api_key: optional API key override passed through to the session.
    @return: a NewScalyrClientSession, or None when new ingestion is disabled.
    """
    if not config.use_new_ingestion:
        return None
    # Imported lazily so the module loads even when the new path is unused.
    from scalyr_agent.scalyr_client import NewScalyrClientSession

    return NewScalyrClientSession(config, api_key=api_key)
def verify_server_certificate(config):
    """
    Validate that the CA certificate files named in the configuration exist.

    Raises ValueError early and loudly when a required cert file is missing.
    The CA check is skipped for dev installs; the intermediate-certs check is
    additionally skipped for MSI (Windows) installs, which do not bundle
    intermediate certificates.

    :param config: agent configuration providing ``ca_cert_path`` and
        ``intermediate_certs_path``.
    :return: None
    """
    install_type = __scalyr__.INSTALL_TYPE
    skip_ca_check = install_type == __scalyr__.DEV_INSTALL
    skip_intermediate_check = install_type in (
        __scalyr__.DEV_INSTALL,
        __scalyr__.MSI_INSTALL,
    )

    if not skip_ca_check and not os.path.isfile(config.ca_cert_path):
        raise ValueError(
            'Invalid path "%s" specified for the "ca_cert_path" config '
            "option: file does not exist" % (config.ca_cert_path)
        )

    # NOTE: We don't include intermediate certs in the Windows binary so we skip that check
    # under the MSI / Windows install
    if not skip_intermediate_check and not os.path.isfile(
        config.intermediate_certs_path
    ):
        raise ValueError(
            'Invalid path "%s" specified for the '
            '"intermediate_certs_path" config '
            "option: file does not exist" % (config.intermediate_certs_path)
        )
def create_client(config, quiet=False, api_key=None):
    # type: (Configuration, bool, six.text_type) -> ScalyrClientSession
    """Build and return a new ScalyrClientSession from the agent configuration.

    @param config: the agent configuration.
    @param quiet: If true, only errors should be written to stdout.
    @param api_key: The Scalyr API key. If None, use default api_key from config.
    @type quiet: bool
    @return: The client to use for sending requests to Scalyr, using the server
        address and API write logs key in the configuration file.
    @rtype: ScalyrClientSession
    """
    # Only pass certificate paths through when verification is enabled; a None
    # ca_file turns server certificate verification off in the session.
    ca_file = None
    intermediate_certs_file = None
    if config.verify_server_certificate:
        verify_server_certificate(config)
        ca_file = config.ca_cert_path
        intermediate_certs_file = config.intermediate_certs_path

    return ScalyrClientSession(
        config.scalyr_server,
        api_key or config.api_key,
        __scalyr__.SCALYR_VERSION,
        quiet=quiet,
        request_deadline=config.request_deadline,
        ca_file=ca_file,
        intermediate_certs_file=intermediate_certs_file,
        use_requests_lib=config.use_requests_lib,
        compression_type=config.compression_type,
        compression_level=config.compression_level,
        proxies=config.network_proxies,
        disable_send_requests=config.disable_send_requests,
        disable_logfile_addevents_format=config.disable_logfile_addevents_format,
        enforce_monotonic_timestamps=config.enforce_monotonic_timestamps,
        sessions_api_keys_tuple=config.get_number_of_configured_sessions_and_api_keys(),
    )
class ScalyrClientSessionStatus(object):
    """Plain value object holding the counters reported by a client session.

    All fields start as None; ``ScalyrClientSession.generate_status`` fills them in.
    """

    _FIELDS = (
        "total_requests_sent",
        "total_requests_failed",
        "total_request_bytes_sent",
        "total_compressed_request_bytes_sent",
        "total_response_bytes_received",
        "total_request_latency_secs",
        "total_connections_created",
        "total_compression_time",
    )

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, None)
class NewScalyrClientSession(object):
    """Client session for the new ingestion path.

    Setup is skipped entirely unless ``use_new_ingestion`` is enabled in the
    configuration; when enabled, the bootstrap (control plane) service is
    contacted to discover the data plane address.
    """

    def __init__(self, configuration, api_key=None):
        if not configuration.use_new_ingestion:
            return

        # Imported lazily so the agent does not require the ingestion client
        # package unless the new path is actually enabled.
        from scalyr_ingestion_client.session import (  # pylint: disable=import-error
            Session,
        )
        from scalyr_ingestion_client.client import (  # pylint: disable=import-error
            ControlPlaneAPIClient,
            DataPlaneAPIClient,
        )

        self._session = Session(uuid=str(uuid.uuid4()))
        self._control_plane_client = ControlPlaneAPIClient(
            service_address=configuration.new_ingestion_bootstrap_address.split(":"),
            api_token=str(api_key or configuration.api_key),
            cert_path=str(configuration.ca_cert_path),
            use_tls=configuration.new_ingestion_use_tls,
        )
        # Ask the control plane which data-plane endpoint this session should use.
        manager_address = self._control_plane_client.send_client_hello()
        self._data_plane_client = DataPlaneAPIClient(
            api_token=str(configuration.api_key),
            service_address=(manager_address.ip_address, manager_address.port),
            cert_path=str(configuration.ca_cert_path),
            use_tls=configuration.new_ingestion_use_tls,
        )

    def send_events(self, log_stream, events, sequence_range_start, sequence_range_end):
        """Forward a batch of events for one log stream to the data plane."""
        self._data_plane_client.send_events(
            session=self._session,
            log_stream=log_stream,
            events=events,
            sequence_range_start=sequence_range_start,
            sequence_range_end=sequence_range_end,
        )
class ScalyrClientSession(object):
"""Encapsulates the connection between the agent and the Scalyr servers.
It is a session in that we generally only have one connection open to the Scalyr servers at any given time.
"""
    def __init__(
        self,
        server,
        api_key,
        agent_version,
        quiet=False,
        request_deadline=60.0,
        ca_file=None,
        intermediate_certs_file=None,
        use_requests_lib=False,
        proxies=None,
        compression_type=None,
        compression_level=9,
        disable_send_requests=False,
        disable_logfile_addevents_format=False,
        enforce_monotonic_timestamps=False,
        sessions_api_keys_tuple=None,
    ):
        """Initializes the connection.
        This does not actually try to connect to the server.
        @param server: The URL for the server to send requests to, such as https://agent.scalyr.com
        @param api_key: The write logs key to use to authenticate all requests from this agent to scalyr.
            It both authenticates the requests and identifies which account it belongs to.
        @param agent_version: The agent version number, which is included in request headers sent to the server.
        @param quiet: If True, will not log non-error information.
        @param request_deadline: The maximum time to wait for all requests in seconds.
        @param ca_file: The path to the file containing the certificates for the trusted certificate authority roots.
            This is used for the SSL connections to verify the connection is to Scalyr.
        @param intermediate_certs_file: The path to the file containing the certs for the trusted intermediate
            certificate authorities. This is used for the SSL connections to verify the connection is to Scalyr.
        @param use_requests_lib: If True, use the bundled ``requests`` library for HTTP connections
            (passed through to ConnectionFactory).
        @param proxies: A dict describing the network proxies to use (such as a mapping for `https`) or None.
        @param compression_type: A string containing the compression method to use.
            Valid options are bz2, deflate, lz4, zstandard or None. Defaults to None.
        @param compression_level: An int containing the compression level of compression to use, from 1-9. Defaults to 9 (max)
        @param disable_send_requests: Debug flag; if True, request bodies are logged and dropped
            instead of being sent over the network.
        @param disable_logfile_addevents_format: If True, disable the new addEvents payload format
            (passed through to AddEventsRequest).
        @param enforce_monotonic_timestamps: A bool that indicates whether event timestamps in the same session
            should be monotonically increasing or not. Defaults to False
        @param sessions_api_keys_tuple: Tuple containing worker type (multiprocess, threaded) total
            number of configured worker sessions and number of unique API keys configured.
        @type server: six.text_type
        @type api_key: six.text_type
        @type agent_version: six.text_type
        @type quiet: bool
        @type request_deadline: float
        @type ca_file: six.text_type
        @type intermediate_certs_file: six.text_type
        @type proxies: dict
        @type compression_type: six.text_type
        @type compression_level: int
        @type enforce_monotonic_timestamps: bool
        @type sessions_api_keys_tuple: tuple
        """
        if not quiet:
            log.info('Using "%s" as address for scalyr servers' % server)
        # The full URL address
        self.__full_address = server
        # Verify the server address looks right.
        parsed_server = re.match(r"^(http://|https://|)([^:]*)(:\d+|)$", server.lower())
        if parsed_server is None:
            raise Exception('Could not parse server address "%s"' % server)
        # The Connection object that has been opened to the servers, if one has been opened.
        self.__connection = None
        self.__use_requests = use_requests_lib
        self.__api_key = api_key
        self.__session_id = scalyr_util.create_unique_id()
        self.__quiet = quiet
        if not quiet:
            log.info(
                "Using session_id=%s %s"
                % (self.__session_id, scalyr_util.get_pid_tid())
            )
        # The time of the last success.
        self.__last_success = None
        # The version number of the installed agent
        self.__agent_version = agent_version
        # The last time the connection was closed, if any.
        self.__last_connection_close = None
        # We create a few headers ahead of time so that we don't have to recreate them each time we need them.
        self.__standard_headers = {
            "Connection": "Keep-Alive",
            "Accept": "application/json",
            "User-Agent": self.__get_user_agent(
                agent_version, sessions_api_keys_tuple=sessions_api_keys_tuple
            ),
        }
        # Configure compression type. If the requested codec is unavailable,
        # fall back to sending uncompressed bodies (with a warning).
        self.__compression_type = compression_type
        self.__compress = None
        encoding = None
        if compression_type:
            compress_func = verify_and_get_compress_func(
                compression_type, compression_level
            )
            if compress_func:
                self.__compress = compress_func
                encoding = compression_type
            if not self.__compress:
                log.warning(
                    "'%s' compression specified, but '%s' compression is not available. No compression will be used."
                    % (compression_type, compression_type)
                )
        if encoding and encoding != "none":
            self.__standard_headers["Content-Encoding"] = encoding
        # Configure compression level
        self.__compression_level = compression_level
        # The number of seconds to wait for a blocking operation on the connection before considering it to have
        # timed out.
        self.__request_deadline = request_deadline
        # The total number of RPC requests sent.
        self.total_requests_sent = 0
        # The total number of RPC requests that failed.
        self.total_requests_failed = 0
        # The total number of bytes sent over the network.
        self.total_request_bytes_sent = 0
        # The total number of compressed bytes sent over the network
        self.total_compressed_request_bytes_sent = 0
        # The total number of bytes received.
        self.total_response_bytes_received = 0
        # The total number of secs spent waiting for a responses (so average latency can be calculated by dividing
        # this number by self.total_requests_sent). This includes connection establishment time.
        self.total_request_latency_secs = 0
        # The total number of HTTP connections successfully created.
        self.total_connections_created = 0
        # The total time spent compressing messages
        self.total_compression_time = 0
        # The path the file containing the certs for the root certificate authority to use for verifying the SSL
        # connection to Scalyr. If this is None, then server certificate verification is disabled, and we are
        # susceptible to man-in-the-middle attacks.
        self.__ca_file = ca_file
        self.__intermediate_certs_file = intermediate_certs_file
        self.__proxies = proxies
        # debug flag to disable send requests
        self.__disable_send_requests = disable_send_requests
        # flag to disable new addEvents format, TODO: remove this when we are confident it works
        self.__disable_logfile_addevents_format = disable_logfile_addevents_format
        # whether or not to monotonically increase event timestamps within the same session
        self.__enforce_monotonic_timestamps = enforce_monotonic_timestamps
def generate_status(self):
# type: () -> ScalyrClientSessionStatus
result = ScalyrClientSessionStatus()
result.total_requests_sent = self.total_requests_sent
result.total_requests_failed = self.total_requests_failed
result.total_request_bytes_sent = self.total_request_bytes_sent
result.total_compressed_request_bytes_sent = (
self.total_compressed_request_bytes_sent
)
result.total_response_bytes_received = self.total_response_bytes_received
result.total_request_latency_secs = self.total_request_latency_secs
result.total_connections_created = self.total_connections_created
result.total_compression_time = self.total_compression_time
return result
def augment_user_agent(self, fragments):
"""Modifies User-Agent header (applies to all data sent to Scalyr)
@param fragments String fragments to append (in order) to the standard user agent data
@type fragments: List of six.text_type
"""
self.__standard_headers["User-Agent"] = self.__get_user_agent(
self.__agent_version, fragments
)
    @property
    def session_id(self):  # type: () -> six.text_type
        """The unique id generated for this client session at construction time."""
        return self.__session_id
def ping(self):
"""Ping the Scalyr server by sending a test message to add zero events.
If the returned message is 'success', then it has been verified that the agent can connect to the
configured Scalyr server and that the api key is correct.
@return: The status message returned by the server.
@rtype:
"""
return self.send(self.add_events_request())[0]
    def __send_request(
        self,
        request_path,
        body=None,
        body_func=None,
        is_post=True,
        block_on_response=True,
    ):
        """Sends a request either using POST or GET to Scalyr at the specified request path. It may be either
        a POST or GET.
        Parses, returns response.
        @param request_path: The path of the URL to post to.
        @param [body]: The body string to send. May be None if body_func is specified. Ignored if not POST.
        @param [body_func]: A function that will be invoked to retrieve the body to send in the post. Ignored if not
            POST.
        @param [is_post]: True if this request should be sent using a POST, otherwise GET.
        @param [block_on_response]: True if this request should block, waiting for the response. If False, it will
            not block, but instead return a function that, when invoked, will block.
        @type request_path: six.text_type
        @type body: binary_type|None
        @type body_func: func|None
        @type is_post: bool
        @type block_on_response: bool
        @return: If block_on_response is True, a tuple containing the status message in the response
            (such as 'success'), the number of bytes sent, and the full response. If block_on_response is False,
            then returns a function that, when invoked, will block and return the tuple.
        @rtype: (str, int, str) or Function
        """
        current_time = time.time()
        # Refuse to try to send the message if the connection has been recently closed and we have not waited
        # long enough to try to re-open it. We do this to avoid excessive connection opens and SYN floods.
        if (
            self.__last_connection_close is not None
            and current_time - self.__last_connection_close < 30
        ):
            return self.__wrap_response_if_necessary(
                "client/connectionClosed", 0, "", block_on_response
            )
        self.total_requests_sent += 1
        was_sent = False
        try:
            try:
                # Lazily (re)open the connection on first use or after a close().
                if self.__connection is None:
                    self.__connection = ConnectionFactory.connection(
                        self.__full_address,
                        self.__request_deadline,
                        self.__ca_file,
                        self.__intermediate_certs_file,
                        self.__standard_headers,
                        self.__use_requests,
                        quiet=self.__quiet,
                        proxies=self.__proxies,
                    )
                    self.total_connections_created += 1
            except Exception as e:
                # Prefer the more specific error code attached by the connection, if any.
                error_code = (
                    getattr(e, "error_code", "client/connectionFailed")
                    or "client/connectionFailed"
                )
                return self.__wrap_response_if_necessary(
                    error_code, 0, "", block_on_response
                )
            if is_post:
                if body is None:
                    body_str = body_func()
                else:
                    body_str = body
            else:
                body_str = b""
            # Store reference to the raw uncompressed body string since we will need it later for
            # logging purposes
            body_str_raw = body_str
            self.total_request_bytes_sent += len(body_str) + len(request_path)
            if self.__compress:
                # Compress the payload, recording elapsed time and size ratio for diagnostics.
                size_before_compress = len(body_str)
                start_time = time.time()
                body_str = self.__compress(body_str)
                end_time = time.time()
                size_after_compress = len(body_str)
                compression_ratio = round(
                    (float(size_before_compress) / size_after_compress), 2
                )
                duration = round((end_time - start_time), 4)
                self.total_compression_time += duration
                log.log(
                    scalyr_logging.DEBUG_LEVEL_1,
                    'Compressed add event request data using "%s" algorithm and level "%s": '
                    "original_size=%s compressed_size=%s compression_ratio=%s duration=%ss"
                    % (
                        self.__compression_type,
                        self.__compression_level,
                        size_before_compress,
                        size_after_compress,
                        compression_ratio,
                        duration,
                    ),
                )
            self.total_compressed_request_bytes_sent += len(body_str) + len(
                request_path
            )
            # noinspection PyBroadException
            try:
                if self.__disable_send_requests:
                    # Debug mode: log and drop the request instead of sending it.
                    log.log(
                        scalyr_logging.DEBUG_LEVEL_0,
                        "Send requests disabled. %d bytes dropped"
                        % self.total_request_bytes_sent,
                        limit_once_per_x_secs=60,
                        limit_key="send-requests-disabled",
                    )
                else:
                    if is_post:
                        if log.getEffectiveLevel() == scalyr_logging.DEBUG_LEVEL_5:
                            # NOTE: We only perform this string formatting if debug level is enabled
                            # to save some CPU cycles when it's not.
                            # Workaround to fix issue with logging non utf-8 characters. We simply
                            # ignore non utf-8 characters.
                            body_str_raw = body_str_raw.decode("utf-8", "ignore")
                            if len(body_str_raw) > MAX_REQUEST_BODY_SIZE_LOG_MSG_LIMIT:
                                # We truncate long messages to avoid filling up the debug log too
                                # fast and to avoid increased memory usage under Python 2.7.
                                body_str_raw = body_str_raw[
                                    :MAX_REQUEST_BODY_SIZE_LOG_MSG_LIMIT
                                ]
                                body_str_raw += (
                                    " ... [body truncated to %s chars] ..."
                                    % (MAX_REQUEST_BODY_SIZE_LOG_MSG_LIMIT)
                                )
                            log.log(
                                scalyr_logging.DEBUG_LEVEL_5,
                                'Sending POST %s with body "%s"',
                                request_path,
                                body_str_raw,
                            )
                        self.__connection.post(request_path, body=body_str)
                    else:
                        log.log(
                            scalyr_logging.DEBUG_LEVEL_5, "Sending GET %s", request_path
                        )
                        self.__connection.get(request_path)
            except Exception as error:
                # TODO: Do not just catch Exception. Do narrower scope.
                if (
                    hasattr(error, "errno")
                    and error.errno is not None  # pylint: disable=no-member
                ):
                    log.error(
                        'Failed to connect to "%s" due to errno=%d. Exception was %s. Closing connection, '
                        "will re-attempt",
                        self.__full_address,
                        error.errno,  # pylint: disable=no-member
                        six.text_type(error),
                        error_code="client/requestFailed",
                    )
                else:
                    log.exception(
                        "Failed to send request due to exception. Closing connection, will re-attempt",
                        error_code="requestFailed",
                    )
                return self.__wrap_response_if_necessary(
                    "requestFailed", len(body_str), "", block_on_response
                )
            # Request made it onto the wire; from here on, latency/failure accounting
            # is owned by __receive_response rather than the finally block below.
            was_sent = True
            def receive_response():
                return self.__receive_response(body_str, current_time)
            if not block_on_response:
                return receive_response
            else:
                return receive_response()
        finally:
            # Only account for the failure here if the request never made it out.
            if not was_sent:
                self.total_request_latency_secs += time.time() - current_time
                self.total_requests_failed += 1
                self.close(current_time=current_time)
    def __receive_response(self, body_str, send_time):
        """Receives a response for a request previously sent using __send_request.
        @param body_str: The body of the request that was sent.
        @param send_time: The time of day when the request was sent.
        @type body_str: str
        @type send_time: float
        @return: The tuple containing the status message in the response (such as 'success'), the number of bytes
            sent, and the full response.
        @rtype: (str, int, str)
        """
        response = ""
        was_success = False
        bytes_received = 0
        try:
            try:
                if self.__disable_send_requests:
                    # Debug mode: pretend the server accepted the (dropped) request.
                    response = '{ "status":"success" }'
                    status_code = 200
                else:
                    status_code = self.__connection.status_code()
                    response = self.__connection.response()
                    bytes_received = len(response)
            except six.moves.http_client.HTTPException as httpError:
                log.error(
                    "Failed to receive response due to HTTPException '%s'. Closing connection, will re-attempt"
                    % (httpError.__class__.__name__),
                    error_code="requestFailed",
                )
                return "requestFailed", len(body_str), response
            except Exception as error:
                # TODO: Do not just catch Exception. Do narrower scope.
                if (
                    hasattr(error, "errno")
                    and error.errno is not None  # pylint: disable=no-member
                ):
                    log.error(
                        'Failed to receive response to "%s" due to errno=%d. Exception was %s. Closing '
                        "connection, will re-attempt",
                        self.__full_address,
                        error.errno,  # pylint: disable=no-member
                        six.text_type(error),
                        error_code="client/requestFailed",
                    )
                else:
                    log.exception(
                        "Failed to receive response due to exception. Closing connection, will re-attempt",
                        error_code="requestFailed",
                    )
                return "requestFailed", len(body_str), response
            try:
                response = six.ensure_text(response, "utf-8", "ignore")
            except Exception:
                # We ignore the exception since we still want to log the response even if it
                # contains non utf-8 characters
                pass
            log.log(
                scalyr_logging.DEBUG_LEVEL_5,
                'Response was received with body "%s"',
                response,
            )
            if status_code == 429:
                log.error(
                    'Received "too busy" response from server. Will re-attempt',
                    error_code="serverTooBusy",
                )
                return "serverTooBusy", len(body_str), response
            # If we got back an empty result, that often means the connection has been closed or reset.
            if len(response) == 0:
                log.error(
                    "Received empty response, server may have reset connection. Will re-attempt",
                    error_code="emptyResponse",
                )
                return "emptyResponse", len(body_str), response
            # Try to parse the response
            # noinspection PyBroadException
            try:
                response_as_json = scalyr_util.json_decode(response)
            except Exception:
                # TODO: Do not just catch Exception. Do narrower scope. Also, log error here.
                log.error(
                    "Failed to parse response of '%s' due to exception. Closing connection, will "
                    "re-attempt",
                    scalyr_util.remove_newlines_and_truncate(response, 1000),
                    error_code="parseResponseFailed",
                )
                return "parseResponseFailed", len(body_str), response
            # A parseable response counts as contact with the server, even if it reports an error.
            self.__last_success = send_time
            if "status" in response_as_json:
                status = response_as_json["status"]
                if status == "success":
                    was_success = True
                elif status == "error/client/badParam":
                    log.error(
                        "Request to '%s' failed due to a bad parameter value. This may be caused by an "
                        "invalid write logs api key in the configuration",
                        self.__full_address,
                        error_code="error/client/badParam",
                    )
                else:
                    log.error(
                        "Request to '%s' failed due to an error. Returned error code was '%s'",
                        self.__full_address,
                        status,
                        error_code="error/client/badParam",
                    )
                return status, len(body_str), response
            else:
                log.error(
                    "No status message provided in response. Unknown error. Response was '%s'",
                    scalyr_util.remove_newlines_and_truncate(response, 1000),
                    error_code="unknownError",
                )
                return "unknownError", len(body_str), response
        finally:
            # Latency, failure, and byte counters are updated on every exit path.
            self.total_request_latency_secs += time.time() - send_time
            if not was_success:
                self.total_requests_failed += 1
                self.close(current_time=send_time)
            self.total_response_bytes_received += bytes_received
def __wrap_response_if_necessary(
self, status_message, bytes_sent, response, block_on_response
):
"""Wraps the response as appropriate based on whether or not the caller is expecting to block on the
response or not.
If the caller requested to not block on the response, then they are expecting a function to be returned
that, when invoked, will block and return the result. If the caller did requested to block on the response,
then the response should be returned directly.
This is used to cover cases where there was an error and we do not have to block on the response from
the server. Instead, we already have the response to return. However, we still need to return the
right type of object to the caller.
@param status_message: The status message for the response.
@param bytes_sent: The number of bytes that were sent.
@param response: The response to return.
@param block_on_response: Whether or not the caller requested to block, waiting for the response. This controls
whether or not a function is returned or just the response tuple directly.
@type status_message: str
@type bytes_sent: int
@type response: str
@type block_on_response: bool
@return: Either a func or a response tuple (status message, num of bytes sent, response body) depending on
the value of ``block_on_response``.
@rtype: func or (str, int, str)
"""
if block_on_response:
return status_message, bytes_sent, response
def wrap():
return status_message, bytes_sent, response
return wrap
def send(self, add_events_request, block_on_response=True):
"""Sends an AddEventsRequest to Scalyr.
The AddEventsRequest should have been retrieved using the 'add_events_request' method on this object.
@param add_events_request: The request containing any log lines/events to copy to the server.
@param block_on_response: If True, this method will block, waiting for the response from the server.
Otherwise, it will not block. Instead, a function will be returned, that when invoked, will block.
@type add_events_request: AddEventsRequest
@type block_on_response: bool
@return: If block_on_response is True, a tuple containing the status message in the response
(such as 'success'), the number of bytes sent, and the full response. If block_on_response is False,
then returns a function, that will invoked, will block and return the tuple.
@rtype: (str, int, str) or Function
"""
current_time = time.time()
def generate_body():
add_events_request.set_client_time(current_time)
return add_events_request.get_payload()
return self.__send_request(
"/addEvents", body_func=generate_body, block_on_response=block_on_response,
)
def close(self, current_time=None):
"""Closes the underlying connection to the Scalyr server.
@param current_time: If not None, the time to use for the current time. Used for testing purposes.
@type current_time: float or None
"""
if self.__connection is not None:
if current_time is None:
current_time = time.time()
self.__connection.close()
self.__connection = None
self.__last_connection_close = current_time
    def add_events_request(self, session_info=None, max_size=1 * 1024 * 1024 * 1024):
        """Creates and returns a new AddEventsRequest that can be later sent by this session.
        The caller is expected to add events to this request and then submit it for transmission using
        the 'send' method.
        @param session_info: The session info for this session, which is basically any attributes that should
            be added to all events uploaded by this agent, such as server attributes from the config file.
        @param max_size: The maximum number of bytes to send in this request.
        @type session_info: dict
        @type max_size: int
        @return: The request that can be populated.
        @rtype: AddEventsRequest
        """
        body = {
            "token": self.__api_key,
            "session": self.__session_id,
            "threads": [],
        }
        if session_info is not None:
            # session_info used to be a JsonObject but now must be dict.
            assert type(session_info) == dict
            body["sessionInfo"] = session_info
        return AddEventsRequest(
            body,
            max_size=max_size,
            disable_logfile_addevents_format=self.__disable_logfile_addevents_format,
            enforce_monotonic_timestamps=self.__enforce_monotonic_timestamps,
        )
    def __get_user_agent(
        self, agent_version, fragments=None, sessions_api_keys_tuple=None
    ):
        """Determine the user agent to report in the request headers.
        We construct an agent that gives Scalyr some information about the platform the customer is running on,
        the Python version, and a few other tidbits. This is used to make decisions about support issues.
        @param agent_version: The agent version number.
        @param fragments: Additional strings to be appended. Each will be preceded by a semicolon
        @param sessions_api_keys_tuple: Optional (worker_type, num_sessions, num_api_keys) tuple used
            to build the sharded copy manager fragment of the user agent.
        @type agent_version: six.text_type
        @type fragments: List of six.text_type
        @return: The user agent string.
        @rtype: six.text_type
        """
        # We will construct our agent string to look something like:
        # Linux-redhat-7.0;python-2.7.2;agent-2.0.1;ssllib
        # And for requests using requests library:
        # Linux-redhat-7.0;python-2.7.2;agent-2.0.1;ssllib;requests-2.22.0
        python_version = sys.version_info
        if len(python_version) >= 5:
            python_version_str = "python-%s.%s.%s" % (
                python_version[0],
                python_version[1],
                python_version[2],
            )
        else:
            python_version_str = "python-unknown"
        # Try for a linux distribution first. This doesn't seem to work for Amazon AMIs, but for most
        # distributions it hopefully will provide something readable.
        platform_value = None
        # noinspection PyBroadException
        try:
            # NOTE(review): platform.dist() was deprecated and removed in Python 3.8; on
            # those versions this raises and we fall through to the generic handlers below.
            distribution = platform.dist()
            if len(distribution[0]) > 0:
                platform_value = "Linux-%s-%s" % (distribution[0], distribution[1])
        except Exception:
            platform_value = None
        # Try Mac
        if platform_value is None:
            # noinspection PyBroadException
            try:
                mac_ver = platform.mac_ver()[0]
                if len(mac_ver) > 0:
                    platform_value = "MacOS-%s" % mac_ver
            except Exception:
                platform_value = None
        # Fall back for all others. This should print out something reasonable for
        # Windows.
        if platform_value is None:
            platform_value = platform.platform(terse=1)
        # Include openssl version if available
        # Returns a tuple like this: (1, 1, 1, 8, 15)
        openssl_version = getattr(ssl, "OPENSSL_VERSION_INFO", None)
        if openssl_version:
            try:
                openssl_version_string = (
                    ".".join([str(v) for v in openssl_version[:3]])
                    + "-"
                    + str(openssl_version[3])
                )
                openssl_version_string = "o-%s" % (openssl_version_string)
            except Exception:
                openssl_version_string = None
        else:
            openssl_version_string = None
        # Include a string which indicates if the agent is running admin / root user
        from scalyr_agent.platform_controller import PlatformController
        try:
            platform_controller = PlatformController.new_platform()
            current_user = platform_controller.get_current_user()
        except Exception:
            # In some tests on Windows this can throw inside the tests so we ignore the error
            current_user = "unknown"
        if current_user in ["root", "Administrator"] or current_user.endswith(
            "\\Administrators"
        ):
            # Indicates agent running as a privileged / admin user
            user_string = "a-1"
        else:
            # Indicates agent running as a regular user
            user_string = "a-0"
        sharded_copy_manager_string = ""
        # Possible values for this header fragment:
        # mw-0 - Sharded copy manager functionality is disabled
        # mw-1|<num_sessions>|<num_api_keys> - Functionality is enabled and there are <num_sessions>
        # thread based sessions configured with <num_api_keys> unique API keys.
        # mw-2|<num_sessions>|<num_api_keys> - Functionality is enabled and there are <num_sessions>
        # process based sessions configured with <num_api_keys> unique API keys.
        if (
            sessions_api_keys_tuple
            and isinstance(sessions_api_keys_tuple, tuple)
            and len(sessions_api_keys_tuple) == 3
            and sessions_api_keys_tuple[1] > 1
        ):
            (worker_type, workers_count, api_keys_count,) = sessions_api_keys_tuple
            if worker_type == "multiprocess":
                sharded_copy_manager_string = "mw-2|"
            else:
                sharded_copy_manager_string = "mw-1|"
            sharded_copy_manager_string += "%s|%s" % (workers_count, api_keys_count)
        else:
            sharded_copy_manager_string = "mw-0"
        parts = [
            platform_value,
            python_version_str,
            "agent-%s" % agent_version,
        ]
        if openssl_version_string:
            parts.append(openssl_version_string)
        if user_string:
            parts.append(user_string)
        if sharded_copy_manager_string:
            parts.append(sharded_copy_manager_string)
        if self.__use_requests:
            import scalyr_agent.third_party.requests as requests
            parts.append("requests-%s" % (requests.__version__))
        if fragments:
            parts.extend(fragments)
        return ";".join(map(six.text_type, parts))
def perform_agent_version_check(self, track="stable"):
"""Query the Scalyr API to determine if a newer version is available
"""
url_path = (
"/ajax?method=performAgentVersionCheck&installedVersion=%s&track=%s"
% (self.__agent_version, track)
)
return self.__send_request(url_path, is_post=False)
class EventSequencer(object):
    """Tracks sequence ids and numbers for the events placed in an AddEventsRequest.

    The sequencer remembers the sequence id and sequence number of the most
    recently added event so that it can emit compact sequence fields on each
    event: the full id/number are only written when they change, otherwise
    only a delta to the previous number is written.

    This is a standalone class so that various test objects can also share the
    same logic.
    """

    __slots__ = ("__previous_sequence_id", "__previous_sequence_number")

    def __init__(self):
        # Most recently emitted sequence id; None means no id has been emitted yet.
        self.__previous_sequence_id = None
        # Most recently seen sequence number; used to compute deltas.
        self.__previous_sequence_number = None

    def reset(self):
        """Forgets any previously seen sequence id and number."""
        self.restore_from_memento((None, None))

    def get_memento(self):
        """Returns a snapshot of the sequencer's internal state as a tuple.

        The returned value can later be passed to `restore_from_memento` to
        roll the sequencer back to this exact state.
        """
        snapshot = (self.__previous_sequence_id, self.__previous_sequence_number)
        return snapshot

    def restore_from_memento(self, memento):
        """Restores internal state from a tuple produced by `get_memento`.

        @param memento: A (previous_sequence_id, previous_sequence_number) tuple.
        """
        self.__previous_sequence_id, self.__previous_sequence_number = memento

    def add_sequence_fields(self, event, sequence_id, sequence_number):
        """Populates the sequence fields of `event` when sequence information is available.

        If both sequence_id and sequence_number are non-None, this sets:
            'si' (sequence id) -- only when it differs from the previously seen id
            'sn' (sequence number) -- only when no previous number has been seen
            'sd' (sequence number delta) -- otherwise, the delta to the previous number

        The 'sn' and 'sd' fields are mutually exclusive; exactly one of them is set.

        @param event: The event to update
        @param sequence_id: A globally unique id, grouping a set of sequence_numbers
        @param sequence_number: A monotonically increasing sequence_number

        @type event: Event
        @type sequence_id: long
        @type sequence_number: long
        """
        if sequence_id is None or sequence_number is None:
            # Only emit sequence information when both pieces are present.
            return

        if sequence_id != self.__previous_sequence_id:
            # A new sequence id must be sent in full, and it also invalidates any
            # previously seen sequence number, forcing the full number to be sent.
            event.set_sequence_id(sequence_id)
            self.__previous_sequence_id = sequence_id
            self.__previous_sequence_number = None

        last_number = self.__previous_sequence_number
        if last_number is None:
            event.set_sequence_number(sequence_number)
        else:
            event.set_sequence_number_delta(sequence_number - last_number)
        self.__previous_sequence_number = sequence_number
class AddEventsRequest(object):
    """Used to construct an AddEventsRequest to eventually send.

    This abstraction has three key features. First, it uses a generally more efficient scheme to build
    up the string to eventually use as the body for an add_events request. Secondly, it does not require all events
    at construction time. Instead, you can incrementally add more events before the request is actually sent. This
    leads to better memory utilization when combined with an abstraction that is incrementally reading events from disk.
    It will also prevent you from exceeding the maximum request size. Third, you may undo the effect of adding events
    to the request before it is sent. This is useful to rollback the request state to a previous state if some
    problem occurs.
    """

    def __init__(
        self,
        base_body,
        max_size=1 * 1024 * 1024,
        disable_logfile_addevents_format=False,
        enforce_monotonic_timestamps=False,
    ):
        """Initializes the instance.

        @param base_body: A JsonObject or dict containing the information to send as the body of the add_events
            request, with the exception of the events field. The events and client_timestamp fields must not be
            included because they will be added later. Note, base_body must have some fields set, such as 'ts' which is
            required by the server.
        @param max_size: The maximum number of bytes this request can consume when it is serialized to JSON.
        @param disable_logfile_addevents_format: Flag to disable the improved addEvents format (the separate
            ``logs`` JSON array in the postfix).
        @param enforce_monotonic_timestamps: A bool that indicates whether event timestamps in the same session
            should be monotonically increasing or not. Defaults to False
        """
        assert len(base_body) > 0, "The base_body object must have some fields defined."
        assert (
            "events" not in base_body
        ), "The base_body object cannot already have 'events' set."
        assert (
            "client_time" not in base_body
        ), "The base_body object cannot already have 'client_time' set."

        # As an optimization, we use a BytesIO object to serialize the request.  We also
        # do a little bit of the JSON object assembly by hand.  Specifically, we serialize the request
        # to JSON without the 'events' field, but then delete the last '}' so that we can manually
        # add in the 'events: [ ... ]' ourselves.  This way we can watch the size of the buffer as
        # we build up events.
        # 2->TODO use BytesIO, make all data that is going to be written here - binary
        string_buffer = io.BytesIO()
        serialized_base_body = scalyr_util.json_encode(base_body, binary=True)

        # Now go back and find the last '}' and delete it so that we can open up the JSON again.
        # NOTE: base_body is always a dict so we can simplify rewinding logic by assuming }
        # is always the last character when serializing a dict to json
        string_buffer.write(serialized_base_body[:-1])

        # Append the start of our events field.
        string_buffer.write(b", events: [")

        # This buffer keeps track of all of the stuff that must be appended after the events JSON array to terminate
        # the request.  That includes both the threads JSON array and the client timestamp.
        if disable_logfile_addevents_format:
            self.__post_fix_buffer = PostFixBuffer(
                b"], threads: THREADS, client_time: TIMESTAMP }",
                disable_logfile_addevents_format,
            )
        else:
            self.__post_fix_buffer = PostFixBuffer(
                b"], logs: LOGS, threads: THREADS, client_time: TIMESTAMP }",
                disable_logfile_addevents_format,
            )

        # The time that will be sent as the 'client_time' parameter for the addEvents request.
        # This may be later updated using the set_client_time method in the case where the same AddEventsRequest
        # is being reused to send the events again.
        self.__post_fix_buffer.set_client_timestamp(time.time())

        self.__buffer = string_buffer
        self.__max_size = max_size

        # Number of events that have been successfully added so far (used for comma placement too).
        self.__events_added = 0

        # Whether or not to enforce monotonically increasing timestamps
        self.__enforce_monotonic_timestamps = enforce_monotonic_timestamps

        # If we have finished serializing the body, it is stored here until the close() method is invoked.
        self.__body = None

        # Used to add sequence fields to an event
        self.__event_sequencer = EventSequencer()

        # Used to record some performance timing data for debugging/analysis
        self.__timing_data = dict()

    @property
    def current_size(self):
        """
        @return: The number of bytes that will be used to send the current request.  This include both the bytes
            from the events and the post fix.
        @rtype: int
        """
        if self.__buffer is not None:
            return self.__buffer.tell() + self.__post_fix_buffer.length
        else:
            # The body has already been fully serialized (get_payload was invoked), so its length is exact.
            return len(self.__body)

    def add_log_and_thread(self, thread_id, thread_name, log_attrs):
        """Registers the specified log for this AddEvents request.

        Any thread id mentioned in any event in this request should first be registered here.

        @param thread_id: An id for the thread.  This can then be used as the value for a ``thread`` field
            in the ``event`` object passed to ``add_event``.  Should be unique for this session.
        @param thread_name: A human-readable name for the thread
        @param log_attrs: The metadata for this log

        @type thread_id: str
        @type thread_name: str
        @type log_attrs: dict

        @return: True if the allowed number of bytes to send was not exceeded by adding this log to the
            request (i.e. the log/thread entry was accepted).
        @rtype: bool
        """
        # Have to account for the extra space this will use when serialized.  See how much space we can allow for
        # the post fix right now.
        available_size_for_post_fix = self.__max_size - self.__buffer.tell()
        return self.__post_fix_buffer.add_log_and_thread_entry(
            thread_id,
            thread_name,
            log_attrs,
            fail_if_buffer_exceeds=available_size_for_post_fix,
        )

    def add_event(self, event, timestamp=None, sequence_id=None, sequence_number=None):
        """Adds the serialized JSON for event if it does not cause the maximum request size to be exceeded.

        It will automatically set the event's timestamp field to a new timestamp based on the current time
        but ensuring it is greater than any previous timestamp that has been used.

        If sequence_id and sequence_number are specified then this method will automatically set the following
        Event fields:
            'sequence_id' set to the value of sequence_id if it is different from the previously seen sequence_id
            'sequence_number' set to the value of sequence_number if we haven't previously seen a sequence number
            'sequence_number_delta' set to the delta of the sequence_number and the previously seen sequence_number

        The 'sequence_number' and 'sequence_number_delta' fields are mutually exclusive. Only one or the other will be
        used.

        It is illegal to invoke this method if 'get_payload' has already been invoked.

        @param event: The event object.
        @param timestamp: The timestamp to use for the event, in nanoseconds since the Epoch. If None, will be the current time.time()
        @param sequence_id: A globally unique id, grouping a set of sequence_numbers
        @param sequence_number: A monotonically increasing sequence_number

        @type event: Event
        @type timestamp: long
        @type sequence_id: long
        @type sequence_number: long

        @return: True if the event's serialized JSON was added to the request, or False if that would have resulted
            in the maximum request size being exceeded so it did not.
        """
        start_pos = self.__buffer.tell()
        # If we already added an event before us, then make sure we add in a comma to separate us from the last event.
        if self.__events_added > 0:
            self.__buffer.write(b",")

        timestamp = self.__get_valid_timestamp(timestamp=timestamp)

        # get copy of event sequencer state in case the event wasn't actually added
        # and we need to restore it
        memento = self.__event_sequencer.get_memento()
        self.__event_sequencer.add_sequence_fields(event, sequence_id, sequence_number)
        event.set_timestamp(timestamp)
        event.serialize(self.__buffer)

        # Check if we exceeded the size, if so chop off what we just added.
        # Also reset previously seen sequence numbers and ids
        if self.current_size > self.__max_size:
            # 2->TODO: io._IOBase does set new position after truncate, need to do seek explicitly.
            self.__buffer.truncate(start_pos)
            # 2->TODO: new data streams from "io" don't return to the specified position, need to change position manually.
            self.__buffer.seek(start_pos)
            self.__event_sequencer.restore_from_memento(memento)
            return False

        self.__events_added += 1
        return True

    @property
    def num_events(self):
        """Returns the number of events added to this request so far.

        @return: The number of events added to this request.
        @rtype: int
        """
        return self.__events_added

    def set_client_time(self, current_time):
        """Update the 'client_time' field in the request.

        The 'client_time' field should be set to the current time as known by the client when this request is
        sent.  Since a AddEventsRequest can be re-used multiple times to try to resend some events, it is important
        to update the 'client_time' field before each send.

        The 'client_time' field is used by the server to warn when the client clock skew is too great since that
        can lead to log upload problems.

        @param current_time: The current time to include in the request.
        @type current_time: float
        """
        # Get the current size of the postfix buffer since we may need it down below.  We need the length before
        # the new timestamp was added.
        original_postfix_length = self.__post_fix_buffer.length
        self.__post_fix_buffer.set_client_timestamp(current_time)

        if self.__body is not None:
            # We have already cached the serialized JSON, so we need to update it to remain consistent.
            # Create a buffer for the copying.  We write in the entire JSON and then just back up the length of
            # the old postfix and then add in the new one.
            # 2->TODO: use BytesIO
            rebuild_buffer = io.BytesIO()
            rebuild_buffer.write(self.__body)
            self.__body = None
            rebuild_buffer.seek(-1 * original_postfix_length, 2)  # os.SEEK_END
            rebuild_buffer.truncate()

            rebuild_buffer.write(self.__post_fix_buffer.content())
            self.__body = rebuild_buffer.getvalue()
            rebuild_buffer.close()

    def get_payload(self):
        """Returns the serialized JSON to use as the body for the add_request.

        After this is invoked, no new events can be added via the 'add_event' method.  However,
        you may call the 'set_client_time' method to update when this request is being sent, according to
        the client clock.
        """
        if self.__body is None:
            # First call: terminate the events array with the postfix and cache the full body.
            self.__buffer.write(self.__post_fix_buffer.content())
            self.__body = self.__buffer.getvalue()
            self.__buffer.close()
            self.__buffer = None
        return self.__body

    def close(self):
        """Must be invoked after this request is no longer needed.  You may not add events or invoke get_payload
        after this call.
        """
        self.__body = None
        self.__buffer = None

    def increment_timing_data(self, **key_values):
        """Increments the timing data kept as part of this data structure to help diagnosis performance issues.

        The arguments should be key/value pairs where the keys name some sort of timing component and the value
        by which to increment the count for that timing component.

        If this is the first time a timing component is being incremented, the initial value is set to zero.
        """
        for key, value in six.iteritems(key_values):
            if key in self.__timing_data:
                amount = self.__timing_data[key]
            else:
                amount = 0.0
            amount += value

            self.__timing_data[key] = amount

    def get_timing_data(self):
        """Serializes all of the timing data that has been collected via ``increment_timing_data``.

        @return: A string of the key/value pairs for all timing data.
        @rtype: str
        """
        output_buffer = io.StringIO()
        first_time = True

        # sort by key, to get a predictable result.
        for key, value in sorted(
            six.iteritems(self.__timing_data), key=lambda el: el[0]
        ):
            if not first_time:
                output_buffer.write(" ")
            else:
                first_time = False
            output_buffer.write(key)
            output_buffer.write("=")
            output_buffer.write(six.text_type(value))

        return output_buffer.getvalue()

    def __get_valid_timestamp(self, timestamp=None):
        """
        Gets a timestamp in nanoseconds since the Epoch

        @param timestamp: A timestamp to validate. If None, time.time() is used for the value of timestamp.
        @return: The next timestamp to use for events.
        @rtype: int
        """
        if timestamp is None:
            timestamp = int(time.time() * 1e9)

        if self.__enforce_monotonic_timestamps:
            # Module-level last-seen timestamp shared by the session; no name mangling
            # occurs because the identifier ends with two underscores.
            global __last_time_stamp__
            # pylint: disable=used-before-assignment
            if __last_time_stamp__ is not None and timestamp <= __last_time_stamp__:
                timestamp = __last_time_stamp__ + 1
            __last_time_stamp__ = timestamp

        return timestamp

    @property
    def total_events(self):
        """Returns the total number of events that will be sent in this batch."""
        return self.__events_added

    def position(self):
        """Returns a position such that if it is passed to 'set_position', all events added since this method was
        invoked are removed."""
        return AddEventsRequest.Position(
            self.__events_added, self.__buffer.tell(), self.__post_fix_buffer.position
        )

    def set_position(self, position):
        """Reverts this object to only contain the events contained by the object when position was invoked to
        get the passed in position.

        @param position: The position token representing the previous state.
        """
        self.__events_added = position.events_added
        # 2->TODO: io._IOBase does set new position after truncate, need to do seek explicitly.
        self.__buffer.truncate(position.buffer_size)
        self.__buffer.seek(position.buffer_size)
        self.__post_fix_buffer.set_position(position.postfix_buffer_position)

        # reset previously seen sequence id and numbers
        self.__event_sequencer.reset()

    class Position(object):
        """Represents a position in the added events."""

        def __init__(self, events_added, buffer_size, postfix_buffer_position):
            # Number of events that had been added when this position was captured.
            self.events_added = events_added
            # Byte offset into the events buffer at capture time.
            self.buffer_size = buffer_size
            # Opaque position token for the PostFixBuffer.
            self.postfix_buffer_position = postfix_buffer_position
# This is used down below by PostFixBuffer.
def _calculate_per_thread_extra_bytes():
    """Measures the JSON overhead added per entry in the serialized threads array.

    The overhead excludes the bytes used to serialize the thread id and name strings
    themselves.  PostFixBuffer uses these numbers to predict the serialized size of
    its threads array without repeatedly serializing it, which was found to burn too
    much CPU.

    @return: A two-entry list.  The first entry is the extra bytes consumed when the
        first thread is added to the threads JSON array; the second is the extra
        bytes for every subsequent thread (the values differ by at least one byte
        because of the separating comma).
    @rtype: [int]
    """
    marker = "A"
    marker_len = len(scalyr_util.json_encode(marker))

    # Serialize an array of 0, 1, and 2 entries and record each resulting length.
    threads = []
    serialized_sizes = []
    for _ in range(3):
        serialized_sizes.append(len(scalyr_util.json_encode(threads)))
        threads.append({"id": marker, "name": marker})

    # The per-entry overhead is the size growth minus the bytes attributable to the
    # two marker strings (id and name) in each entry.
    return [
        after - before - 2 * marker_len
        for before, after in zip(serialized_sizes, serialized_sizes[1:])
    ]
def _calculate_per_log_extra_bytes():
    """Calculates how many extra bytes are added to the serialized form of the logs JSON array
    when adding a new log entry, excluding the bytes for serializing the log's id string and
    attributes dict themselves.

    (The original docstring was copy-pasted from ``_calculate_per_thread_extra_bytes`` and
    incorrectly described the threads array.)

    This is used below by the PostFixBuffer abstraction to help calculate the number of bytes the serialized form
    of the PostFixBuffer will take, without having to actually serialize it.  It was found that doing the heavy
    weight process of serializing it over and over again to just get the size was eating too much CPU.

    @return: An array of two int entries.  The first entry is how many extra bytes are added when adding the
        first log entry to the logs JSON array and the second is how many extra bytes are added for all subsequent
        log entries.  (The number differs by at least one due to the need for a comma to be inserted).
    @rtype: [int]
    """
    # An array of the number of bytes used to serialize the array when there are N log entries in it
    # (where N is the index into sizes_by_entries).
    sizes_by_entries = []

    # Calculate sizes_by_entries by actually serializing each case.
    logs = []
    test_string = "A"
    test_dict = {}
    for _ in range(3):
        sizes_by_entries.append(len(scalyr_util.json_encode(logs)))
        # Add in another log entry for the next round through the loop.
        logs.append({"id": test_string, "attrs": test_dict})

    # Now go back and calculate the deltas between the different cases.  We have to remember to subtract
    # out the length due to the id string and the attrs dict.
    test_string_len = len(scalyr_util.json_encode(test_string))
    test_dict_len = len(scalyr_util.json_encode(test_dict))
    return [
        sizes_by_entries[i]
        - sizes_by_entries[i - 1]
        - (test_string_len + test_dict_len)
        for i in range(1, 3)
    ]
class PostFixBuffer(object):
    # 2->TODO this is binary buffer. Make it work only with binary data.
    """Buffer for the items that must be written after the events JSON array, which typically means
    the client timestamp and the threads JSON array.

    This abstraction has optimizations in place to more efficiently keep track of the number of bytes
    that will be used by the serialized form.

    Additionally, the buffer can be reset to a previous position.
    """

    def __init__(self, format_string, disable_logfile_addevents_format=False):
        """Initializes the buffer.

        @param format_string: The format for the buffer.  The output of this buffer will be this format string
            with the keywords THREADS and TIMESTAMP (and LOGS, unless disable_logfile_addevents_format is True)
            replaced with the json serialized form of the threads/logs JSON arrays and the timestamp.
        @param disable_logfile_addevents_format: If True, no logs JSON array is tracked and the format string
            is not expected to contain the LOGS keyword.

        @type format_string: six.binary_type
        @type disable_logfile_addevents_format: bool
        """
        # Make sure the keywords are used in the format string.
        assert b"THREADS" in format_string
        assert b"TIMESTAMP" in format_string
        if not disable_logfile_addevents_format:
            assert b"LOGS" in format_string
        self.__disable_logfile_addevents_format = disable_logfile_addevents_format
        # The entries added to include in the logs JSON array in the request.
        self.__logs = []
        # The entries added to include in the threads JSON array in the request.
        self.__threads = []
        # The timestamp to include in the output.
        self.__client_timestamp = 0
        self.__format = format_string
        # Cached size in bytes of the serialized form; kept up to date incrementally as
        # entries/timestamps are added so content() does not need to be re-serialized for sizing.
        self.__current_size = len(self.content())

    # Static variable holding the number of extra bytes to add in when calculating the new size due to adding in
    # a new thread entry (beyond just the bytes due to the serialized thread id and thread name themselves).
    # This will have two entries.  See above for a better description.
    __per_thread_extra_bytes = _calculate_per_thread_extra_bytes()

    __per_log_extra_bytes = _calculate_per_log_extra_bytes()

    @property
    def length(self):
        """The number of bytes the serialized buffer will take.

        @return: The number of bytes
        @rtype: int
        """
        return self.__current_size

    def content(self, cache_size=True):
        """Serialize all the information for the post fix and return it.

        @param cache_size: Used for testing purposes.  Can be used to turn off a slop factor that will automatically
            fix differences between the calculated size and the actual size.  We turn this off for testing to make
            sure we catch these errors.
        @type cache_size: bool

        @return: The post fix to include at the end of the AddEventsRequest.
        @rtype: six.binary_type
        """
        result = self.__format
        if not self.__disable_logfile_addevents_format:
            result = result.replace(
                b"LOGS", scalyr_util.json_encode(self.__logs, binary=True)
            )
        result = result.replace(
            b"TIMESTAMP", six.text_type(self.__client_timestamp).encode("utf-8")
        )
        result = result.replace(
            b"THREADS", scalyr_util.json_encode(self.__threads, binary=True)
        )
        # As an extra extra precaution, we update the current_size to be what it actually turned out to be.  We could
        # assert here to make sure it's always equal (it should be) but we don't want errors to cause issues for
        # customers.  Due to the way AddRequest uses this abstraction, we really really need to make sure
        # the length() returns the correct result after content() was invoked, so we add in this measure to be safe.
        if cache_size:
            self.__current_size = len(result)
        return result

    def set_client_timestamp(self, timestamp, fail_if_buffer_exceeds=None):
        """Updates the client timestamp that will be included in the post fix.

        @param timestamp: The timestamp.
        @param fail_if_buffer_exceeds: The maximum number of bytes that can be used by the post fix when serialized.
            If this is not None, and the size will exceed this amount when the timestamp is changed, then the
            timestamp is not changed and False is returned.

        @type timestamp: int|float
        @type fail_if_buffer_exceeds: None|int

        @return: True if the timestamp was updated (can only return False if fail_if_buffer_exceeds is not None)
        @rtype: bool
        """
        new_timestamp = int(timestamp)
        # 2->TODO timestamp should contain only ascii characters, so bytes count and characters count should be the same.
        size_difference = len(six.text_type(new_timestamp)) - len(
            six.text_type(self.__client_timestamp)
        )

        if (
            fail_if_buffer_exceeds is not None
            and self.__current_size + size_difference > fail_if_buffer_exceeds
        ):
            return False

        self.__current_size += size_difference
        self.__client_timestamp = new_timestamp
        return True

    def add_log_and_thread_entry(
        self, thread_id, thread_name, log_attrs, fail_if_buffer_exceeds=None
    ):
        # 2->TODO: Make it work with binary.
        """Adds in a new thread entry (and, unless the logfile addEvents format is disabled, a matching
        log entry) that will be included in the post fix.

        @param thread_id: The id of the thread.
        @param thread_name: The name of the thread.
        @param log_attrs: Log static attributes.
        @param fail_if_buffer_exceeds: The maximum number of bytes that can be used by the post fix when serialized.
            If this is not None, and the size will exceed this amount when the thread entry is added, then the
            thread is not added and False is returned.

        @type thread_id: str
        @type thread_name: str
        @type log_attrs: dict
        @type fail_if_buffer_exceeds: None|int

        @return: True if the thread was added (can only return False if fail_if_buffer_exceeds is not None)
        @rtype: bool
        """
        # Calculate the size difference.  It is at least the size taken by the serialized strings.
        size_difference = len(scalyr_util.json_encode(thread_name)) + len(
            scalyr_util.json_encode(thread_id)
        )
        if not self.__disable_logfile_addevents_format:
            # The matching log entry repeats the thread id and adds the attributes dict.
            size_difference += len(scalyr_util.json_encode(log_attrs)) + len(
                scalyr_util.json_encode(thread_id)
            )

        # Use the __per_thread_extra_bytes to calculate the additional bytes that will be consumed by serializing
        # the JSON object containing the thread id and name.  The number of extra bytes depends on whether or not
        # there is already an entry in the JSON array, so take that into consideration.
        num_threads = len(self.__threads)
        if num_threads < 1:
            size_difference += PostFixBuffer.__per_thread_extra_bytes[0]
            if not self.__disable_logfile_addevents_format:
                size_difference += PostFixBuffer.__per_log_extra_bytes[0]
        else:
            size_difference += PostFixBuffer.__per_thread_extra_bytes[1]
            if not self.__disable_logfile_addevents_format:
                size_difference += PostFixBuffer.__per_log_extra_bytes[1]

        if (
            fail_if_buffer_exceeds is not None
            and self.__current_size + size_difference > fail_if_buffer_exceeds
        ):
            return False

        self.__current_size += size_difference
        self.__threads.append({"id": thread_id, "name": thread_name})
        if not self.__disable_logfile_addevents_format:
            self.__logs.append({"id": thread_id, "attrs": log_attrs})
        return True

    @property
    def position(self):
        """Returns the current `position` for this buffer.

        This can be used to reset the buffer to a state before new thread entries were added or timestamps were set.

        @return: The position object.
        """
        # We store the information just as three entries in an array because we are lazy.
        return [self.__current_size, self.__client_timestamp, len(self.__threads)]

    def set_position(self, position):
        """Resets the buffer to a previous state.

        The contents of the thread JSON array and the client timestamp will be reset to whatever it was when
        `position` was invoked.

        @param position: The position to reset the buffer state to.
        """
        # The position value should be an array with three entries: the size, the client timestamp, and the number
        # of threads.  Since threads are always added one after another, it is sufficient just to truncate back to that
        # previous length.  Logs are also added one after another and only at the same time as a thread is added,
        # meaning this same check will also find what position to truncate the logs to.
        self.__current_size = position[0]
        self.__client_timestamp = position[1]
        assert len(self.__threads) >= position[2]
        if position[2] < len(self.__threads):
            self.__threads = self.__threads[0 : position[2]]
            if not self.__disable_logfile_addevents_format:
                self.__logs = self.__logs[0 : position[2]]
class Event(object):
    """Encapsulates a single event that will be included in an ``AddEventsRequest``.
    This abstraction has many optimizations to improve serialization time.
    """

    def __init__(
        self,
        thread_id=None,
        attrs=None,
        base=None,
        disable_logfile_addevents_format=False,
    ):
        """Creates an instance of an event to include in an AddEventsRequest.
        This constructor has two ways of being used.
        First, specifying the thread_id and attributes that will apply to the event. The attributes typically only
        include the attributes for the log file that the event belongs to. You then must set the per-log line
        attributes (such as timestamp and message) using the setters provided below.
        The second form is to create an event based on the copy of an already existing ``Event`` instance. This
        decreases overall serialization time because the ``attrs`` and ``thread_id`` field's serialization is only
        performed once across all for the original copy and shared to all derived copies.
        This is meant to really optimize the case where we create one base ``Event`` object for each log file that
        we are uploading (specifying that log's thread id and attributes in the constructor). Then, every time
        we upload a log line for that log, we copy the base ``Event`` instance and then set the per-log line
        attributes using the provided setters.
        @param thread_id: Used if not specifying ``base``. The thread id for the event.
        @param attrs: Used if not specifying ``base``. The attributes for the event, excluding any attributes
            that can be set via the provided setters. If you include one of those attributes in ``attrs``, the
            resulting behavior is undefined.
        @param base: Used if not specifying ``thread_id`` or ``attrs``. The instance of ``Event`` to copy to
            create this instance. Only the original ``thread_id`` and ``attrs`` that was passed into ``base``
            are copied. Anything set using the provided ``setters`` will not be used.
        @param disable_logfile_addevents_format: If True, do not emit the newer ``log:`` field in the
            serialization base (legacy AddEvents wire format).
        @type thread_id: str
        @type attrs: dict
        @type base: Event
        @type disable_logfile_addevents_format: bool
        """
        # When we serialize this event object in an AddEventsRequest, it will have the following fields, in this
        # form:
        #   {
        #     thread_id: "234234",  // str, the thread id for the log.
        #     attrs: {
        #       <log attributes>    // the keys,values for all attributes for this log.
        #       message: "Log content",  // str, the log line content.
        #       rate: 0.8,  // float, if this is a subsampled line, the rate at each it was selected.
        #     },
        #     ts: "123123123",  // <str, the timestamp for the log line>
        #     si: "123123",  //<str, the sequence identifier>
        #     sn: 10,  // <int, the sequence number -- not provided if sd is provided>
        #     sd: 1,  // <int, the sequence number delta -- provided instead of sn>
        #   }
        #
        # So, we can pre-serialize the first part of the message based on thread_id and attrs, and keep a copy of
        # that to then serialize the rest of the per-log line fields for later.
        #
        # The serialization_base (this pre-serialization) will look like:
        #
        #   {
        #     thread_id: "234234",  // str, the thread id for the log.
        #     attrs: {
        #       <log attributes>    // the keys,values for all attributes for this log.
        #       message:
        #
        # Note, we put in the ``message`` field name, so the next thing we have to serialize is the log line content.
        # We also then have to close off the attrs object, and eventually the overall object to finish the
        # serialization.
        # We only stash a copy of attrs for debugging/testing purposes. We really will just serialize it into
        # __serialization_base.
        self.__log_id = None
        if attrs is not None:
            # Copy the caller's dict so later mutations by the caller do not affect us.
            self.__attrs = dict(attrs)
        else:
            self.__attrs = dict()
        self.__disable_logfile_addevents_format = disable_logfile_addevents_format
        # The two construction forms are mutually exclusive.
        if (attrs is not None or thread_id is not None) and base is not None:
            raise Exception("Cannot use both attrs/thread_id and base")
        self.__thread_id = None
        # Used to get attributes from the parent (such as the logfile attributes) for this event.
        self.__parent_event = None
        if base is not None:
            # We are creating an event that is a copy of an existing one. Re-use the serialization base to capture
            # the per-log file attributes.  (Name-mangled private access works because ``base`` is also an Event.)
            self.__parent_event = base
            self.__serialization_base = base.__serialization_base
            self.__thread_id = base.__thread_id
            self.__disable_logfile_addevents_format = (
                base.__disable_logfile_addevents_format
            )
        else:
            self.__set_attributes(thread_id, self.__attrs)
        # The typical per-event fields. Note, all of the fields below are stored as strings, in the serialized
        # forms for their event fields EXCEPT message. For example, since ``sequence_id`` should be a string on the
        # json object, the __sequence_id field will begin with a double quote. HOWEVER, message (for optimization
        # purposes) is not pre-serialized and will not be blackslashed escaped/quoted before being added to the
        # output buffer.
        self.__message = None
        self.__timestamp = None
        self.__sequence_id = None
        self.__sequence_number = None
        self.__sequence_number_delta = None
        self.__sampling_rate = None
        # Whether or not any of the non-fast path fields were included. The fast fields are message, timestamp, snd.
        self.__has_non_optimal_fields = False
        self.__num_optimal_fields = 0

    def __set_attributes(self, thread_id, attributes):
        """ Set the attributes and thread id of an Event.

        Rebuilds ``__serialization_base``: the pre-serialized JSON prefix ending just
        after the ``message:`` field name.
        """
        self.__thread_id = thread_id
        self.__attrs = attributes
        attributes_to_serialize = self.__get_attributes_to_serialize()
        # A new event. We have to create the serialization base using provided information/
        # 2->TODO: should it be bytes, or it will be better to leave it as unicode and just convert it
        tmp_buffer = io.BytesIO()
        # Open base for the event object.
        tmp_buffer.write(b"{")
        if thread_id is not None:
            tmp_buffer.write(b"thread:")
            # 2->TODO: in python3 ujson will return result with unicode type,
            tmp_buffer.write(scalyr_util.json_encode(thread_id, binary=True))
            tmp_buffer.write(b", ")
            if not self.__disable_logfile_addevents_format:
                # The log id is the same value as the thread id in this format.
                tmp_buffer.write(b"log:")
                tmp_buffer.write(scalyr_util.json_encode(thread_id, binary=True))
                tmp_buffer.write(b", ")
        if attributes_to_serialize:
            # NOTE(review): ``attributes_to_serialize`` is only used as an emptiness
            # check here; the full ``attributes`` dict is what gets serialized.
            # Confirm this is intentional.
            tmp_buffer.write(b"attrs:")
            # NOTE: attributes is always a dict so we can simply rewinding logic by assuming }
            # is always the last character when serializing a dict to json
            tmp_buffer.write(scalyr_util.json_encode(attributes, binary=True)[:-1])
            tmp_buffer.write(b",")
        else:
            tmp_buffer.write(b"attrs:{")
        # Add the message field into the json object.
        tmp_buffer.write(b"message:")
        self.__serialization_base = tmp_buffer.getvalue()

    def __get_attributes_to_serialize(self):
        """Return the attributes that should be included in the serialization for this specific event.
        This does not include attributes from its parent if using the logfile_addevents_format, and does not include
        attributes from this event that already exist in the parent with the same value.
        """
        result = dict()
        if self.__disable_logfile_addevents_format:
            # Legacy format: merge parent attributes, with this event's taking precedence.
            if self.__parent_event:
                result = dict(self.__parent_event.__attrs)
            if self.__attrs:
                result.update(self.__attrs)
        else:
            if self.__parent_event:
                # Only emit attributes that differ from (or are absent in) the parent.
                for key in self.__attrs:
                    if (
                        key not in self.__parent_event.__attrs
                        or self.__parent_event.__attrs[key] != self.__attrs[key]
                    ):
                        result[key] = self.__attrs[key]
            else:
                result = self.__attrs
        return result

    def add_attributes(self, attributes, overwrite_existing=False):
        """ Adds items attributes to __attrs if the __parent_event doesn't
        already have those attributes set.
        If overwrite_existing is False an attribute will not be added if the key already exists in __attrs.

        Re-serializes the base only if something actually changed.
        """
        if attributes:
            attributes = dict(attributes)
            changed = False
            for key, value in six.iteritems(attributes):
                if key not in self.__attrs or overwrite_existing:
                    changed = True
                    self.__attrs[key] = value
            if changed:
                # Rebuild the cached serialization prefix to pick up the new attributes.
                self.__set_attributes(self.__thread_id, self.__attrs)

    @property
    def attrs(self):
        """
        @return: The attributes object and its parent event. If both have the same attribute key the parent's will be
            overwritten by this event's.
        @rtype: dict
        """
        result = dict()
        if self.__parent_event and self.__parent_event.__attrs:
            result = dict(self.__parent_event.__attrs)
        if self.__attrs:
            result.update(self.__attrs)
        return result

    def set_message(self, message):
        """Sets the message field for the attributes for this event.
        @param message: The message content.
        @type message: str
        @return: This object.
        @rtype: Event
        """
        # Count this toward the fast-path field total only on first assignment.
        if self.__message is None and message is not None:
            self.__num_optimal_fields += 1
        if type(message) is six.text_type:
            # Stored as utf-8 bytes; NOT pre-escaped (serialize() length-prefixes it).
            self.__message = message.encode("utf-8")
        else:
            self.__message = message
        return self

    @property
    def message(self):
        """Used only for testing.
        @return: The message content
        @rtype: str
        """
        return self.__message

    def set_timestamp(self, timestamp):
        """Sets the timestamp field for the attributes for this event.
        @param timestamp: The timestamp, in nanoseconds past epoch.
        @type timestamp: long
        @return: This object.
        @rtype: Event
        """
        # The timestamp field is serialized as a string to get around overflow issues, so put it in string form now.
        if self.__timestamp is None and timestamp is not None:
            self.__num_optimal_fields += 1
        self.__timestamp = b'"%s"' % six.text_type(timestamp).encode("utf-8")
        return self

    @property
    def timestamp(self):
        """Used only for testing.
        @return: the timestamp for the event.
        @rtype: long
        """
        # We have to cut off the quotes we surrounded the field with when we serialized it.
        if self.__timestamp is not None:
            return int(self.__timestamp[1:-1])
        else:
            return None

    @property
    def log_id(self):
        # Always None here; only set externally.  TODO confirm intended usage.
        return self.__log_id

    def set_sequence_id(self, sequence_id):
        """Sets the sequence id for the event. If this is not invoked, no sequence id will be included
        in the serialized event. (Which is an optimization that can be used if the sequence id is the same
        as the last serialized event's).
        @param sequence_id: The unique id for the sequence this event belongs. This must be globally unique and
            usually tied to a single log file. Generally, use UUID here.
        @type sequence_id: long
        @return: This object.
        @rtype: Event
        """
        # This is serialized as a string to get around overflow issues, so put in a string now.
        self.__sequence_id = ('"%s"' % sequence_id).encode("utf-8")
        # Including si disables the fast serialization path.
        self.__has_non_optimal_fields = True
        return self

    @property
    def sequence_id(self):
        """Used for testing purposes only
        @return: The sequence id or None if not set.
        @rtype: str
        """
        # We have to cut off the quotes we surrounded the field with when we serialized it.
        if self.__sequence_id is not None:
            return self.__sequence_id[1:-1]
        return None

    def set_sequence_number(self, sequence_number):
        """Sets the sequence number for the event. If this is not invoked, no sequence number will be included
        in the serialized event. (Which is an optimization that can be used if the sequence id is the same
        as the last serialized event's and you use ``sequence_number_delta`` instead to specify the delta between
        this events sequence number and the last one.).
        @param sequence_number: The sequence number for the event.
        @type sequence_number: long
        @return: This object.
        @rtype: Event
        """
        # Including sn disables the fast serialization path.
        self.__has_non_optimal_fields = True
        # It is serialized as a number, so just a toString is called for.
        self.__sequence_number = six.text_type(sequence_number).encode("utf-8")
        return self

    @property
    def sequence_number(self):
        """Used for testing purposes only.
        @return: The sequence number or None if not set.
        @rtype: long
        """
        # We have to convert it back to a number.
        if self.__sequence_number is not None:
            return int(self.__sequence_number)
        else:
            return None

    def set_sequence_number_delta(self, sequence_number_delta):
        """Sets the sequence number delta for the event. If this is not invoked, no sequence number delta will be
        included in the serialized event. (Which is what you should do if you specify a ``sequence_number`` instead).
        @param sequence_number_delta: The delta between the last sequence number of the one for this event.
        @type sequence_number_delta: long
        @return: This object.
        @rtype: Event
        """
        # It is serialized as a number, so just a toString is called for.
        if self.__sequence_number_delta is None and sequence_number_delta is not None:
            self.__num_optimal_fields += 1
        self.__sequence_number_delta = six.text_type(sequence_number_delta).encode(
            "utf-8"
        )
        return self

    @property
    def sequence_number_delta(self):
        """Uses for testing purposes only.
        @return: The sequence number delta if set or None.
        @rtype: long
        """
        # We have to convert it back to a number.
        if self.__sequence_number_delta is not None:
            return int(self.__sequence_number_delta)
        else:
            return None

    def set_sampling_rate(self, rate):
        """Sets the sampling rate field for this event.
        @param rate The rate
        @type rate float
        @return: This object.
        @rtype: Event
        """
        # Including sample_rate disables the fast serialization path.
        self.__has_non_optimal_fields = True
        self.__sampling_rate = six.text_type(rate).encode("utf-8")
        return self

    def serialize(self, output_buffer):
        """Serialize the event into ``output_buffer``.
        @param output_buffer: The buffer to serialize to.
        # 2->TODO: output_buffer needs to be BytesIO
        @type output_buffer: BytesIO
        """
        # Emit the pre-computed prefix (ends just after "message:").
        output_buffer.write(self.__serialization_base)
        # Use a special serialization format for message so that we don't have to send CPU time escaping it. This
        # is just a length prefixed format understood by Scalyr servers.
        scalyr_util.json_scalyr_encode_length_prefixed_string(
            self.__message, output_buffer
        )
        # We fast path the very common case of just a timestamp and sequence delta fields.
        if not self.__has_non_optimal_fields and self.__num_optimal_fields == 3:
            # Close attrs, then append sd and ts directly.
            output_buffer.write(b"}")
            output_buffer.write(b",sd:")
            output_buffer.write(self.__sequence_number_delta)
            output_buffer.write(b",ts:")
            output_buffer.write(self.__timestamp)
        else:
            # sample_rate lives inside attrs, so emit it before closing that object.
            self.__write_field_if_not_none(
                b",sample_rate:", self.__sampling_rate, output_buffer
            )
            # close off attrs object.
            output_buffer.write(b"}")
            self.__write_field_if_not_none(b",ts:", self.__timestamp, output_buffer)
            self.__write_field_if_not_none(b",si:", self.__sequence_id, output_buffer)
            self.__write_field_if_not_none(
                b",sn:", self.__sequence_number, output_buffer
            )
            self.__write_field_if_not_none(
                b",sd:", self.__sequence_number_delta, output_buffer
            )
        # close off the event object.
        output_buffer.write(b"}")

    def __write_field_if_not_none(self, field_name, field_value, output_buffer):
        """If the specified field value is not None, then emit the field name and the value to the output buffer.
        @param field_name: The text to emit before the value.
        @param field_value: The value to emit.
        @param output_buffer: The buffer to serialize to.
        @type field_name: str
        @type field_value: str or None
        @type output_buffer: BytesIO
        """
        # [start of 2->TODO]
        # BytesIO type needed. filed_name and field_value should be passed as binary.
        # In other case, we should convert them here.
        # [end of 2->TOD0]
        if field_value is not None:
            output_buffer.write(field_name)
            output_buffer.write(field_value)
def _rewind_past_close_curly(output_buffer):
"""A utility function for rewinding a buffer that had a JSON object emitted to it. It rewinds past the
last closing curly brace in the buffer, and then also erases the last non-whitespace character after that
if it is a comma (which shouldn't happen in practice). This is meant to prepare the buffer for emitting
new fields into the JSON object's serialization.
@param output_buffer: The buffer to rewind.
@type output_buffer: StringO
"""
# 2->TODO make binary literals.
# Now go back and find the last '}' and delete it so that we can open up the JSON again.
location = output_buffer.tell()
while location > 0:
location -= 1
output_buffer.seek(location)
if output_buffer.read(1) == b"}":
break
# Now look for the first non-white character. We need to add in a comma after it.
last_char = None
while location > 0:
location -= 1
output_buffer.seek(location)
last_char = output_buffer.read(1)
if not last_char.isspace():
break
# If the character happened to a comma, back up over that since we want to write our own comma.
if location > 0 and last_char == b",":
location -= 1
if location < 0:
raise Exception(
'Could not locate trailing "}" and non-whitespace in JSON serialization'
)
# Now chop off everything after the character at the location.
location += 1
output_buffer.seek(location)
output_buffer.truncate()
def create_connection_helper(host, port, timeout=None, source_address=None):
    """Creates and returns a socket connecting to host:port with the specified timeout.

    Tries every address returned by ``getaddrinfo`` in order; the first
    successful connection wins.  If every attempt fails, the last error is
    re-raised; if no addresses were returned at all, a generic socket.error
    is raised.  (Behavior mirrors Python 2.7's ``socket.create_connection``.)

    @param host: The host to connect to.
    @param port: The port to connect to.
    @param timeout: The timeout in seconds to use for all blocking operations on the socket.
    @param source_address: The source address, or None.
    @return: The connected socket
    """
    last_error = None
    for family, socktype, proto, _canonname, sockaddr in socket.getaddrinfo(
        host, port, 0, socket.SOCK_STREAM
    ):
        candidate = None
        try:
            candidate = socket.socket(family, socktype, proto)
            if timeout is not None:
                candidate.settimeout(timeout)
            if source_address is not None:
                candidate.bind(source_address)
            candidate.connect(sockaddr)
            return candidate
        except socket.error as exc:
            last_error = exc
            # Close the half-constructed socket before trying the next address.
            if candidate is not None:
                candidate.close()
    if last_error is not None:
        raise last_error
    raise socket.error("getaddrinfo returns an empty list")
| {
"content_hash": "a8613134e13b66ff386e77227147c90e",
"timestamp": "",
"source": "github",
"line_count": 2195,
"max_line_length": 134,
"avg_line_length": 43.214578587699314,
"alnum_prop": 0.6108416968879143,
"repo_name": "imron/scalyr-agent-2",
"id": "572ca87c1cf0068bae0889c6bb5faf7ae98bedd4",
"size": "95861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scalyr_agent/scalyr_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1297"
},
{
"name": "Dockerfile",
"bytes": "1461"
},
{
"name": "Python",
"bytes": "2093708"
}
],
"symlink_target": ""
} |
"""Module for object related utils."""
#: Supported container types for serialization/de-serialization; must be a
#: tuple, not a list, because it is passed directly to C{isinstance}, which
#: only accepts a type or a tuple of types.
_SEQUENCE_TYPES = (list, tuple, set, frozenset)
class AutoSlots(type):
    """Meta base class that auto-populates ``__slots__`` for created classes.

    Concrete metaclasses derived from this one must implement L{_GetSlots},
    which is invoked at class-creation time to compute the slot names.
    """

    def __new__(mcs, name, bases, attrs):
        """Called when a class should be created.

        @param mcs: The meta class
        @param name: Name of created class
        @param bases: Base classes
        @type attrs: dict
        @param attrs: Class attributes
        """
        # The class must not declare __slots__ itself; we generate it.
        assert "__slots__" not in attrs, \
            "Class '%s' defines __slots__ when it should not" % name
        attrs["__slots__"] = mcs._GetSlots(attrs)
        return type.__new__(mcs, name, bases, attrs)

    @classmethod
    def _GetSlots(mcs, attrs):
        """Compute the list of slot names for the class being created.

        @param attrs: The attributes of the class
        """
        raise NotImplementedError
class ValidatedSlots(object):
    """Base class whose constructor only accepts declared slot names.
    """

    __slots__ = []

    def __init__(self, **kwargs):
        """Initialize attributes from keyword arguments.

        Every keyword must appear in the slots declared along this class'
        MRO; anything else raises C{TypeError}.
        """
        known = self.GetAllSlots()
        for (name, val) in kwargs.items():
            if name not in known:
                raise TypeError("Object %s doesn't support the parameter '%s'" %
                                (self.__class__.__name__, name))
            setattr(self, name, val)

    @classmethod
    def GetAllSlots(cls):
        """Return every slot declared anywhere along the class' MRO.
        """
        result = []
        for klass in cls.__mro__:
            result.extend(getattr(klass, "__slots__", []))
        return result

    def Validate(self):
        """Validate the slot values.

        This method must be implemented by the child classes.
        """
        raise NotImplementedError
def ContainerToDicts(container):
    """Convert the elements of a container to standard Python types.

    For a C{dict}, only the values are converted (keys are kept as-is); for a
    sequence every element is converted.  Elements/values must provide a
    C{ToDict} method returning their serialized form.

    @type container: dict or sequence (see L{_SEQUENCE_TYPES})
    @raise TypeError: for any other container type
    """
    if isinstance(container, dict):
        return dict((key, value.ToDict()) for key, value in container.items())
    if isinstance(container, _SEQUENCE_TYPES):
        return [item.ToDict() for item in container]
    raise TypeError("Unknown container type '%s'" % type(container))
def ContainerFromDicts(source, c_type, e_type):
    """Convert a container from standard python types back into objects.

    For a C{dict} only values are converted (keys untouched); a C{None}
    source is treated as an empty container of the requested type.

    @type source: None, dict or sequence (see L{_SEQUENCE_TYPES})
    @param source: Input data
    @type c_type: type class
    @param c_type: Desired type for returned container
    @type e_type: element type class
    @param e_type: Item type for elements in returned container (must have a
        C{FromDict} class method)
    @raise TypeError: if C{c_type} is not a type or not a supported container
    """
    if not isinstance(c_type, type):
        raise TypeError("Container type '%s' is not a type" % type(c_type))
    if source is None:
        source = c_type()
    if c_type is dict:
        return dict((key, e_type.FromDict(value)) for key, value in source.items())
    if c_type in _SEQUENCE_TYPES:
        return c_type(map(e_type.FromDict, source))
    raise TypeError("Unknown container type '%s'" % c_type)
| {
"content_hash": "c8075b54356e41b4950866e37093985c",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 77,
"avg_line_length": 28.29230769230769,
"alnum_prop": 0.6653072321914084,
"repo_name": "apyrgio/snf-ganeti",
"id": "d25c06f8df70b9db7a98a7d8654b1154f38b01f6",
"size": "5019",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable-2.10-bpo2",
"path": "lib/outils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "1557814"
},
{
"name": "Python",
"bytes": "5311638"
},
{
"name": "Shell",
"bytes": "96816"
}
],
"symlink_target": ""
} |
import sys, os, time, atexit
from signal import SIGTERM
import argparse
## Defines the Daemon base class
#
# \note \par Subclassing
# This class should be subclased. The only thing that needs to be overrided is the run function.
class Daemon:
    """
    A generic daemon class.

    Usage: subclass the Daemon class and override the run() method.
    """

    ## Initializes the daemon.
    # \param pidfile the file containing the PID
    # \param stdin the stdin for the daemon
    # \param stdout the stdout for the daemon
    # \param stderr the stderr for the daemon
    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        ## the stdin
        self.stdin = stdin
        ## the stdout
        self.stdout = stdout
        ## the stderr
        self.stderr = stderr
        ## the pid file
        self.pidfile = pidfile

    ## Read the pid recorded in the pidfile.
    # \return the pid as an int, or None if the file is absent or unreadable
    def _read_pid(self):
        try:
            # BUGFIX: the original leaked the file handle on read errors;
            # a context manager guarantees it is closed.
            with open(self.pidfile, 'r') as pf:
                return int(pf.read().strip())
        except (IOError, ValueError):
            # Missing or malformed pidfile means "not running".
            return None

    ## do the UNIX double-fork magic, see Stevens' "Advanced Programming in
    # the UNIX Environment" for details (ISBN 0201563177)
    # http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        ## Actual file for stdin
        self.si = open(self.stdin, 'r')
        ## Actual file for stdout
        self.so = open(self.stdout, 'a+')
        ## Actual file for stderr
        self.se = open(self.stderr, 'a+')
        os.dup2(self.si.fileno(), sys.stdin.fileno())
        os.dup2(self.so.fileno(), sys.stdout.fileno())
        os.dup2(self.se.fileno(), sys.stderr.fileno())

        # write pidfile and arrange for it to be removed on clean exit
        atexit.register(self.delpid)
        # BUGFIX: the original never closed the pidfile handle.
        with open(self.pidfile, 'w+') as f:
            f.write("%s\n" % os.getpid())

    ## Deletes the pid file
    def delpid(self):
        os.remove(self.pidfile)

    ## Start the daemon
    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        pid = self._read_pid()
        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    ## stop the daemon
    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        pid = self._read_pid()
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart

        # Try killing the daemon process
        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError as err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print(err)
                sys.exit(1)

    ## Just call stop and start
    def restart(self):
        """
        Restart the daemon
        """
        self.stop()
        self.start()

    ## Run the daemon; override this in a subclass
    def run(self):
        """
        You should override this method when you subclass Daemon. It will be called after the process has been
        daemonized by start() or restart().
        """
        # BUGFIX: the original raised ``NotImplemented(...)`` -- that is a
        # non-callable constant, so invoking it raised TypeError instead of
        # the intended exception.
        raise NotImplementedError(
            "Run is not implemented in {0}".format(self.__class__.__name__))

    ## Dispatch a CLI command name to the matching daemon method.
    # \param cmd one of "start", "stop" or "restart"; anything else prints
    #        usage and exits with status 2
    def routeCmd(self, cmd):
        cmds = {
            "start": self.start,
            "stop": self.stop,
            "restart": self.restart,
        }
        try:
            handler = cmds[cmd]
        except KeyError:
            print("Unknown command")
            print("usage: {{ {} }}".format(" | ".join(cmds.keys())))
            sys.exit(2)
        # Run the handler outside the except clause so a KeyError raised by
        # the handler itself is not mistaken for an unknown command.
        handler()

    ## Add CLI arguments for the daemon options to an ArgumentParser.
    @classmethod
    def AddArg(cls, args):
        args.add_argument("-p", "--pid", help="The pid file")
        args.add_argument("-i", "--stdin", help="The stdin to write to (defaults to /dev/null)", default="/dev/null")
        args.add_argument("-o", "--stdout", help="The stdout to write to (defaults to /dev/null)", default="/dev/null")
        args.add_argument("-e", "--stderr", help="The stderr to write to (defaults to /dev/null)", default="/dev/null")
        return args

    ## Build the constructor kwargs from parsed CLI arguments.
    @classmethod
    def Construct(cls, args):
        args = cls.AddArg(args)
        # BUGFIX: the original read the option values off the parser object
        # itself (which has no such attributes) and passed the full
        # ``sys.argv`` -- including the program name -- to parse_args().
        # Parse properly and read from the resulting Namespace.
        ns = args.parse_args()
        return {"pidfile": ns.pid, "stdin": ns.stdin, "stdout": ns.stdout, "stderr": ns.stderr}

    ## Script entry point: parse argv and dispatch the command.
    @classmethod
    def Go(cls):
        if __name__ == '__main__':
            parser = argparse.ArgumentParser(description=cls.__doc__)
            parser.add_argument("cmd")
            cls.AddArg(parser)
            # BUGFIX: the original dispatched on attributes of the parser
            # instead of the parsed Namespace, which always failed.
            ns = parser.parse_args()
            daemon = cls(pidfile=ns.pid, stdin=ns.stdin,
                         stdout=ns.stdout, stderr=ns.stderr)
            daemon.routeCmd(ns.cmd)
"content_hash": "094356f024a29e800425826dca83df52",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 127,
"avg_line_length": 38.13793103448276,
"alnum_prop": 0.42495479204339964,
"repo_name": "radding/PythonDaemon",
"id": "f91c2b79f104188427b0fcac0c1b11b7b16007b4",
"size": "7857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BaseDaemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7886"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
import os, json
from glob import glob
import numpy as np
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.preprocessing import image
from keras.regularizers import l2, l1
# Per-channel ImageNet means (RGB), shaped to broadcast over (batch, 3, H, W).
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))


def vgg_preprocess(x):
    """Subtract the ImageNet channel means and flip RGB to BGR.

    ``x`` is a batched image tensor of shape (batch, 3, H, W); axis 1 is the
    channel axis, which is reversed to match the ordering the original VGG
    weights were trained with.
    """
    centered = x - vgg_mean
    return centered[:, ::-1]  # reverse channel axis: rgb->bgr
class Vgg16():
    """The VGG 16 Imagenet model"""
    # NOTE(review): written against the Keras 1.x API (Convolution2D,
    # nb_class/nb_sample, fit_generator signature) with Theano dimension
    # ordering -- confirm before porting to a newer Keras.

    def __init__(self):
        # Base URL that weights and class metadata are downloaded from.
        self.FILE_PATH = 'http://www.platform.ai/models/'
        self.create()
        self.get_classes()

    def get_classes(self):
        # Download (and cache) the ImageNet class-index mapping and store the
        # human-readable class names ordered by class index.
        fname = 'imagenet_class_index.json'
        fpath = get_file(fname, self.FILE_PATH+fname, cache_subdir='models')
        with open(fpath) as f:
            class_dict = json.load(f)
        self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]

    def predict(self, imgs, details=False):
        # For each image return (top probability, predicted index, class name).
        # NOTE(review): ``details`` is accepted but never used -- confirm.
        all_preds = self.model.predict(imgs)
        idxs = np.argmax(all_preds, axis=1)
        preds = [all_preds[i, idxs[i]] for i in range(len(idxs))]
        classes = [self.classes[idx] for idx in idxs]
        return np.array(preds), idxs, classes

    def ConvBlock(self, layers, filters):
        # Append ``layers`` zero-padded 3x3 ReLU conv layers followed by one
        # 2x2 max-pool.
        model = self.model
        for i in range(layers):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    def FCBlock(self):
        # Fully-connected block: 4096-unit ReLU layer plus 50% dropout.
        model = self.model
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))

    def FCBlock_reg(self, c = 0.1):
        # Same as FCBlock, with L2 weight regularization of strength ``c``.
        model = self.model
        model.add(Dense(4096, activation='relu', W_regularizer = l2(c)))
        model.add(Dropout(0.5))

    def create(self):
        # Build the standard VGG16 topology (input (3, 224, 224)) and load
        # the pre-trained ImageNet weights.
        model = self.model = Sequential()
        model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))
        self.ConvBlock(2, 64)
        self.ConvBlock(2, 128)
        self.ConvBlock(3, 256)
        self.ConvBlock(3, 512)
        self.ConvBlock(3, 512)
        model.add(Flatten())
        self.FCBlock()
        self.FCBlock()
        model.add(Dense(1000, activation='softmax'))
        fname = 'vgg16.h5'
        model.load_weights(get_file(fname, self.FILE_PATH+fname, cache_subdir='models'))

    def get_batches(self, path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):
        # NOTE(review): the default ``gen`` is a single generator instance
        # created at definition time and shared across all calls -- verify
        # this sharing is intended.
        return gen.flow_from_directory(path, target_size=(224,224),
                class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)

    def finetune(self, batches):
        # Swap the final 1000-way softmax for one sized to the dataset's
        # class count; freeze all earlier layers before recompiling.
        model = self.model
        model.pop()
        for layer in model.layers: layer.trainable=False
        model.add(Dense(batches.nb_class, activation='softmax'))
        self.compile()

    def finetune_reg(self, batches, c = 0.1):
        # Same as finetune, but the new softmax layer carries L2
        # regularization of strength ``c``.
        model = self.model
        model.pop()
        for layer in model.layers: layer.trainable=False
        model.add(Dense(batches.nb_class, activation='softmax', W_regularizer = l2(c)))
        self.compile()

    # The section below is commented-out, work-in-progress code kept as inert
    # string literals; it is never executed.
    '''
    def finetune_flex(self, batches, retrain_dense_layers):
    '''
    #Adding more options to the finetune method:
    #1. ability to remove and readd more than 1 layer
    #2. ability to add regularization
    '''
    layers = model.layers
    # Get the indexes of the dense layers...
    dense_idx = [index for index,layer in enumerate(layers) if type(layer) is Dense]
    # ...and set this and all subsequent layers to trainable
    first_dense_id = dense_idx[-retrain_dense_layers]
    for layer in layers[first_dense_id:]:
        layer.trainable=True

    model = self.model
    model.pop()
    for layer in model.layers: layer.trainable=False
    model.add(Dense(batches.nb_class, activation='softmax'))
    self.compile()
    '''

    def compile(self, lr=0.001):
        # Adam optimizer with categorical cross-entropy; ``lr`` is the
        # learning rate.
        self.model.compile(optimizer=Adam(lr=lr),
                loss='categorical_crossentropy', metrics=['accuracy'])

    def fit_data(self, trn, labels, val, val_labels, nb_epoch=1, batch_size=64):
        # Train on in-memory arrays with a held-out validation set.
        self.model.fit(trn, labels, nb_epoch=nb_epoch,
                validation_data=(val, val_labels), batch_size=batch_size)

    def fit(self, batches, val_batches, nb_epoch=1):
        # Train from directory batch generators.
        self.model.fit_generator(batches, samples_per_epoch=batches.nb_sample, nb_epoch=nb_epoch,
                validation_data=val_batches, nb_val_samples=val_batches.nb_sample)

    def test(self, path, batch_size=8):
        # Predict over all images found under ``path``; returns the batches
        # object (for filenames/ordering) plus the raw predictions.
        test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)
        return test_batches, self.model.predict_generator(test_batches, test_batches.nb_sample)
| {
"content_hash": "daf36e2e112ae7df69341a07b8c19593",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 118,
"avg_line_length": 34.5,
"alnum_prop": 0.6380625476735317,
"repo_name": "briandalessandro/courses",
"id": "23f33738ade0b45fa92479e9dca7d80b852d8ae6",
"size": "5244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deeplearning1/nbs/utils/vgg16.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9578479"
},
{
"name": "Python",
"bytes": "33839"
},
{
"name": "Shell",
"bytes": "18882"
}
],
"symlink_target": ""
} |
import os
import shlex
import shutil
import subprocess as sp

from behave import given, then, when

import sure
@given(u'a fresh machine with mkdo installed')
def step_impl(context):
    # Build a throwaway Docker image with the freshly built mkdo wheel
    # installed, simulating a clean machine for the scenario.  The image
    # name is stored on ``context`` for later steps.
    docker_dir = 'docker'
    docker_name = 'mkdo/acceptance-test-run'
    # Start from an empty build-context directory.
    shutil.rmtree(docker_dir, ignore_errors=True)
    os.mkdir(docker_dir)
    shutil.copy('../dist/mkdo-0.1.0-py2.py3-none-any.whl', os.path.join(docker_dir))
    # NOTE(review): inside this non-raw triple-quoted string each trailing
    # backslash-newline pair is consumed by Python, so the RUN command is
    # written to the Dockerfile as one long line.  That happens to still be
    # valid Dockerfile syntax, but confirm it is intentional.
    with open(os.path.join(docker_dir, 'Dockerfile'), 'w') as f:
        f.write("""
FROM debian:8.7
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python \
python-pip \
&& \
rm -rf /var/lib/apt/lists/* && \
apt-get clean
ADD mkdo-0.1.0-py2.py3-none-any.whl /tmp
RUN pip install /tmp/mkdo-0.1.0-py2.py3-none-any.whl
""")
    # Build the image; ``sure`` turns the exit-code check into an assertion.
    return_code = sp.call(['docker', 'build',
                           '--force-rm', '--rm=true',
                           '-t', docker_name, docker_dir
                           ], stdout=sp.PIPE)
    return_code.should.be.equal(0)
    context.docker_name = docker_name
@given(u'an empty source directory')
def step_impl(context):
    # Create an empty 'fake_srcdir' under the current directory and remember
    # its absolute path on the behave context for later steps.
    srcdir = os.path.join(os.getcwd(), 'fake_srcdir')
    context.fake_srcdir = srcdir
    os.mkdir(srcdir)
@when(u'the user runs "{}"')
def step_impl(context, command):
    # Run *command* inside the test container, with the fake source directory
    # mounted read-write at its host path and used as the working directory.
    # Output and exit status are captured onto the behave context.
    argv = shlex.split(command)
    docker_cmd = [
        'docker', 'run', '--rm',
        '--volume=%s:%s:rw' % (context.fake_srcdir, context.fake_srcdir),
        '--workdir=%s' % (context.fake_srcdir,),
        context.docker_name,
    ]
    proc = sp.Popen(docker_cmd + argv, stdout=sp.PIPE, stderr=sp.PIPE)
    out, err = proc.communicate()
    context.stdout = out
    context.stderr = err
    context.returncode = proc.returncode
@then(u'a "do/build" script is created')
def step_impl(context):
    # Assert that the previous step produced an executable do/build script
    # inside the fake source directory.
    script_path = os.path.join(context.fake_srcdir, 'do', 'build')
    with sure.ensure('do/build should exist'):
        os.path.exists(script_path).should.be.true
    with sure.ensure('do/build should be executable'):
        os.access(script_path, os.X_OK).should.be.true
| {
"content_hash": "5d22765de2ab86b7072efc34dbb5d91a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 33.09836065573771,
"alnum_prop": 0.6092124814264487,
"repo_name": "ben--/mkdo",
"id": "32efb133b6fd4bcfcaf3d38753c62b51bc0e4918",
"size": "2019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "features/steps/test-steps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "Python",
"bytes": "3486"
},
{
"name": "Shell",
"bytes": "9772"
}
],
"symlink_target": ""
} |
token_query = '''
SELECT
T.token,
count(*) as C
FROM "TokenLinks" TL JOIN "Token" T
ON T.tokenid = TL.target
WHERE TL.source =
(SELECT tokenid FROM "Token" T WHERE T.token = :token)
AND TL.distance > -4 AND TL.distance < 28 and T.dateid=:id
GROUP BY T.token HAVING count(*) > 2
ORDER BY C DESC
LIMIT 10000;'''
token_query2 = '''
SELECT
T.token,
TL.index,
TL.distance
--SUM(TL.index + TL.distance) as loc
FROM "TokenLinks" TL JOIN "Token" T
ON T.tokenid = TL.target
WHERE TL.source =
(SELECT tokenid FROM "Token" T WHERE T.token = :token)
AND TL.distance > -8 AND TL.distance < 28
and T.dateid=:id
--GROUP BY T.token, TL.distance HAVING count(*) > 2
--ORDER BY loc DESC
ORDER BY TL.index, TL.distance
LIMIT 10000;'''
| {
"content_hash": "4bd998b3c8028da3755d8a48be111b64",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 62,
"avg_line_length": 24.485714285714284,
"alnum_prop": 0.5822637106184364,
"repo_name": "Ropes/PDX-Council-Minutes-Data",
"id": "e496ca8cd3c6c8a935e572c85b59ba68ea5883ec",
"size": "858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskr/queries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1168"
},
{
"name": "Go",
"bytes": "8486"
},
{
"name": "HTML",
"bytes": "47849"
},
{
"name": "Python",
"bytes": "40325"
}
],
"symlink_target": ""
} |
"""
Module: mediahandler.types.audiobooks
Module contains:
- |MHAudiobook|
Child class of MHMediaType for the audiobooks media type.
- |get_book_info()|
Makes API request to Google Books and returns results.
"""
import os
import re
from re import search
import logging
from math import ceil
from shutil import copy, move
from subprocess import Popen, PIPE
from os import path, listdir, makedirs
from googleapiclient.discovery import build
from mutagen.mp3 import MP3
from mutagen.ogg import OggFileType
import mediahandler as mh
# Python 2/3 compatibility: build_opener moved from urllib2 to urllib.request.
try:
    from urllib.request import build_opener
except ImportError:
    from urllib2 import build_opener
# Python 2/3 compatibility: use xrange on py2, range on py3.
try:
    XRANGE = xrange
except NameError:
    XRANGE = range
def get_book_info(api_key, query):
    """Makes API request to Google Books.

    Required arguments:
        - api_key
            String. A valid Google API public access key.
        - query
            String. Search string to submit to Google.

    Returns a dict describing the most relevant result (id, titles, year,
    genre, author, cover URL), or an empty dict when Google returns no items.
    Fix: `new_book_info` is now initialized before the loop — previously an
    empty result set raised UnboundLocalError at the final `return`.
    """
    logging.info("Querying Google Books")
    # Connect to Google Books API
    service = build('books', 'v1', developerKey=api_key)
    # Make API request
    request = service.volumes().list(
        q=query,
        orderBy="relevance",
        printType="books",
        maxResults=5
    )
    # Get response
    response = request.execute()
    logging.debug("Google response:\n%s", response)
    # Default when no results are returned (bug fix: avoid UnboundLocalError).
    new_book_info = {}
    # Get the top response only; the `break` below stops after the first item.
    for book in response.get('items', []):
        # Get publication date
        published = book['volumeInfo']['publishedDate']
        logging.debug("Published date: %s", published)
        # Extract just the year
        find_year = re.match(r"(\d{4})", published)
        year = ''
        if find_year is not None:
            year = find_year.group(1)
        # Check for categories; fall back to a generic genre
        category = "Audiobook"
        if 'categories' in book['volumeInfo']:
            category = book['volumeInfo']['categories'][0]
        # Look for titles; a subtitle extends the long form as "title: subtitle"
        long_title = book['volumeInfo']['title']
        subtitle = None
        if 'subtitle' in book['volumeInfo']:
            long_title = '{0}: {1}'.format(book['volumeInfo']['title'],
                                           book['volumeInfo']['subtitle'])
            subtitle = book['volumeInfo']['subtitle']
        # Set book information file structure
        logging.info("Google Book ID: %s", book['id'])
        new_book_info = {
            'id': book['id'],
            'short_title': book['volumeInfo']['title'],
            'long_title': long_title,
            'subtitle': subtitle,
            'year': year,
            'genre': category,
            # NOTE(review): 'authors' and 'imageLinks' are assumed present in
            # every result — a KeyError here would propagate to the caller.
            'author': ", ".join(book['volumeInfo']['authors']),
            'cover': book['volumeInfo']['imageLinks']['thumbnail'],
        }
        break
    return new_book_info
class MHAudiobook(mh.MHObject):
    """Child class of MHObject for the audiobooks media type.

    Required arguments:
        - settings
            Dict or MHSettings object.
        - push
            MHPush object.

    Public method:
        - |add()|
            Main wrapper function for adding audiobook files. Processes
            calls to the Google Books API and ABC chaptering tool.
    """
    def __init__(self, settings, push):
        """Initialize the MHAudiobook class.

        Required arguments:
            - settings
                Dict or MHSettings object.
            - push
                MHPush object.
        """
        logging.info("Starting audiobook handler class")
        self.folder = None
        super(MHAudiobook, self).__init__(settings, push)
        # Set globals
        self.book_info = {}
        self.push = push
        self.orig_path = None
        self.file_type = None
        # Derive the media type name ("audiobook...") from the class name.
        self.type = re.sub(r'^mh', '', type(self).__name__.lower())
        self.cover_img_name = 'cover.jpg'
        # Set up book settings.
        # "nc" matches non-chaptered source types (mp3/ogg/wav);
        # "c" matches already-chaptered containers (m4b) — see _get_files().
        self.set_settings({
            'regex': {
                "nc": r"\.(mp3|ogg|wav)$",
                "c": r"\.(m4b)$",
            },
            'audio': {
                'MP3': MP3,
                'OGG': OggFileType,
            },
        })
        # Check for null path in settings
        if self.folder is None:
            self.folder = path.join(
                path.expanduser("~"), 'Media', 'Audiobooks')
            logging.debug("Using default path: %s", self.folder)
        # Check destination exists
        if not path.exists(self.folder):
            self.push.failure("Folder for Audiobooks not found: {0}".format(
                self.folder))
        # Look for Google api key
        if self.api_key is None:
            logging.warning("Google Books API key not found")
            raise Warning("Google Books API key not found")
        # Convert hours to seconds for chapter length
        self.max_length = self.chapter_length * 3600
        logging.debug("Using chapter length: %s", self.max_length)
    def add(self, raw):
        """Main wrapper function for adding audiobook files. Processes calls
        to the Google Books API and ABC chaptering tool.

        Required arguments:
            - raw
                Valid path to audiobook files to be processed.

        Returns ([formatted book title], skipped_files).
        """
        logging.info("Getting audiobook")
        # Parse string & get query
        refined = self._clean_string(raw)
        logging.debug("Cleaned search string: %s", refined)
        # Use custom search string, if defined
        if hasattr(self, 'custom_search'):
            refined = self.custom_search
            logging.debug("Custom search query: %s", refined)
        # Get book info from Google
        self.set_book_info(refined)
        logging.debug(self.book_info.__dict__)
        # Deal with single files: wrap them in a folder named after the title
        if path.isfile(raw):
            raw = self._single_file(raw, self.book_info.short_title)
        # Save cover image to file
        cover_file = self._save_cover(raw, self.book_info.cover)
        logging.debug("Cover image: %s", cover_file)
        # Get files and chapterize files, if enabled
        get_result = self._get_files(raw, self.make_chapters)
        (is_chapterized, book_files) = get_result
        logging.debug(book_files)
        # Verify success
        if not is_chapterized:
            self.push.failure("Unable to chapterize book: {0}".format(raw))
        # Move & rename files
        (move_files, skipped) = self._move_files(
            book_files, self.make_chapters)
        logging.debug("Move was successful: %s", move_files)
        # Verify success
        if not move_files and not skipped:
            return self.push.failure(
                "Unable to move book files: {0}".format(raw))
        # format book title
        book_title = '"{0}" by {1}'.format(self.book_info.long_title,
                                           self.book_info.author)
        logging.info("Book title: %s", book_title)
        return [book_title], skipped
    def _clean_string(self, str_path):
        """Cleans query string before sending to Google API.

        Takes in a string parse from media file path and removes non-
        alphanumeric characters, extra whitespace, blacklisted words,
        and other unwanted characters.
        """
        logging.info("Cleaning up path string")
        # Get query from folder path: the last path component is the raw name
        find_book = str_path.rsplit(os.path.sep)[1:]
        string = find_book[-1]
        logging.debug("Initial string: %s", string)
        # Save original path for later
        self.orig_path = path.dirname(str_path)
        # Get blacklist items from file
        blacklist_file = path.join(mh.__mediaextras__, 'blacklist.txt')
        with open(blacklist_file) as blacklist_io:
            blacklist = [line.strip() for line in blacklist_io]
        # Convert blacklist array to regex string
        blacklist = "|".join(blacklist)
        blacklist_regex = re.compile(blacklist, re.I)
        # Remove blacklist words; repeat until no substitutions remain
        count = 1
        while count > 0:
            (string, count) = re.subn(blacklist_regex, " ", string, 0)
        # Setup order of regexes: bracketed runs, non-letters, acronyms, runs of spaces
        regexes = [
            r"[\(\[\{].*[\)\]\}]",
            r"[^a-zA-Z ]",
            r"[A-Z]{3,4}",
            r"\s{2,10}",
        ]
        # Loop through regexes
        for regex in regexes:
            count = 1
            while count > 0:
                (string, count) = re.subn(regex, ' ', string)
        # Remove trailing or start whitespace
        count = 1
        while count > 0:
            (string, count) = re.subn(r'(^\s|\s$)', '', string)
        return string
    def set_book_info(self, query):
        """A wrapper function for calling get_book_info().

        Converts resulting dict into object members.
        """
        result = get_book_info(self.api_key, query)
        self.book_info = self.MHSettings(result)
    def _single_file(self, file_path, path_name):
        """Extra processing needed for single audiobook files.

        Creates new folder and moves file into it. Returns the new folder.
        """
        logging.info("Handling as single file")
        # Set root path
        path_root = self.orig_path
        # Set new folder
        new_folder = path.join(path_root, path_name)
        logging.debug("New folder: %s", new_folder)
        # Create folder
        if not path.exists(new_folder):
            makedirs(new_folder)
        # Get file name
        file_name = path.basename(file_path)
        # Set new path
        new_path = path.join(new_folder, file_name)
        logging.debug("New path: %s", new_path)
        # Move file
        move(file_path, new_path)
        return new_folder
    def _save_cover(self, img_dir, img_url):
        """Retrieves and saves cover image from Google results.

        Will use an existing cover image if it is in the same directory
        as the main files and is named 'cover.jpg'.
        """
        logging.info("Saving audiobook cover image")
        # Set new image file path
        img_path = path.join(img_dir, self.cover_img_name)
        logging.debug("Image URL: %s", img_url)
        logging.debug("Image Path: %s", img_path)
        # Check to see if file exists
        if path.isfile(img_path):
            logging.warning("Cover image already exists")
            # If so, return none
            return img_path
        # Clean up image url: drop Google's page-curl effect parameter
        no_curl = re.sub(r"(\&edge=curl)", "", img_url)
        logging.debug("Cleaned cover url: %s", no_curl)
        # Set image request headers (some hosts reject the default urllib UA)
        opener = build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        # Get image info from web & open
        # NOTE(review): opener.close() runs before response.read(); the
        # response object stays readable here, but confirm on all supported
        # Python versions.
        response = opener.open(no_curl)
        opener.close()
        # Write image to new cover file
        with open(img_path, "wb") as output:
            output.write(response.read())
        # Add image to book info
        self.book_info.cover_image = img_path
        return img_path
    def _get_files(self, file_dir, make_chapters):
        """Parses directory to look for and process audiobook files.

        Returns existing chaptered audiobook files (.m4b). If 'make_chapters'
        setting is disabled, will return non-chaptered files (.mp3, .ogg). Or
        if 'make_chapters' is enabled, will look for non-chaptered files and
        send them to be chapterized.
        """
        logging.info("Retrieving audiobook files")
        # default values
        is_chapterized = False
        to_chapterize = []
        book_files = []
        file_list = []
        # Get list of files
        file_list = listdir(file_dir)
        # loop through all the files in dir
        for item in sorted(file_list):
            # Look for file types we want (.m4b = already chaptered)
            good_file = re.search(self.regex.c, item, re.I)
            if good_file:
                full_path = path.join(file_dir, item)
                book_files.append(full_path)
            # Look for file types we can chapterize (.mp3/.ogg/.wav)
            bad_file = re.search(self.regex.nc, item, re.I)
            if bad_file:
                self.file_type = bad_file.group(1)
                to_chapterize.append(item)
        # See if any files need chapterizing (if enabled)
        if make_chapters:
            logging.debug("Already chaptered file count: %s", len(book_files))
            logging.debug("To chapter file count: %s", len(to_chapterize))
            # If there are no chaptered files, chapterize other files
            if to_chapterize and not book_files:
                (chapter_success, new_files) = self._chapterize_files(
                    file_dir, to_chapterize)
                if chapter_success:
                    book_files = new_files
                else:
                    # On failure new_files carries the ABC tool output
                    return False, new_files
        # If chapterizing is disabled, return all files
        elif not book_files and to_chapterize:
            logging.debug("Not making chapters")
            book_files = to_chapterize
        # If there are chaptered files but other files too, note it
        elif book_files and to_chapterize:
            logging.warning('Non-chaptered files were found and ignored: %s',
                            ', '.join(to_chapterize))
        # Make sure we have chapterized files to return
        logging.debug("Final book file count: %s", len(book_files))
        if book_files:
            is_chapterized = True
        return is_chapterized, book_files
    def _chapterize_files(self, file_path, file_array):
        """Chapterizes non-chaptered audiobook files (.mp3, .ogg)

        Sends query to ABC application to convert files into chaptered
        audiobook files based on the 'chapter_length' setting.
        """
        logging.info("Chapterizing audiobook files")
        new_files = []
        # Get chapter parts
        file_parts = self._get_chapters(file_path, file_array,
                                        self.file_type)
        # Create m4b for each file part
        for i, file_part in enumerate(file_parts):
            part_path = path.join(file_path, 'Part {0}'.format(str(i+1)))
            # Define chapter query
            b_cmd = [self.php, '-f', self.abc,
                     file_part,  # Path to book files
                     self.book_info.author.encode("utf8"),  # artist
                     self.book_info.long_title.encode("utf8"),  # album
                     self.book_info.short_title.encode("utf8"),  # title
                     self.book_info.genre.encode("utf8"),  # genre
                     self.book_info.year.encode("utf8"),  # year
                     self.file_type]  # file type
            logging.debug("ABC query:\n%s", b_cmd)
            # Process query
            b_open = Popen(b_cmd, stdout=PIPE, stderr=PIPE)
            # Get output
            # NOTE(review): on Python 3, communicate() returns bytes while the
            # regex below is a str — confirm this path runs on Python 2 or
            # that output is decoded elsewhere.
            (output, err) = b_open.communicate()
            logging.debug("ABC output: %s", output)
            logging.debug("ABC err: %s", err)
            # Close process
            b_open.terminate()
            # Find file names in output.
            # 'succsessfully' presumably mirrors the ABC tool's own misspelled
            # message — do not "fix" the spelling here.
            bfiles = search(r"Audiobook \'(.*)\.m4b\' created succsessfully!",
                            output)
            if bfiles is None:
                return False, output
            # Set full file path
            created_file = path.join(
                part_path, '{0}.m4b'.format(bfiles.group(1)))
            new_file_path = path.join(file_path, '{0} - {1}.m4b'.format(
                bfiles.group(1), str(i+1)))
            # Rename file with part #
            move(created_file, new_file_path)
            logging.debug("New file path: %s", new_file_path)
            # Add to array
            new_files.append(new_file_path)
        return True, new_files
    def _get_chapters(self, file_path, file_array, file_type):
        """Breaks up non-chaptered files in to folders for ABC processing.

        Returns an array of paths to folders. Each folder is for an audiobook
        chaptered file part based on the results of _calculate_chunks().
        """
        logging.info("Determining book parts")
        # Calculate chunks
        book_chunks = []
        chunks = self._calculate_chunks(file_path, file_array, file_type)
        # Create new subfolders for parts
        logging.info("Creating book part subfolders")
        for i, chunk in enumerate(chunks):
            # Create new folder for part
            part_path = path.join(file_path, 'Part {0}'.format(str(i+1)))
            if not path.exists(part_path):
                makedirs(part_path)
            # Move files for part into new path (copy keeps the originals)
            for get_chunk in chunk:
                start_path = path.join(file_path, get_chunk)
                end_path = path.join(part_path, get_chunk)
                copy(start_path, end_path)
            # Copy over cover image so each part gets embedded art
            cover_start = path.join(file_path, self.cover_img_name)
            cover_end = path.join(part_path, self.cover_img_name)
            copy(cover_start, cover_end)
            # Add new part folder to array
            book_chunks.append(part_path)
        return book_chunks
    def _calculate_chunks(self, file_path, file_array, file_type):
        """Calculates how many different chaptered file parts should be made
        by ABC based on the 'chapter_length' setting.

        Returns an array of arrays containing the paths to the non-chaptered
        files for each new part.
        """
        # Defaults
        file_type = file_type.upper()
        total_length = 0
        book_parts = 0
        # Sum all the file durations via mutagen (self.audio maps type -> class)
        for get_file in file_array:
            full_path = path.join(file_path, get_file)
            audio_track = getattr(self.audio, file_type)(full_path)
            total_length += audio_track.info.length
            logging.debug("%s: %s", get_file, audio_track.info.length)
        logging.debug("Total book length: %s seconds", total_length)
        # Check against defined max part length
        if total_length <= self.max_length:
            book_parts = 1
            logging.debug("Parts: %s", book_parts)
            chunks = [file_array]
        # Determine how many parts should be made
        else:
            # NOTE(review): on Python 2 these `/` divisions floor when both
            # operands are ints — confirm the intended rounding there.
            book_parts = int(ceil(total_length / self.max_length))
            logging.debug("Parts: %s", book_parts)
            # Count files
            logging.debug("File count: %s", len(file_array))
            # Calculate array chunk size
            array_chunk = int(ceil(len(file_array) / book_parts))
            logging.debug("Array chunks: %s", array_chunk)
            # Create array chunks
            if array_chunk > 0:
                chunks = [file_array[x:x+array_chunk]
                          for x in XRANGE(0, len(file_array), array_chunk)]
            else:
                chunks = [file_array]
        return chunks
    def _move_files(self, file_array, has_chapters):
        """Move and renames audiobook files based on Google results.

        Moves created audiobook files to chosen Audiobook folder location.
        Saves files with the following naming scheme from Google results: ::

            <audiobooks folder>/<author>/<full title>/<short title>.m4b

        Or for non-chaptered files, the file name is: ::

            <track no.> - <short title>.<mp3 or ogg>

        Returns (moved_file_names, skipped_file_paths).
        """
        logging.info("Moving audiobook files")
        # Create folder-friendly title ('_' stands in for the ':' separator)
        if self.book_info.subtitle is None:
            folder_title = self.book_info.short_title
        else:
            folder_title = '{0}_ {1}'.format(
                self.book_info.short_title, self.book_info.subtitle)
        # Set new book directory path
        book_dir = path.join(self.folder,
                             self.book_info.author, folder_title)
        logging.debug("New directory: %s", book_dir)
        # Create the folder
        if not path.exists(book_dir):
            makedirs(book_dir)
        # Sort files in order
        sorted_array = sorted(file_array)
        # Loop through files
        moved_files = []
        skipped_files = []
        for i, book_file in enumerate(sorted_array):
            # Set new name
            new_name = self.book_info.short_title
            new_path = ''
            # Use chapter naming for chapters
            if has_chapters:
                # Set start path
                start_path = book_file
                # Check for multiple parts
                if len(file_array) > 1:
                    book_part = ', Part {0}'.format(str(i+1))
                    new_name = self.book_info.short_title + book_part
                # Set new file path
                new_path = path.join(book_dir, '{0}.m4b'.format(new_name))
            else:
                # Set non-chaptered file paths & formatting
                start_path = path.join(self.orig_path, book_file)
                new_name = '{0:02d} - {1}.{2}'.format(
                    i+1, new_name, self.file_type)
                new_path = path.join(book_dir, new_name)
            logging.debug("Start path: %s", start_path)
            logging.debug("New path: %s", new_path)
            # Check for duplicate
            if path.isfile(new_path):
                # Add to skipped file list
                skipped_files.append(new_path)
                logging.warning("Duplicate file was skipped: %s", new_path)
            else:
                # Copy the file
                copy(start_path, new_path)
                # Add to moved file list
                moved_files.append(new_name)
        return moved_files, skipped_files
| {
"content_hash": "ae629b8c54577534ceec45e5c624c406",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 78,
"avg_line_length": 32.20864381520119,
"alnum_prop": 0.5573755321117897,
"repo_name": "ErinMorelli/em-media-handler",
"id": "290867552a8c42f94571c8486afb87b62b3a8c01",
"size": "22323",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-linting",
"path": "mediahandler/types/audiobooks.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "705"
},
{
"name": "Python",
"bytes": "218593"
}
],
"symlink_target": ""
} |
"""Support for displaying persistent notifications."""
from collections import OrderedDict
import logging
from typing import Any, Mapping, MutableMapping, Optional
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.const import ATTR_FRIENDLY_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.template import Template
from homeassistant.loader import bind_hass
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
# mypy: allow-untyped-calls, allow-untyped-defs
# Attribute keys used in service payloads and stored notification records.
ATTR_CREATED_AT = "created_at"
ATTR_MESSAGE = "message"
ATTR_NOTIFICATION_ID = "notification_id"
ATTR_TITLE = "title"
ATTR_STATUS = "status"
DOMAIN = "persistent_notification"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Event fired whenever the set of notifications (or a status) changes.
EVENT_PERSISTENT_NOTIFICATIONS_UPDATED = "persistent_notifications_updated"
# Service names registered in async_setup().
SERVICE_CREATE = "create"
SERVICE_DISMISS = "dismiss"
SERVICE_MARK_READ = "mark_read"
# Voluptuous schemas validating each service's call data.
SCHEMA_SERVICE_CREATE = vol.Schema(
    {
        vol.Required(ATTR_MESSAGE): vol.Any(cv.dynamic_template, cv.string),
        vol.Optional(ATTR_TITLE): vol.Any(cv.dynamic_template, cv.string),
        vol.Optional(ATTR_NOTIFICATION_ID): cv.string,
    }
)
SCHEMA_SERVICE_DISMISS = vol.Schema({vol.Required(ATTR_NOTIFICATION_ID): cv.string})
SCHEMA_SERVICE_MARK_READ = vol.Schema({vol.Required(ATTR_NOTIFICATION_ID): cv.string})
# Object id used when the caller does not supply a notification id.
DEFAULT_OBJECT_ID = "notification"
_LOGGER = logging.getLogger(__name__)
# State value set on notification entities; read/unread status markers.
STATE = "notifying"
STATUS_UNREAD = "unread"
STATUS_READ = "read"
@bind_hass
def create(hass, message, title=None, notification_id=None):
    """Generate a notification; safe to call from any thread."""
    hass.add_job(async_create, hass, message, title, notification_id)
@bind_hass
def dismiss(hass, notification_id):
    """Remove a notification; safe to call from any thread."""
    hass.add_job(async_dismiss, hass, notification_id)
@callback
@bind_hass
def async_create(
    hass: HomeAssistant,
    message: str,
    title: Optional[str] = None,
    notification_id: Optional[str] = None,
) -> None:
    """Generate a notification by calling the create service (event loop only)."""
    # Build the service payload, skipping any argument that was not supplied.
    data: MutableMapping[str, Any] = {}
    if title is not None:
        data[ATTR_TITLE] = title
    if message is not None:
        data[ATTR_MESSAGE] = message
    if notification_id is not None:
        data[ATTR_NOTIFICATION_ID] = notification_id
    hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_CREATE, data))
@callback
@bind_hass
def async_dismiss(hass: HomeAssistant, notification_id: str) -> None:
    """Remove a notification by calling the dismiss service (event loop only)."""
    hass.async_create_task(
        hass.services.async_call(
            DOMAIN, SERVICE_DISMISS, {ATTR_NOTIFICATION_ID: notification_id}
        )
    )
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
    """Set up the persistent notification component.

    Registers the create/dismiss/mark_read services and the websocket command,
    and keeps an in-memory ordered mapping of entity_id -> notification record
    in hass.data[DOMAIN]["notifications"].
    """
    persistent_notifications: MutableMapping[str, MutableMapping] = OrderedDict()
    hass.data[DOMAIN] = {"notifications": persistent_notifications}
    @callback
    def create_service(call):
        """Handle a create notification service call."""
        title = call.data.get(ATTR_TITLE)
        message = call.data.get(ATTR_MESSAGE)
        notification_id = call.data.get(ATTR_NOTIFICATION_ID)
        if notification_id is not None:
            entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
        else:
            # No id supplied: generate a unique entity id and derive the
            # notification id from its object-id part.
            entity_id = async_generate_entity_id(
                ENTITY_ID_FORMAT, DEFAULT_OBJECT_ID, hass=hass
            )
            notification_id = entity_id.split(".")[1]
        attr = {}
        if title is not None:
            if isinstance(title, Template):
                try:
                    title.hass = hass
                    title = title.async_render(parse_result=False)
                except TemplateError as ex:
                    _LOGGER.error("Error rendering title %s: %s", title, ex)
                    # Fall back to the raw template source on render failure.
                    title = title.template
            attr[ATTR_TITLE] = title
            attr[ATTR_FRIENDLY_NAME] = title
        if isinstance(message, Template):
            try:
                message.hass = hass
                message = message.async_render(parse_result=False)
            except TemplateError as ex:
                _LOGGER.error("Error rendering message %s: %s", message, ex)
                # Fall back to the raw template source on render failure.
                message = message.template
        attr[ATTR_MESSAGE] = message
        hass.states.async_set(entity_id, STATE, attr)
        # Store notification and fire event
        # This will eventually replace state machine storage
        persistent_notifications[entity_id] = {
            ATTR_MESSAGE: message,
            ATTR_NOTIFICATION_ID: notification_id,
            ATTR_STATUS: STATUS_UNREAD,
            ATTR_TITLE: title,
            ATTR_CREATED_AT: dt_util.utcnow(),
        }
        hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
    @callback
    def dismiss_service(call):
        """Handle the dismiss notification service call."""
        notification_id = call.data.get(ATTR_NOTIFICATION_ID)
        entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
        if entity_id not in persistent_notifications:
            # Unknown id: silently ignore (unlike mark_read, which logs).
            return
        hass.states.async_remove(entity_id, call.context)
        del persistent_notifications[entity_id]
        hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
    @callback
    def mark_read_service(call):
        """Handle the mark_read notification service call."""
        notification_id = call.data.get(ATTR_NOTIFICATION_ID)
        entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
        if entity_id not in persistent_notifications:
            _LOGGER.error(
                "Marking persistent_notification read failed: "
                "Notification ID %s not found",
                notification_id,
            )
            return
        persistent_notifications[entity_id][ATTR_STATUS] = STATUS_READ
        hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
    hass.services.async_register(
        DOMAIN, SERVICE_CREATE, create_service, SCHEMA_SERVICE_CREATE
    )
    hass.services.async_register(
        DOMAIN, SERVICE_DISMISS, dismiss_service, SCHEMA_SERVICE_DISMISS
    )
    hass.services.async_register(
        DOMAIN, SERVICE_MARK_READ, mark_read_service, SCHEMA_SERVICE_MARK_READ
    )
    hass.components.websocket_api.async_register_command(websocket_get_notifications)
    return True
@callback
@websocket_api.websocket_command({vol.Required("type"): "persistent_notification/get"})
def websocket_get_notifications(
    hass: HomeAssistant,
    connection: websocket_api.ActiveConnection,
    msg: Mapping[str, Any],
) -> None:
    """Return a list of persistent_notifications."""
    # Project each stored record onto the fields exposed over the websocket.
    fields = (
        ATTR_NOTIFICATION_ID,
        ATTR_MESSAGE,
        ATTR_STATUS,
        ATTR_TITLE,
        ATTR_CREATED_AT,
    )
    payload = [
        {field: record[field] for field in fields}
        for record in hass.data[DOMAIN]["notifications"].values()
    ]
    connection.send_message(websocket_api.result_message(msg["id"], payload))
| {
"content_hash": "3266481faeb49f8046a4b200a124192d",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 87,
"avg_line_length": 31.97826086956522,
"alnum_prop": 0.6397008837525493,
"repo_name": "partofthething/home-assistant",
"id": "589cc97baeafd919cd2d4fe54dffdcd612241ffb",
"size": "7355",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/persistent_notification/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_list_or_404, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from .models import Todo
# Create your views here.
@login_required(login_url='/accounts/signin/')
def home(request):
    """Render the shared landing page for the project and the app."""
    return render(request, 'base.html')
@login_required(login_url='/accounts/signin/')
def list_view(request):
    """Show the authenticated user's todo items.

    Fix: login_url previously lacked a leading slash ('accounts/signin/'),
    which Django treats as a relative redirect target and breaks for any
    non-root URL; now consistent with the home view.
    """
    # NOTE(review): get_list_or_404 raises Http404 when the user has *no*
    # todos — confirm whether an empty list page is the intended behavior.
    todo_list = get_list_or_404(Todo, owner=request.user)
    return render(request, 'core/todo_list.html', {'todo_list':todo_list})
@login_required(login_url='/accounts/signin/')
def add_view(request):
    """Create a new todo item for the authenticated user.

    GET renders the empty form; POST saves the item and redirects to the
    list view, re-rendering the form on an IntegrityError.
    Fix: login_url now carries a leading slash ('/accounts/signin/') so the
    redirect is absolute, consistent with the home view.
    """
    if request.method == 'POST':
        # NOTE(review): a missing 'content' field raises KeyError (HTTP 500)
        # — presumably the form always supplies it; confirm.
        content = request.POST['content']
        owner = request.user
        td = Todo(content=content, owner=owner)
        try:
            td.save()
            return HttpResponseRedirect(reverse('todo-list'))
        except IntegrityError:
            # Re-render the form with the submitted content preserved.
            return render(request, 'core/todo_form.html', {'content':content})
    else:
        return render(request, 'core/todo_form.html')
@login_required(login_url='/accounts/signin/')
def edit_view(request, tid):
    """Edit an existing todo item owned by the authenticated user.

    Fixes:
    - login_url now carries a leading slash ('/accounts/signin/'), consistent
      with the home view; the relative form produced broken redirects.
    - The lookup is restricted to the requesting user's own items; previously
      any authenticated user could edit any todo by guessing its primary key
      (insecure direct object reference). Non-owners now get a 404.
    """
    todo = get_object_or_404(Todo, pk=tid, owner=request.user)
    if request.method == 'POST':
        todo.content = request.POST.get('content','')
        # Unchecked checkbox sends no 'status' field, so it defaults to False.
        todo.status = request.POST.get('status',False)
        try:
            todo.save()
            return HttpResponseRedirect(reverse('todo-list'))
        except IntegrityError:
            # Re-render the form with the attempted values preserved.
            return render(request, 'core/todo_form.html', {'content':todo.content, 'status':todo.status})
    else:
        return render(request, 'core/todo_form.html', {'content':todo.content, 'status':todo.status})
| {
"content_hash": "e2a24795b855bd40d8144a5119866592",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 105,
"avg_line_length": 31.915254237288135,
"alnum_prop": 0.6494954859267127,
"repo_name": "kodingway/django-teardown-todo",
"id": "542d44457cfde21c787ddc24eb11591c2f4a3ad0",
"size": "1883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "807"
},
{
"name": "HTML",
"bytes": "3379"
},
{
"name": "Python",
"bytes": "9568"
}
],
"symlink_target": ""
} |
import concurrent.futures
import functools
import peewee
from feedbuffer import settings, log
# Module-level SQLite connection; all models bind to it via the Model base.
_database = peewee.SqliteDatabase(settings.DATABASE_PATH)
_logger = log.get_logger(__name__)
# Easy way to queue function calls and execute them in a single thread, without having to manually write
# producer-consumer logic.
_database_executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
class Model(peewee.Model):
    """Base model binding every table to the module-level SQLite database."""
    class Meta:
        database = _database
class Feed(Model):
    """A subscribed feed: unique URL, update interval, and last raw feed data."""
    url = peewee.TextField(unique=True)
    # Polling interval; unit presumably matches DEFAULT_UPDATE_INTERVAL — confirm.
    update_interval = peewee.IntegerField(default=settings.DEFAULT_UPDATE_INTERVAL)
    data = peewee.TextField()
class FeedItem(Model):
    """A single feed entry, globally unique by its entry id string."""
    id_ = peewee.TextField(unique=True)
    data = peewee.TextField()
    # Reverse accessor: Feed.entries
    feed = peewee.ForeignKeyField(Feed, related_name='entries')
# Create the tables at import time; safe=True skips tables that already exist.
_database.create_tables([Feed, FeedItem], safe=True)
def _execute_in(executor):
    """Decorator factory: run the wrapped call on *executor* and block for its result."""
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            # submit() + result() serializes the call and re-raises any exception.
            return executor.submit(function, *args, **kwargs).result()
        return wrapper
    return decorator
def _get_feed_query(url):
    """Build (without executing) a SELECT for the feed with the given URL."""
    return Feed.select().where(Feed.url == url)
def _feed_item_exists(feed, id_):
    """Return True if *feed* already stores an item with entry id *id_*.

    Fix: the original combined the two peewee expressions with Python's
    ``and``, which short-circuits to only the second expression (a non-empty
    peewee Expression is always truthy), silently dropping the feed filter.
    peewee requires the bitwise ``&`` operator with parenthesized operands.
    """
    return FeedItem.select().where(
        (FeedItem.feed == feed) & (FeedItem.id_ == id_)
    ).exists()
def _feed_exists(url):
    """Return True if a feed row with this URL is stored."""
    return _get_feed_query(url).exists()
def _get_feed(url):
    """Return the Feed row with this URL; raises Feed.DoesNotExist if absent."""
    return _get_feed_query(url).get()
@_execute_in(_database_executor)
def feed_exists(url):
    """Thread-serialized check for whether a feed with this URL is stored.

    Consistency fix: delegate to _feed_exists() — mirroring how get_feed()
    delegates to _get_feed() — instead of duplicating the query expression.
    """
    return _feed_exists(url)
@_execute_in(_database_executor)
def get_feed(url):
    """Thread-serialized fetch of the Feed row with this URL."""
    return _get_feed(url)
@_execute_in(_database_executor)
def update_feed(url, feed_data, entries):
    """Store the latest raw *feed_data* for *url* and insert any new entries.

    *entries* is an iterable of (id_, entry_data) pairs; pairs whose id is
    already stored for this feed are skipped.
    Fix: insert_many() is now only executed when there are new rows — peewee
    raises on an insert with an empty row list, which previously made every
    no-change poll fail inside the transaction.
    """
    if _feed_exists(url):
        feed = _get_feed(url)
    else:
        feed = Feed(url=url, data=feed_data)
        feed.save()
    data_source = [
        {'id_': id_, 'data': entry, 'feed': feed} for (id_, entry) in entries if not _feed_item_exists(feed, id_)
    ]
    _logger.info('Updating feed: %s with %d new entries...', url, len(data_source))
    # Single transaction: new items and the refreshed raw data commit together.
    with _database.atomic():
        if data_source:
            FeedItem.insert_many(data_source).execute()
        feed.data = feed_data
        feed.save()
@_execute_in(_database_executor)
def flush_feed(feed):
    """Delete every stored item belonging to *feed*."""
    FeedItem.delete().where(FeedItem.feed == feed).execute()
# Generic way to update data in a model instance using the write executor
@_execute_in(_database_executor)
def update_model_data(model, **kwargs):
    """Set the given attributes on *model* and save it on the DB thread."""
    for attribute, value in kwargs.items():
        setattr(model, attribute, value)
    model.save()
| {
"content_hash": "9aaea3d9d5c934a6ac3f49c64fe02b72",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 113,
"avg_line_length": 24.38679245283019,
"alnum_prop": 0.6707930367504835,
"repo_name": "cryzed/Feedbuffer",
"id": "f9a6f31c3d029e4cf431a0b19c481311db1f1006",
"size": "2585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feedbuffer/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9477"
}
],
"symlink_target": ""
} |
import pytest
from hpe_test_utils import OneViewBaseFactsTest
from oneview_module_loader import ApplianceDeviceSnmpV1TrapDestinationsFactsModule
# NOTE(review): ERROR_MSG appears unused in this module — confirm before removal.
ERROR_MSG = 'Fake message error'
# Module params missing the mandatory fields (negative-path input).
PARAMS_MANDATORY_MISSING = dict(
    config='config.json'
)
# Module params selecting a single trap destination by name.
PARAMS_GET_BY_NAME = dict(
    config='config.json',
    name='10.0.0.4'
)
# Module params with no name: fetch all trap destinations.
PARAMS_GET_ALL = dict(
    config='config.json'
)
# Canned OneView response used as the mocked resource data in the tests.
PRESENT_CONFIGURATION = [{
    "communityString": "public",
    "destination": "10.0.0.4",
    "port": 162,
    "uri": "/rest/appliance/trap-destinations/1"
}]
@pytest.mark.resource(TestApplianceDeviceSnmpV1TrapDestinationsFactsModule='appliance_device_snmp_v1_trap_destinations')
class TestApplianceDeviceSnmpV1TrapDestinationsFactsModule(OneViewBaseFactsTest):
    """Unit tests for the SNMPv1 trap destination facts module (mocked client)."""
    @pytest.fixture(autouse=True)
    def specific_set_up(self, setUp):
        # Pin the mocked OneView API version used by all tests in this class.
        self.mock_ov_client.api_version = 600
    def test_should_get_all_snmp_v1_trap_destinations(self):
        """Without a name param, the module returns every trap destination."""
        self.resource.get_all.return_value = PRESENT_CONFIGURATION
        self.mock_ansible_module.params = PARAMS_GET_ALL
        ApplianceDeviceSnmpV1TrapDestinationsFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(appliance_device_snmp_v1_trap_destinations=PRESENT_CONFIGURATION)
        )
    def test_should_get_snmp_v1_trap_destinations_by_name(self):
        """With a name param, the module returns the matching destination's data."""
        self.resource.data = PRESENT_CONFIGURATION
        self.resource.get_by_name.return_value = self.resource
        self.mock_ansible_module.params = PARAMS_GET_BY_NAME
        ApplianceDeviceSnmpV1TrapDestinationsFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(appliance_device_snmp_v1_trap_destinations=PRESENT_CONFIGURATION)
        )
# Allow running this test module directly with pytest.
if __name__ == '__main__':
    pytest.main([__file__])
| {
"content_hash": "9236c51f78f3a658d01b95a93c27aef0",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 120,
"avg_line_length": 31.233333333333334,
"alnum_prop": 0.7091782283884739,
"repo_name": "HewlettPackard/oneview-ansible",
"id": "1bdcb2cc4f515fbb8565c6817519588ce57deff4",
"size": "2533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_oneview_appliance_device_snmp_v1_trap_destinations_facts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1716153"
},
{
"name": "Shell",
"bytes": "5675"
}
],
"symlink_target": ""
} |
'''
Exodus Add-on
Copyright (C) 2017 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import pkgutil
import os.path
from resources.lib.modules import log_utils
# os.walk yields (dirpath, dirnames, filenames); the first tuple belongs to
# this package's directory, so its dirnames are the sub-package names.
__all__ = [x[1] for x in os.walk(os.path.dirname(__file__))][0]
def sources():
    """Discover and instantiate every source provider in this package.

    Walks each sub-package listed in ``__all__``, imports every module found
    and collects ``(module_name, module.source())`` pairs. Modules that fail
    to import are logged and skipped.

    Returns:
        list: ``(module_name, source instance)`` tuples; an empty list when
        enumeration itself fails.
    """
    sourceDict = []
    try:
        for package in __all__:
            package_path = os.path.join(os.path.dirname(__file__), package)
            for loader, module_name, is_pkg in pkgutil.walk_packages([package_path]):
                if is_pkg:
                    # Nested packages hold no source implementations themselves.
                    continue
                try:
                    module = loader.find_module(module_name).load_module(module_name)
                    sourceDict.append((module_name, module.source()))
                except Exception as e:
                    log_utils.log('Could not load "%s": %s' % (module_name, e), log_utils.LOGDEBUG)
        return sourceDict
    except Exception as e:
        # Previously a bare `except:` that silently swallowed every error,
        # including SystemExit/KeyboardInterrupt. Narrowed and logged so
        # enumeration failures are at least visible in the debug log.
        log_utils.log('Failed to enumerate sources: %s' % e, log_utils.LOGDEBUG)
        return []
| {
"content_hash": "91f812d8a07a366bb66d87c6600f8b95",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 115,
"avg_line_length": 33.47727272727273,
"alnum_prop": 0.6279701289884589,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "7223e7bdc6683154ce3e57aca164fc7968877f19",
"size": "1498",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "script.module.exodus/lib/resources/lib/sources/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import unittest
import logging
from testing import integration_test as integration_test
# Network used in this test
from networks import sample_network as network
class TestCmd(integration_test.IntegrationTestCase):
    """Integration test verifying that commands execute on a network host."""

    def setUp(self):
        # Test configuration: controller script and topology under test.
        self.CONTROLLER_PATH = '../../controller/routing_switch.py'
        self.NETWORK = network.Network
        super(TestCmd, self).setUp()

    def test_execCmd(self):
        output = self.execCmd("h1", "echo 'Test'")
        self.assertEqual(output, "Test", "Executing command on host is working.")
if __name__ == '__main__':
unittest.main() | {
"content_hash": "5dea267c483ce24678b7e01ce151df03",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 26.77777777777778,
"alnum_prop": 0.681881051175657,
"repo_name": "Nephelo/sdn-test-framework",
"id": "d017a1d28e25b33aac6687b1ff7c357dac961e88",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework_tests/test_cmd.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "43150"
}
],
"symlink_target": ""
} |
import logging
from hiredis import ReplyError
from models.base import db, Base
import models.node
import models.proxy
from models.proxy import TYPE_CORVUS
class StatsBase(Base):
    """Abstract ORM model recording availability statistics of a polled endpoint.

    ``addr`` holds a ``host:port`` string; ``poll_count``/``avail_count`` track
    how often the endpoint was polled and how often it answered. Subclasses
    must implement :meth:`get_endpoint`, :meth:`stats_data` and
    :meth:`_collect_stats`.
    """
    __abstract__ = True
    # "host:port" of the monitored endpoint.
    addr = db.Column('addr', db.String(255), unique=True, nullable=False)
    # Total number of polls performed.
    poll_count = db.Column('poll_count', db.Integer, nullable=False)
    # Number of polls where the endpoint was available.
    avail_count = db.Column('avail_count', db.Integer, nullable=False)
    def __init__(self, *args, **kwargs):
        Base.__init__(self, *args, **kwargs)
        self.init()
    def init(self):
        """Initialize transient (non-column) attributes.

        Called from ``__init__`` and again by :meth:`get_by`, because the ORM
        does not invoke ``__init__`` for instances loaded from the database.
        """
        self.suppress_alert = 1
        self.details = {}
        self.app = None
        self.typename = ''
        # Split "host:port" out of addr; leave both None if addr is malformed.
        if len(self.addr) > 0 and self.addr.find(':') > 0:
            self.host, port = self.addr.split(':')
            self.port = int(port)
        else:
            self.host = None
            self.port = None
        self.details['host'] = self.host
        self.details['port'] = self.port
    def get_endpoint(self):
        """Return the model object (node/proxy) this stats row refers to."""
        raise NotImplementedError()
    @classmethod
    def get_by(cls, host, port):
        """Fetch the stats row for host:port, creating it when absent."""
        addr = '%s:%d' % (host, port)
        n = db.session.query(cls).filter(cls.addr == addr).first()
        if n is None:
            n = cls(addr=addr, poll_count=0, avail_count=0)
            db.session.add(n)
            db.session.flush()
        # Re-initialize transient attributes (rows loaded from the DB skip
        # __init__, so init() must be called explicitly).
        n.init()
        return n
    def set_available(self):
        """Record a successful poll and refresh the cached status/SLA."""
        self.avail_count += 1
        self.poll_count += 1
        self.details['stat'] = True
        self.details['sla'] = self.sla()
    def set_unavailable(self):
        """Record a failed poll and refresh the cached status/SLA."""
        self.poll_count += 1
        self.details['stat'] = False
        self.details['sla'] = self.sla()
    def get(self, key, default=None):
        """Dict-style accessor for the cached details."""
        return self.details.get(key, default)
    def sla(self):
        """Return the availability ratio in [0, 1]; 0 before any poll."""
        if self.poll_count == 0:
            return 0
        return float(self.avail_count) / self.poll_count
    def stats_data(self):
        """Return the stats payload to persist; implemented by subclasses."""
        raise NotImplementedError()
    def _collect_stats(self):
        """Poll the endpoint and update details; implemented by subclasses."""
        raise NotImplementedError()
    def collect_stats(self):
        """Poll the endpoint, persist its stats, and alarm on failure."""
        try:
            self._collect_stats()
            self.app.stats_write(self.addr, self.stats_data())
        except (IOError, ValueError, LookupError, ReplyError) as e:
            logging.exception(e)
            self.set_unavailable()
            self.send_alarm(
                '%s failed: %s:%d - %s' % (
                    self.typename, self.host, self.port, e), e)
    def send_alarm(self, message, exception):
        """Forward an alarm to the app unless alerts are suppressed."""
        ep = self.get_endpoint()
        if self.suppress_alert != 1 and ep is not None:
            self.app.send_alarm(ep, message, exception)
    def add_to_db(self):
        """Queue this instance for insertion in the current session."""
        db.session.add(self)
class RedisStatsBase(StatsBase):
    """Availability statistics row for a single Redis node."""
    __tablename__ = 'redis_node_status'

    def __init__(self, *args, **kwargs):
        super(RedisStatsBase, self).__init__(*args, **kwargs)

    def init(self):
        super(RedisStatsBase, self).init()
        self.typename = 'Redis'

    def get_endpoint(self):
        """Look up the Redis node model matching this row's host/port."""
        return models.node.get_by_host_port(self.host, self.port)
class ProxyStatsBase(StatsBase):
    """Availability statistics row for a proxy (Cerberus or Corvus)."""
    __tablename__ = 'proxy_status'

    def __init__(self, *args, **kwargs):
        super(ProxyStatsBase, self).__init__(*args, **kwargs)

    def init(self):
        super(ProxyStatsBase, self).init()
        # The display name depends on what kind of proxy lives at this address.
        proxy = self.get_endpoint()
        self.typename = 'Corvus' if proxy.proxy_type == TYPE_CORVUS else 'Cerberus'

    def get_endpoint(self):
        """Look up the proxy model matching this row's host/port."""
        return models.proxy.get_by_host_port(self.host, self.port)
| {
"content_hash": "ffd18890288f3f0dfde75fd03f98924d",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 73,
"avg_line_length": 28.341463414634145,
"alnum_prop": 0.5699942627653471,
"repo_name": "HunanTV/redis-ctl",
"id": "d09847017a6f2319f0298936343c4794e888c9bc",
"size": "3486",
"binary": false,
"copies": "1",
"ref": "refs/heads/v0.9",
"path": "models/stats_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19525"
},
{
"name": "HTML",
"bytes": "153585"
},
{
"name": "JavaScript",
"bytes": "29218"
},
{
"name": "Makefile",
"bytes": "1978"
},
{
"name": "Python",
"bytes": "156198"
}
],
"symlink_target": ""
} |
import numpy
import six
import chainer
from chainer.backends import _chainerx
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
import chainerx
# Aliases
from chainer._backend import Device
from chainer.backends._chainerx import ChainerxDevice
from chainer.backends._chainerx import from_chx # NOQA
from chainer.backends._chainerx import to_chx # NOQA
from chainer.backends._cpu import CpuDevice
from chainer.backends.cuda import GpuDevice
from chainer.backends.intel64 import Intel64Device
from chainer import types # NOQA
def _contains_nan(x):
    """Returns whether the input array has NaN values.

    Args:
        x (numpy.ndarray or cupy.ndarray): Array to be checked.

    Returns:
        bool: True if the input has NaN values.
    """
    if x.dtype.kind not in ('f', 'c'):
        # Non-float/non-complex dtypes cannot represent NaN.
        return False
    device = get_device_from_array(x)
    with chainer.using_device(device):
        return device.xp.isnan(x).any()
def copyto(dst, src):
    """Copies the elements of an ndarray to those of another one.

    This function can copy the CPU/GPU arrays to the destination arrays on
    another device.

    Args:
        dst (:class:`numpy.ndarray`, :class:`cupy.ndarray`, \
        :class:`ideep4py.mdarray` or :class:`chainerx.ndarray`):
            Destination array.
        src (:class:`numpy.ndarray`, :class:`cupy.ndarray`, \
        :class:`ideep4py.mdarray` or :class:`chainerx.ndarray`):
            Source array.
    """
    if isinstance(dst, chainerx.ndarray):
        # ChainerX destination: convert src to ChainerX on dst's device and
        # assign element-wise in place.
        dst[...] = _chainerx._array_to_chainerx(src, dst.device)
        return

    if isinstance(src, chainerx.ndarray):
        # Destination is not ChainerX; unwrap the source first.
        src = from_chx(src)

    if isinstance(dst, numpy.ndarray):
        numpy.copyto(dst, _cpu._to_cpu(src))
    elif isinstance(dst, intel64.mdarray):
        intel64.ideep.basic_copyto(
            dst, _cpu._to_cpu(src))
    elif isinstance(dst, cuda.ndarray):
        if isinstance(src, chainer.get_cpu_array_types()):
            src = numpy.asarray(src)
            if dst.flags.c_contiguous or dst.flags.f_contiguous:
                # Contiguous GPU destination accepts a direct host transfer.
                dst.set(src)
            else:
                # Non-contiguous: stage src on dst's GPU, then copy on-device.
                cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))
        elif isinstance(src, cuda.ndarray):
            cuda.cupy.copyto(dst, src)
        else:
            raise TypeError('cannot copy from non-array object of type {}'
                            .format(type(src)))
    else:
        raise TypeError('cannot copy to non-array object of type {}'.format(
            type(dst)))
def _guess_device_from_array_module(xp):
    """Returns a plausible device from array module.

    .. warning::

        There can be multiple devices for a module; this picks the
        current/default one.
    """
    if xp is cuda.cupy:
        return cuda.GpuDevice(cuda.Device())
    if xp is chainerx:
        return _chainerx.ChainerxDevice(chainerx.get_default_device())
    # Cannot detect intel64, because xp of intel64 is numpy.
    return _cpu.CpuDevice()
def get_device(device_spec):
    # type: (types.DeviceSpec) -> Device
    """Returns a device object.

    Args:
        device_spec (object): Device specifier.
            If a :class:`chainer.backend.Device` instance is given, it is
            returned intact. Otherwise the following values are supported:

            * ChainerX devices

              * A string representing a device.
                (ex. ``'native:0'``, ``'native'``)
              * A :class:`chainerx.Device` object.

            * CuPy

              * A string starts with ``'@cupy:'``.
                (ex. ``'@cupy:0'``)
              * A :class:`cupy.cuda.Device` object.

            * NumPy

              * The string ``'@numpy'``.

            * NumPy with Intel Architecture

              * The string ``'@intel64'``.
    """
    if isinstance(device_spec, Device):
        return device_spec

    # Bare integer: legacy specifier (negative means CPU, >= 0 is a GPU id).
    if isinstance(device_spec, cuda._integer_types):
        return _get_device_cupy_or_numpy(device_spec)

    if chainerx.is_available() and isinstance(device_spec, chainerx.Device):
        return _chainerx.ChainerxDevice(device_spec)

    if cuda.available and isinstance(device_spec, cuda.Device):
        return cuda.GpuDevice(device_spec)

    if isinstance(device_spec, six.string_types):
        # '-1', '0', '1', ...
        try:
            int_device_spec = int(device_spec)
        except ValueError:
            pass
        else:
            return _get_device_cupy_or_numpy(int_device_spec)

        if device_spec.startswith('@'):
            # '@module:...'
            mod_name, colon, precise_spec = device_spec[1:].partition(':')

            if mod_name == 'numpy':
                if not colon:
                    return _cpu.CpuDevice()
            elif mod_name == 'cupy':
                if colon:
                    return cuda.GpuDevice.from_device_id(int(precise_spec))
            elif mod_name == 'intel64':
                if not colon:
                    return intel64.Intel64Device()
            # Recognized '@' prefix but a malformed or unknown remainder.
            raise ValueError(
                'Device specifiers starting with \'@\' must be followed by'
                ' a module name and depending on the module, module specific'
                ' precise device specifiers. Actual: {}'.format(device_spec))
        else:
            # String device specifier without '@' prefix is assumed to be a
            # ChainerX device.
            if not chainerx.is_available():
                raise RuntimeError(
                    'Tried to parse ChainerX device specifier \'{}\', '
                    'but ChainerX is not available. '
                    'Note that device specifiers without \'@\' prefix are '
                    'assumed to be ChainerX device '
                    'specifiers.'.format(device_spec))
            return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))

    raise TypeError(
        'Device specifier must be a backend.Device, cuda.Device,'
        ' chainerx.Device, integer or a string. Actual: {}'.format(
            type(device_spec)))
def _get_device_cupy_or_numpy(device_spec):
    """Maps a legacy integer device id to a device (< 0 is CPU, else GPU)."""
    if device_spec < 0:
        return _cpu.CpuDevice()
    return cuda.GpuDevice.from_device_id(device_spec)
def using_device(device_spec):
    """Context manager to apply the thread-local device state.

    Args:
        device_spec (object): Device specifier. See :func:`chainer.get_device`
            for details.

    .. admonition:: Example

        .. testcode::
            :skipif: doctest_helper.skipif_not_enough_cuda_devices(2)

            with chainer.using_device('@cupy:1'):
                a = cupy.empty((3, 2))

            assert a.device.id == 1

    """
    # TODO(niboshi): Set default device (once this concept is introduced in
    # Chainer).
    return get_device(device_spec).create_context()
def get_array_module(*args):
    """Gets an appropriate NumPy-compatible module to process arguments.

    This function will return their data arrays' array module for
    :class:`~chainer.Variable` arguments.

    Args:
        args: Values to determine whether NumPy, CuPy, or ChainerX should be
            used.

    Returns:
        module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned
        based on the types of the arguments.
    """
    chainerx_available = chainerx.is_available()
    if not (chainerx_available or cuda.available):
        # Neither accelerator backend is present; NumPy is the only option.
        return numpy
    unwrapped = []
    for arg in args:
        # Variables are inspected through their underlying data array.
        data = arg.data if isinstance(arg, chainer.variable.Variable) else arg
        if chainerx_available and isinstance(data, chainerx.ndarray):
            return chainerx
        unwrapped.append(data)
    if cuda.available:
        return cuda.cupy.get_array_module(*unwrapped)
    return numpy
def get_device_from_array(*arrays):
    """Gets the device from arrays.

    The device on which the given array reside is returned.

    .. note::

        Unlike :func:`get_array_module`, this method does not recognize
        :class:`~chainer.Variable` objects. If you need to get device from
        the :class:`~chainer.Variable` instance ``v``, use
        ``get_device_from_array(v.array)``.

    Args:
        arrays (array or list of arrays):
            Arrays to determine the device. If multiple arrays are given, the
            device corresponding to the first array which is not NumPy array
            is returned.

    Returns:
        chainer.backend.Device: Device instance.
    """
    for arr in arrays:
        gpu_device = GpuDevice.from_array(arr)
        if gpu_device is not None:
            return gpu_device
        if isinstance(arr, chainerx.ndarray):
            return ChainerxDevice(arr.device)
        intel_device = Intel64Device.from_array(arr)
        if intel_device is not None:
            return intel_device
    # Plain NumPy arrays (or no arrays at all) fall back to the CPU device.
    return CpuDevice()
| {
"content_hash": "08729c949cbfea46ebbd2d49d7fac284",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 31.724381625441698,
"alnum_prop": 0.5970149253731343,
"repo_name": "wkentaro/chainer",
"id": "a4f835eda07524fc4a473fbd0c8667257483b77c",
"size": "8978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "1662966"
},
{
"name": "CMake",
"bytes": "50912"
},
{
"name": "Cuda",
"bytes": "178765"
},
{
"name": "Dockerfile",
"bytes": "3316"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6041757"
},
{
"name": "Shell",
"bytes": "41813"
}
],
"symlink_target": ""
} |
import requests
import logging
from six.moves.urllib.parse import urljoin
from torrt.base_rpc import BaseRPC, TorrtRPCException
from torrt.utils import RPCClassesRegistry, make_soup
LOGGER = logging.getLogger(__name__)
class UTorrentRPC(BaseRPC):
    """RPC client for the uTorrent WebUI.

    See http://www.utorrent.com/community/developers/webapi for protocol
    spec details.

    idle sign: What a shame - uTorrent API is a load of mess.
    """

    alias = 'utorrent'
    token_page_path = 'token.html'

    def __init__(self, url=None, host='localhost', port=8080, user=None, password=None, enabled=False):
        """
        :param url: full WebUI URL; overrides ``host``/``port`` when given
        :param host: WebUI host, used when ``url`` is not given
        :param port: WebUI port, used when ``url`` is not given
        :param user: basic-auth user name
        :param password: basic-auth password
        :param enabled: whether this RPC client is active
        """
        self.cookies = {}
        self.user = user
        self.password = password
        self.enabled = enabled
        self.host = host
        self.port = port
        self.csrf_token = ''
        if url is not None:
            self.url = url
        else:
            self.url = 'http://%s:%s/gui/' % (host, port)

    def login(self):
        """Fetches the CSRF token and session cookie required by the WebUI.

        :raises UTorrentRPCException: if the token cannot be obtained.
        """
        try:
            response = requests.get(
                urljoin(self.url, self.token_page_path),
                auth=(self.user, self.password),
                cookies=self.cookies
            )
            # The token is served inside an HTML page as <div id="token">.
            self.csrf_token = make_soup(response.text).find(id='token').text
            if not self.csrf_token:
                raise UTorrentRPCException('Unable to fetch CSRF token.')
            self.cookies = response.cookies
        except Exception as e:
            # Bugfix: this previously used `e.message`, which raises
            # AttributeError on Python 3 (exceptions have no .message there)
            # and is unreliable on Python 2. Use the exception itself instead.
            LOGGER.error('Failed to login using `%s` RPC: %s', self.url, e)
            raise UTorrentRPCException(str(e))

    def build_params(self, action=None, params=None):
        """Returns a request parameters dict with the given action merged in."""
        document = {'action': action}
        if params is not None:
            document.update(params)
        return document

    def get_request_url(self, params):
        """Builds a request URL carrying the CSRF token and query parameters.

        ``None`` values are dropped; list values are flattened with ``&``.
        """
        rest = []
        for param_name, param_val in params.items():
            if param_val is None:
                continue
            val = '&'.join(param_val) if isinstance(param_val, list) else param_val
            rest.append('%s=%s' % (param_name, val))
        return '%s?token=%s&%s' % (self.url, self.csrf_token, '&'.join(rest))

    def query(self, data, files=None):
        """Performs an API call and returns the decoded JSON response.

        :param data: parameters dict as produced by :meth:`build_params`
        :param files: optional file payload; switches the request to POST
        :raises UTorrentRPCException: on transport errors or non-200 replies.
        """
        LOGGER.debug('RPC action `%s` ...', data['action'] or 'list')
        if not self.cookies:
            self.login()
        url = self.get_request_url(data)
        request_kwargs = {
            'cookies': self.cookies
        }
        method = requests.get
        if files is not None:
            method = requests.post
            request_kwargs['files'] = files
        try:
            response = method(url, auth=(self.user, self.password), **request_kwargs)
            if response.status_code != 200:
                raise UTorrentRPCException(response.text.strip())
        except Exception as e:
            # Bugfix: replaced Python 2-only `e.message` with str(e).
            LOGGER.error('Failed to query RPC `%s`: %s', url, e)
            raise UTorrentRPCException(str(e))
        return response.json()

    def method_get_torrents(self, hashes=None):
        """Returns a dict of torrent info keyed by hash, optionally filtered.

        :param hashes: iterable of torrent hashes to keep; None keeps all.
        """
        result = self.query(self.build_params(params={'list': 1}))
        torrents_info = {}
        for torrent_data in result['torrents']:
            if hashes is None or torrent_data[0] in hashes:
                # Positional fields of uTorrent's list API:
                # 0 - hash, 2 - name, 26 - download path.
                torrents_info[torrent_data[0]] = {
                    'hash': torrent_data[0],
                    'name': torrent_data[2],
                    'download_to': torrent_data[26]
                }
        return torrents_info

    def method_add_torrent(self, torrent, download_to=None):
        """Adds a torrent from its raw file contents.

        NB: `download_to` is ignored, as existing API approach to it is
        crippled.
        """
        file_data = {'torrent_file': ('from_torrt.torrent', torrent)}
        return self.query(self.build_params(action='add-file'), file_data)

    def method_remove_torrent(self, hash_str, with_data=False):
        """Removes a torrent by hash, optionally deleting downloaded data."""
        action = 'remove'
        if with_data:
            action = 'removedata'
        self.query(self.build_params(action=action, params={'hash': hash_str}))
        return True

    def method_get_version(self):
        """Returns the WebUI version string."""
        result = self.query(self.build_params(action='getversion'))
        return result['version']['ui_version']
class UTorrentRPCException(TorrtRPCException):
    """Raised when communication with the uTorrent WebUI fails."""
# Register the client so torrt can discover it by its `utorrent` alias.
RPCClassesRegistry.add(UTorrentRPC)
| {
"content_hash": "c7acf0574628a0647c11a5e895cfd612",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 103,
"avg_line_length": 32.64885496183206,
"alnum_prop": 0.5770399812953004,
"repo_name": "toshka/torrt",
"id": "2a70e16030c9e31385727d869973ba17d1508c35",
"size": "4277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torrt/rpc/utorrent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "112408"
}
],
"symlink_target": ""
} |
"""Implements a singleton."""
import contextlib
import logging
import os
import sys
if sys.platform == 'win32':
import ctypes
else:
import fcntl
class Singleton(object):
  """Creates an global singleton that can be held by only one process on the
  host.
  On Windows, uses a global Mutex. On others, use a flock'ed file.
  """
  def __init__(self, rootdir):
    # Normalize so different spellings of the same directory map to one key.
    rootdir = os.path.realpath(rootdir)
    self.handle = None
    if sys.platform == 'win32':
      # Use the directory name without '\\'. Enforce lowercase.
      self.key = 'Global\\' + rootdir.replace('\\', '_').lower()
    else:
      # POSIX: lock file lives inside the directory itself.
      self.key = os.path.join(rootdir, 'swarming.lck')
  def acquire(self):
    """Tries to acquire the singleton.
    Returns:
      True if there was no previous process, False if this process is a
      duplicate and should exit.
    """
    if sys.platform == 'win32':
      # Create a global mutex. Make the mutex so that it disapear automatically
      # when the process dies. The handle is not inherited so task_runner
      # doesn't get to keep it alive.
      # pylint: disable=undefined-variable
      self.handle = ctypes.windll.kernel32.CreateMutexW(
          ctypes.c_int(0), ctypes.c_int(-1),
          ctypes.create_unicode_buffer(self.key))
      last_error = ctypes.GetLastError()
      logging.info('[singleton] acquire: %s = %s ; %s', self.key, self.handle,
                   last_error)
      if not self.handle:
        return False
      # ERROR_ALREADY_EXISTS: another process already owns the mutex.
      if last_error == 183:
        self.release()
      return bool(self.handle)
    # POSIX path: take an exclusive, non-blocking flock on the lock file.
    self.handle = open(self.key, 'a+b')
    try:
      fcntl.flock(self.handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
      # There's a small race condition where it could report a previous pid.
      logging.exception('Singleton "%s" is held by "%s"', self.key,
                        self.handle.read())
      self.handle.close()
      self.handle = None
      return False
    logging.info('[singleton] acquire: %s = %s', self.key, self.handle)
    # Record our pid in the lock file for diagnostics.
    self.handle.seek(0, os.SEEK_SET)
    self.handle.truncate(0)
    self.handle.write(str(os.getpid()).encode('utf-8'))
    self.handle.flush()
    return True
  def release(self):
    """Release the singleton."""
    if not self.handle:
      return
    if sys.platform == 'win32':
      # pylint: disable=undefined-variable
      ctypes.windll.kernel32.CloseHandle(self.handle)
    else:
      # Closing the file drops the flock; best-effort removal of the file.
      self.handle.close()
      try:
        os.remove(self.key)
      except (IOError, OSError):
        pass
    self.handle = None
@contextlib.contextmanager
def singleton(rootdir):
  """Yields True when the process-wide singleton for rootdir was acquired.

  Releases the singleton on exit only when it was successfully acquired.
  """
  lock = Singleton(rootdir)
  got_it = lock.acquire()
  try:
    yield got_it
  finally:
    if got_it:
      lock.release()
| {
"content_hash": "78d10704bb2e1fb8702b3e17393a29f5",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 28.810526315789474,
"alnum_prop": 0.628425283156741,
"repo_name": "luci/luci-py",
"id": "62d26e00bcef3cb0007e3436bfc36d8d3dd2f6c4",
"size": "2911",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "appengine/swarming/swarming_bot/bot_code/singleton.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5576"
},
{
"name": "HTML",
"bytes": "1900972"
},
{
"name": "JavaScript",
"bytes": "113046"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "5885612"
},
{
"name": "Shell",
"bytes": "5183"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
import msrest.serialization
from ._edm import Collection, ComplexType, String
from .._generated.models import (
SearchField as _SearchField,
SearchIndex as _SearchIndex,
PatternTokenizer as _PatternTokenizer,
)
from ._models import (
pack_analyzer,
unpack_analyzer,
PatternTokenizer,
SearchResourceEncryptionKey,
)
if TYPE_CHECKING:
from typing import Any, Dict, List
__all__ = ("ComplexField", "SearchableField", "SimpleField")
class SearchField(msrest.serialization.Model):
# pylint: disable=too-many-instance-attributes
"""Represents a field in an index definition, which describes the name, data type, and search behavior of a field.
All required parameters must be populated in order to send to Azure.
:keyword name: Required. The name of the field, which must be unique within the fields collection
of the index or parent field.
:paramtype name: str
:keyword type: Required. The data type of the field. Possible values include: "Edm.String",
"Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset",
"Edm.GeographyPoint", "Edm.ComplexType".
:paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType
:keyword key: A value indicating whether the field uniquely identifies documents in the index.
Exactly one top-level field in each index must be chosen as the key field and it must be of
type Edm.String. Key fields can be used to look up documents directly and update or delete
specific documents. Default is false for simple fields and null for complex fields.
:paramtype key: bool
:keyword hidden: A value indicating whether the field can be returned in a search result.
You can enable this option if you want to use a field (for example, margin) as a filter,
sorting, or scoring mechanism but do not want the field to be visible to the end user. This
property must be False for key fields, and it must be null for complex fields. This property can
be changed on existing fields. Enabling this property does not cause any increase in index
storage requirements. Default is False for simple fields and null for complex fields.
:paramtype hidden: bool
:keyword searchable: A value indicating whether the field is full-text searchable. This means it
will undergo analysis such as word-breaking during indexing. If you set a searchable field to a
value like "sunny day", internally it will be split into the individual tokens "sunny" and
"day". This enables full-text searches for these terms. Fields of type Edm.String or
Collection(Edm.String) are searchable by default. This property must be false for simple fields
of other non-string data types, and it must be null for complex fields. Note: searchable fields
consume extra space in your index since Azure Cognitive Search will store an additional
tokenized version of the field value for full-text searches. If you want to save space in your
index and you don't need a field to be included in searches, set searchable to false.
:paramtype searchable: bool
:keyword filterable: A value indicating whether to enable the field to be referenced in $filter
queries. filterable differs from searchable in how strings are handled. Fields of type
Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so
comparisons are for exact matches only. For example, if you set such a field f to "sunny day",
$filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property
must be null for complex fields. Default is true for simple fields and null for complex fields.
:paramtype filterable: bool
:keyword sortable: A value indicating whether to enable the field to be referenced in $orderby
expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
users will want to sort by fields in the documents. A simple field can be sortable only if it
is single-valued (it has a single value in the scope of the parent document). Simple collection
fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex
collections are also multi-valued, and therefore cannot be sortable. This is true whether it's
an immediate parent field, or an ancestor field, that's the complex collection. Complex fields
cannot be sortable and the sortable property must be null for such fields. The default for
sortable is true for single-valued simple fields, false for multi-valued simple fields, and
null for complex fields.
:paramtype sortable: bool
:keyword facetable: A value indicating whether to enable the field to be referenced in facet
queries. Typically used in a presentation of search results that includes hit count by category
(for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or
Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple
fields.
:paramtype facetable: bool
:keyword analyzer_name: The name of the analyzer to use for the field. This option can be used only
with searchable fields and it can't be set together with either searchAnalyzer or
indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null
for complex fields. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene",
"bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-
Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft",
"cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene",
"en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft",
"fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene",
"gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene",
"is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene",
"ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft",
"lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft",
"no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-
PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft",
"ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft",
"es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft",
"th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft",
"vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern",
"simple", "stop", "whitespace".
:paramtype analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:keyword search_analyzer_name: The name of the analyzer used at search time for the field. This option
can be used only with searchable fields. It must be set together with indexAnalyzer and it
cannot be set together with the analyzer option. This property cannot be set to the name of a
language analyzer; use the analyzer property instead if you need a language analyzer. This
analyzer can be updated on an existing field. Must be null for complex fields. Possible values
include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft",
"bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-
Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft",
"da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft",
"fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft",
"de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft",
"hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene",
"ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft",
"ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft",
"ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft",
"pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene",
"pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr-
cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
"es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
:paramtype search_analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:keyword index_analyzer_name: The name of the analyzer used at indexing time for the field. This
option can be used only with searchable fields. It must be set together with searchAnalyzer and
it cannot be set together with the analyzer option. This property cannot be set to the name of
a language analyzer; use the analyzer property instead if you need a language analyzer. Once
the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields.
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene",
"bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-
Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft",
"cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft",
"en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene",
"gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft",
"he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft",
"id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft",
"ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene",
"lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene",
"fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft",
"pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr-
cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft",
"es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft",
"th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft",
"standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop",
"whitespace".
:paramtype index_analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName
:keyword normalizer_name: The name of the normalizer to use for the field. This option can be used
only with fields with filterable, sortable, or facetable enabled. Once the normalizer is
chosen, it cannot be changed for the field. Must be null for complex fields. Possible values
include: "asciifolding", "elision", "lowercase", "standard", "uppercase".
:paramtype normalizer_name: str or ~azure.search.documents.indexes.models.LexicalNormalizerName
:keyword synonym_map_names: A list of the names of synonym maps to associate with this field. This
option can be used only with searchable fields. Currently only one synonym map per field is
supported. Assigning a synonym map to a field ensures that query terms targeting that field are
expanded at query-time using the rules in the synonym map. This attribute can be changed on
existing fields. Must be null or an empty collection for complex fields.
:paramtype synonym_map_names: list[str]
:keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or
Collection(Edm.ComplexType). Must be null or empty for simple fields.
:paramtype fields: list[~azure.search.documents.models.SearchField]
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"key": {"key": "key", "type": "bool"},
"hidden": {"key": "hidden", "type": "bool"},
"searchable": {"key": "searchable", "type": "bool"},
"filterable": {"key": "filterable", "type": "bool"},
"sortable": {"key": "sortable", "type": "bool"},
"facetable": {"key": "facetable", "type": "bool"},
"analyzer_name": {"key": "analyzerName", "type": "str"},
"search_analyzer_name": {"key": "searchAnalyzerName", "type": "str"},
"index_analyzer_name": {"key": "indexAnalyzerName", "type": "str"},
"normalizer_name": {"key": "normalizerName", "type": "str"},
"synonym_map_names": {"key": "synonymMapNames", "type": "[str]"},
"fields": {"key": "fields", "type": "[SearchField]"},
}
def __init__(self, **kwargs):
super(SearchField, self).__init__(**kwargs)
self.name = kwargs["name"]
self.type = kwargs["type"]
self.key = kwargs.get("key", None)
self.hidden = kwargs.get("hidden", None)
self.searchable = kwargs.get("searchable", None)
self.filterable = kwargs.get("filterable", None)
self.sortable = kwargs.get("sortable", None)
self.facetable = kwargs.get("facetable", None)
self.analyzer_name = kwargs.get("analyzer_name", None)
self.search_analyzer_name = kwargs.get("search_analyzer_name", None)
self.index_analyzer_name = kwargs.get("index_analyzer_name", None)
self.normalizer_name = kwargs.get("normalizer_name", None)
self.synonym_map_names = kwargs.get("synonym_map_names", None)
self.fields = kwargs.get("fields", None)
def _to_generated(self):
fields = [pack_search_field(x) for x in self.fields] if self.fields else None
retrievable = not self.hidden if self.hidden is not None else None
return _SearchField(
name=self.name,
type=self.type,
key=self.key,
retrievable=retrievable,
searchable=self.searchable,
filterable=self.filterable,
sortable=self.sortable,
facetable=self.facetable,
analyzer=self.analyzer_name,
search_analyzer=self.search_analyzer_name,
index_analyzer=self.index_analyzer_name,
normalizer=self.normalizer_name,
synonym_maps=self.synonym_map_names,
fields=fields,
)
@classmethod
def _from_generated(cls, search_field):
if not search_field:
return None
# pylint:disable=protected-access
fields = (
[SearchField._from_generated(x) for x in search_field.fields]
if search_field.fields
else None
)
hidden = (
not search_field.retrievable
if search_field.retrievable is not None
else None
)
try:
normalizer = search_field.normalizer_name
except AttributeError:
normalizer = None
return cls(
name=search_field.name,
type=search_field.type,
key=search_field.key,
hidden=hidden,
searchable=search_field.searchable,
filterable=search_field.filterable,
sortable=search_field.sortable,
facetable=search_field.facetable,
analyzer_name=search_field.analyzer,
search_analyzer_name=search_field.search_analyzer,
index_analyzer_name=search_field.index_analyzer,
normalizer_name=normalizer,
synonym_map_names=search_field.synonym_maps,
fields=fields,
)
def SimpleField(**kw):
    # type: (**Any) -> SearchField
    """Configure a simple field for an Azure Search Index
    :keyword name: Required. The name of the field, which must be unique within the fields collection
    of the index or parent field.
    :paramtype name: str
    :keyword type: Required. The data type of the field. Possible values include: SearchFieldDataType.String,
    SearchFieldDataType.Int32, SearchFieldDataType.Int64, SearchFieldDataType.Double, SearchFieldDataType.Boolean,
    SearchFieldDataType.DateTimeOffset, SearchFieldDataType.GeographyPoint, SearchFieldDataType.ComplexType,
    from `azure.search.documents.SearchFieldDataType`.
    :paramtype type: str
    :keyword key: A value indicating whether the field uniquely identifies documents in the index.
    Exactly one top-level field in each index must be chosen as the key field and it must be of
    type SearchFieldDataType.String. Key fields can be used to look up documents directly and
    update or delete specific documents. Default is False
    :paramtype key: bool
    :keyword hidden: A value indicating whether the field can be returned in a search result.
    You can enable this option if you want to use a field (for example, margin) as a filter,
    sorting, or scoring mechanism but do not want the field to be visible to the end user. This
    property must be False for key fields. This property can be changed on existing fields.
    Enabling this property does not cause any increase in index storage requirements. Default is
    False.
    :paramtype hidden: bool
    :keyword filterable: A value indicating whether to enable the field to be referenced in $filter
    queries. filterable differs from searchable in how strings are handled. Fields of type
    SearchFieldDataType.String or Collection(SearchFieldDataType.String) that are filterable do
    not undergo word-breaking, so comparisons are for exact matches only. For example, if you
    set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but
    $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is False
    :paramtype filterable: bool
    :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby
    expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
    users will want to sort by fields in the documents. A simple field can be sortable only if it
    is single-valued (it has a single value in the scope of the parent document). Simple collection
    fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex
    collections are also multi-valued, and therefore cannot be sortable. This is true whether it's
    an immediate parent field, or an ancestor field, that's the complex collection. The default is
    False.
    :paramtype sortable: bool
    :keyword facetable: A value indicating whether to enable the field to be referenced in facet
    queries. Typically used in a presentation of search results that includes hit count by category
    (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
    on). Fields of type SearchFieldDataType.GeographyPoint or
    Collection(SearchFieldDataType.GeographyPoint) cannot be facetable. Default is False.
    :paramtype facetable: bool
    """
    field_args = {
        "name": kw.get("name"),
        "type": kw.get("type"),
        "key": kw.get("key", False),
        # Simple fields are never full-text searchable by construction.
        "searchable": False,
        "filterable": kw.get("filterable", False),
        "facetable": kw.get("facetable", False),
        "sortable": kw.get("sortable", False),
        "hidden": kw.get("hidden", False),
    }  # type: Dict[str, Any]
    return SearchField(**field_args)
def SearchableField(**kw):
    # type: (**Any) -> SearchField
    """Configure a searchable text field for an Azure Search Index
    :keyword name: Required. The name of the field, which must be unique within the fields collection
    of the index or parent field.
    :paramtype name: str
    :keyword collection: Whether this search field is a collection (default False)
    :paramtype collection: bool
    :keyword key: A value indicating whether the field uniquely identifies documents in the index.
    Exactly one top-level field in each index must be chosen as the key field and it must be of
    type SearchFieldDataType.String. Key fields can be used to look up documents directly and update or delete
    specific documents. Default is False
    :paramtype key: bool
    :keyword hidden: A value indicating whether the field can be returned in a search result.
    You can enable this option if you want to use a field (for example, margin) as a filter,
    sorting, or scoring mechanism but do not want the field to be visible to the end user. This
    property must be False for key fields. This property can be changed on existing fields.
    Enabling this property does not cause any increase in index storage requirements. Default is
    False.
    :paramtype hidden: bool
    :keyword searchable: A value indicating whether the field is full-text searchable. This means it
    will undergo analysis such as word-breaking during indexing. If you set a searchable field to a
    value like "sunny day", internally it will be split into the individual tokens "sunny" and
    "day". This enables full-text searches for these terms. Note: searchable fields
    consume extra space in your index since Azure Cognitive Search will store an additional
    tokenized version of the field value for full-text searches. If you want to save space in your
    index and you don't need a field to be included in searches, set searchable to false. Default
    is True.
    :paramtype searchable: bool
    :keyword filterable: A value indicating whether to enable the field to be referenced in $filter
    queries. filterable differs from searchable in how strings are handled. Fields that are
    filterable do not undergo word-breaking, so comparisons are for exact matches only. For example,
    if you set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but
    $filter=f eq 'sunny day' will. Default is False.
    :paramtype filterable: bool
    :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby
    expressions. By default Azure Cognitive Search sorts results by score, but in many experiences
    users will want to sort by fields in the documents. The default is False.
    :paramtype sortable: bool
    :keyword facetable: A value indicating whether to enable the field to be referenced in facet
    queries. Typically used in a presentation of search results that includes hit count by category
    (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so
    on). Default is False.
    :paramtype facetable: bool
    :keyword analyzer_name: The name of the analyzer to use for the field. This option can't be set together
    with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed
    for the field. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene',
    'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-
    Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft',
    'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene',
    'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft',
    'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene',
    'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene',
    'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene',
    'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft',
    'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft',
    'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-
    PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft',
    'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft',
    'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft',
    'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft',
    'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern',
    'simple', 'stop', 'whitespace'.
    :paramtype analyzer_name: str or ~azure.search.documents.indexes.models.AnalyzerName
    :keyword search_analyzer_name: The name of the analyzer used at search time for the field. It must be
    set together with indexAnalyzer and it cannot be set together with the analyzer option. This
    property cannot be set to the name of a language analyzer; use the analyzer property instead
    if you need a language analyzer. This analyzer can be updated on an existing field. Possible
    values include:
    'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft',
    'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh-
    Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft',
    'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft',
    'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft',
    'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft',
    'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene',
    'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft',
    'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft',
    'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft',
    'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene',
    'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr-
    cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft',
    'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft',
    'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft',
    'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop',
    'whitespace'.
    :paramtype search_analyzer_name: str or ~azure.search.documents.indexes.models.AnalyzerName
    :keyword index_analyzer_name: The name of the analyzer used at indexing time for the field.
    It must be set together with searchAnalyzer and it cannot be set together with the analyzer
    option. This property cannot be set to the name of a language analyzer; use the analyzer
    property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be
    changed for the field. Possible values include:
    'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene',
    'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-
    Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft',
    'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft',
    'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene',
    'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft',
    'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft',
    'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft',
    'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene',
    'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene',
    'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft',
    'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr-
    cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft',
    'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft',
    'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft',
    'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop',
    'whitespace'.
    :paramtype index_analyzer_name: str or ~azure.search.documents.indexes.models.AnalyzerName
    :keyword synonym_map_names: A list of the names of synonym maps to associate with this field. Currently
    only one synonym map per field is supported. Assigning a synonym map to a field ensures that
    query terms targeting that field are expanded at query-time using the rules in the synonym map.
    This attribute can be changed on existing fields.
    :paramtype synonym_map_names: list[str]
    """
    # A searchable field is always a string (or string collection).
    typ = Collection(String) if kw.get("collection", False) else String
    result = {
        "name": kw.get("name"),
        "type": typ,
        "key": kw.get("key", False),
        "searchable": kw.get("searchable", True),
        "filterable": kw.get("filterable", False),
        "facetable": kw.get("facetable", False),
        "sortable": kw.get("sortable", False),
        "hidden": kw.get("hidden", False),
    }  # type: Dict[str, Any]
    # Only forward analyzer/synonym settings the caller actually supplied so
    # that SearchField keeps its None defaults for the rest.
    for optional in (
        "analyzer_name",
        "search_analyzer_name",
        "index_analyzer_name",
        "synonym_map_names",
    ):
        if optional in kw:
            result[optional] = kw[optional]
    return SearchField(**result)
def ComplexField(**kw):
    # type: (**Any) -> SearchField
    """Configure a Complex or Complex collection field for an Azure Search
    Index
    :keyword name: Required. The name of the field, which must be unique within the fields collection
    of the index or parent field.
    :paramtype name: str
    :keyword collection: Whether this complex field is a collection (default False)
    :paramtype collection: bool
    :keyword fields: A list of sub-fields
    :paramtype fields: list[~search_service_client.models.Field]
    """
    # Complex fields are always of ComplexType (optionally a collection).
    if kw.get("collection", False):
        field_type = Collection(ComplexType)
    else:
        field_type = ComplexType
    return SearchField(
        name=kw.get("name"),
        type=field_type,
        fields=kw.get("fields"),
    )
class SearchIndex(msrest.serialization.Model):
    # pylint: disable=too-many-instance-attributes
    """Represents a search index definition, which describes the fields and search behavior of an index.
    All required parameters must be populated in order to send to Azure.
    :keyword name: Required. The name of the index.
    :paramtype name: str
    :keyword fields: Required. The fields of the index.
    :paramtype fields: list[~azure.search.documents.indexes.models.SearchField]
    :keyword scoring_profiles: The scoring profiles for the index.
    :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile]
    :keyword default_scoring_profile: The name of the scoring profile to use if none is specified in
    the query. If this property is not set and no scoring profile is specified in the query, then
    default scoring (tf-idf) will be used.
    :paramtype default_scoring_profile: str
    :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index.
    :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions
    :keyword suggesters: The suggesters for the index.
    :paramtype suggesters: list[~azure.search.documents.indexes.models.SearchSuggester]
    :keyword analyzers: The analyzers for the index.
    :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer]
    :keyword tokenizers: The tokenizers for the index.
    :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer]
    :keyword token_filters: The token filters for the index.
    :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter]
    :keyword char_filters: The character filters for the index.
    :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter]
    :keyword normalizers: The normalizers for the index.
    :paramtype normalizers:
    list[~azure.search.documents.indexes.models.LexicalNormalizer]
    :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
    This key is used to provide an additional level of encryption-at-rest for your data when you
    want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive
    Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive
    Search will ignore attempts to set this property to null. You can change this property as
    needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with
    customer-managed keys is not available for free search services, and is only available for paid
    services created on or after January 1, 2019.
    :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
    :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the
    documents matching a search query. The similarity algorithm can only be defined at index
    creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity
    algorithm is used.
    :paramtype similarity: ~azure.search.documents.indexes.models.SimilarityAlgorithm
    :keyword semantic_settings: Defines parameters for a search index that influence semantic capabilities.
    :paramtype semantic_settings: ~azure.search.documents.indexes.models.SemanticSettings
    :keyword e_tag: The ETag of the index.
    :paramtype e_tag: str
    """
    # msrest validation constraints: an index always has a name and fields.
    _validation = {
        "name": {"required": True},
        "fields": {"required": True},
    }
    # Maps each Python attribute to its REST API (camelCase) JSON key and
    # the msrest serialization type used on the wire.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "fields": {"key": "fields", "type": "[SearchField]"},
        "scoring_profiles": {"key": "scoringProfiles", "type": "[ScoringProfile]"},
        "default_scoring_profile": {"key": "defaultScoringProfile", "type": "str"},
        "cors_options": {"key": "corsOptions", "type": "CorsOptions"},
        "suggesters": {"key": "suggesters", "type": "[SearchSuggester]"},
        "analyzers": {"key": "analyzers", "type": "[LexicalAnalyzer]"},
        "tokenizers": {"key": "tokenizers", "type": "[LexicalTokenizer]"},
        "token_filters": {"key": "tokenFilters", "type": "[TokenFilter]"},
        "char_filters": {"key": "charFilters", "type": "[CharFilter]"},
        "normalizers": {"key": "normalizers", "type": "[LexicalNormalizer]"},
        "encryption_key": {
            "key": "encryptionKey",
            "type": "SearchResourceEncryptionKey",
        },
        "similarity": {"key": "similarity", "type": "SimilarityAlgorithm"},
        "semantic_settings": {"key": "semantic", "type": "SemanticSettings"},
        # Escaped so msrest does not treat the "." as a path separator.
        "e_tag": {"key": "@odata\\.etag", "type": "str"},
    }
    def __init__(self, **kwargs):
        """Initialize a SearchIndex.

        ``name`` and ``fields`` are required and raise ``KeyError`` when
        absent; every other attribute defaults to ``None``.
        """
        super(SearchIndex, self).__init__(**kwargs)
        self.name = kwargs["name"]
        self.fields = kwargs["fields"]
        self.scoring_profiles = kwargs.get("scoring_profiles", None)
        self.default_scoring_profile = kwargs.get("default_scoring_profile", None)
        self.cors_options = kwargs.get("cors_options", None)
        self.suggesters = kwargs.get("suggesters", None)
        self.analyzers = kwargs.get("analyzers", None)
        self.tokenizers = kwargs.get("tokenizers", None)
        self.token_filters = kwargs.get("token_filters", None)
        self.char_filters = kwargs.get("char_filters", None)
        self.normalizers = kwargs.get("normalizers", None)
        self.encryption_key = kwargs.get("encryption_key", None)
        self.similarity = kwargs.get("similarity", None)
        self.semantic_settings = kwargs.get("semantic_settings", None)
        self.e_tag = kwargs.get("e_tag", None)
    def _to_generated(self):
        """Convert this model into the autogenerated ``_SearchIndex``."""
        # Analyzers, (pattern) tokenizers and fields need per-item packing;
        # the remaining attributes pass through unchanged.
        if self.analyzers:
            analyzers = [
                pack_analyzer(x) for x in self.analyzers  # type: ignore
            ]  # mypy: ignore
        else:
            analyzers = None
        if self.tokenizers:
            # Only PatternTokenizer has a hand-written wrapper to convert.
            tokenizers = [
                x._to_generated()  # pylint:disable=protected-access
                if isinstance(x, PatternTokenizer)
                else x
                for x in self.tokenizers
            ]
        else:
            tokenizers = None
        if self.fields:
            fields = [pack_search_field(x) for x in self.fields]
        else:
            fields = None
        return _SearchIndex(
            name=self.name,
            fields=fields,
            scoring_profiles=self.scoring_profiles,
            default_scoring_profile=self.default_scoring_profile,
            cors_options=self.cors_options,
            suggesters=self.suggesters,
            analyzers=analyzers,
            tokenizers=tokenizers,
            token_filters=self.token_filters,
            char_filters=self.char_filters,
            normalizers=self.normalizers,
            # pylint:disable=protected-access
            encryption_key=self.encryption_key._to_generated()
            if self.encryption_key
            else None,
            similarity=self.similarity,
            semantic_settings=self.semantic_settings,
            e_tag=self.e_tag,
        )
    @classmethod
    def _from_generated(cls, search_index):
        """Create a ``SearchIndex`` from an autogenerated ``_SearchIndex``.

        Returns ``None`` for falsy input.
        """
        if not search_index:
            return None
        if search_index.analyzers:
            analyzers = [
                unpack_analyzer(x) for x in search_index.analyzers  # type: ignore
            ]
        else:
            analyzers = None
        if search_index.tokenizers:
            # Only PatternTokenizer has a hand-written wrapper to convert.
            tokenizers = [
                PatternTokenizer._from_generated(x)  # pylint:disable=protected-access
                if isinstance(x, _PatternTokenizer)
                else x
                for x in search_index.tokenizers
            ]
        else:
            tokenizers = None
        if search_index.fields:
            fields = [
                SearchField._from_generated(x) for x in search_index.fields  # pylint:disable=protected-access
            ]
        else:
            fields = None
        # NOTE(review): presumably tolerates generated models that predate
        # the normalizers attribute -- hence the AttributeError guard.
        try:
            normalizers = search_index.normalizers
        except AttributeError:
            normalizers = None
        return cls(
            name=search_index.name,
            fields=fields,
            scoring_profiles=search_index.scoring_profiles,
            default_scoring_profile=search_index.default_scoring_profile,
            cors_options=search_index.cors_options,
            suggesters=search_index.suggesters,
            analyzers=analyzers,
            tokenizers=tokenizers,
            token_filters=search_index.token_filters,
            char_filters=search_index.char_filters,
            normalizers=normalizers,
            # pylint:disable=protected-access
            encryption_key=SearchResourceEncryptionKey._from_generated(
                search_index.encryption_key
            ),
            similarity=search_index.similarity,
            semantic_settings=search_index.semantic_settings,
            e_tag=search_index.e_tag,
        )
def pack_search_field(search_field):
    # type: (SearchField) -> _SearchField
    """Convert a ``SearchField`` (or an equivalent plain dict) into the
    autogenerated ``_SearchField`` REST model.

    Returns ``None`` for falsy input. Dict inputs use the same keyword
    names accepted by ``SearchField`` (e.g. ``analyzer_name``,
    ``synonym_map_names``); object inputs delegate to
    ``SearchField._to_generated``.
    """
    if not search_field:
        return None
    if isinstance(search_field, dict):
        name = search_field.get("name")
        field_type = search_field.get("type")
        key = search_field.get("key")
        hidden = search_field.get("hidden")
        searchable = search_field.get("searchable")
        filterable = search_field.get("filterable")
        sortable = search_field.get("sortable")
        facetable = search_field.get("facetable")
        analyzer_name = search_field.get("analyzer_name")
        search_analyzer_name = search_field.get("search_analyzer_name")
        index_analyzer_name = search_field.get("index_analyzer_name")
        # Accept the SearchField-style "normalizer_name" key, falling back
        # to the bare "normalizer" key for backwards compatibility.
        normalizer = search_field.get(
            "normalizer_name", search_field.get("normalizer")
        )
        synonym_map_names = search_field.get("synonym_map_names")
        fields = search_field.get("fields")
        fields = [pack_search_field(x) for x in fields] if fields else None
        # Mirror SearchField._to_generated: leave retrievable unset (None)
        # when "hidden" was not provided, instead of forcing it to True.
        retrievable = not hidden if hidden is not None else None
        return _SearchField(
            name=name,
            type=field_type,
            key=key,
            retrievable=retrievable,
            searchable=searchable,
            filterable=filterable,
            sortable=sortable,
            facetable=facetable,
            analyzer=analyzer_name,
            search_analyzer=search_analyzer_name,
            index_analyzer=index_analyzer_name,
            normalizer=normalizer,
            synonym_maps=synonym_map_names,
            fields=fields,
        )
    return search_field._to_generated()  # pylint:disable=protected-access
| {
"content_hash": "0f7e3af94d0c0a1b9c4be55b9483ab21",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 118,
"avg_line_length": 60.27285714285714,
"alnum_prop": 0.6763764783958665,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d803d85a69cbb6b83b2aee2669cc9f167258c2da",
"size": "42501",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Test the Aurora ABB PowerOne Solar PV sensors."""
from datetime import timedelta
from unittest.mock import patch
from aurorapy.client import AuroraError
import pytest
from homeassistant.components.aurora_abb_powerone.const import (
ATTR_DEVICE_NAME,
ATTR_FIRMWARE,
ATTR_MODEL,
ATTR_SERIAL_NUMBER,
DEFAULT_INTEGRATION_TITLE,
DOMAIN,
)
from homeassistant.components.aurora_abb_powerone.sensor import AuroraSensor
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_ADDRESS, CONF_PORT
from homeassistant.exceptions import InvalidStateError
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
assert_setup_component,
async_fire_time_changed,
)
# Minimal YAML-style platform config used to exercise the deprecated
# `sensor:` platform setup / import flow.
TEST_CONFIG = {
    "sensor": {
        "platform": "aurora_abb_powerone",
        "device": "/dev/fakedevice0",
        "address": 2,
    }
}
def _simulated_returns(index, global_measure=None):
returns = {
3: 45.678, # power
21: 9.876, # temperature
}
return returns[index]
def _mock_config_entry():
    """Return a MockConfigEntry preloaded with fake device metadata."""
    entry_data = {
        CONF_PORT: "/dev/usb999",
        CONF_ADDRESS: 3,
        ATTR_DEVICE_NAME: "mydevicename",
        ATTR_MODEL: "mymodel",
        ATTR_SERIAL_NUMBER: "123456",
        ATTR_FIRMWARE: "1.2.3.4",
    }
    return MockConfigEntry(
        version=1,
        domain=DOMAIN,
        title=DEFAULT_INTEGRATION_TITLE,
        data=entry_data,
        source="dummysource",
        entry_id="13579",
    )
async def test_setup_platform_valid_config(hass):
    """Test that (deprecated) yaml import still works.

    Setting up via YAML should create the power sensor, and a second
    import flow for the same config should abort as already set up.
    """
    with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
        "aurorapy.client.AuroraSerialClient.measure",
        side_effect=_simulated_returns,
    ), assert_setup_component(1, "sensor"):
        assert await async_setup_component(hass, "sensor", TEST_CONFIG)
        await hass.async_block_till_done()
        # Measure index 3 returns 45.678; the state shows one decimal place.
        power = hass.states.get("sensor.power_output")
        assert power
        assert power.state == "45.7"
        # try to set up a second time - should abort.
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            data=TEST_CONFIG,
            context={"source": SOURCE_IMPORT},
        )
        assert result["type"] == "abort"
        assert result["reason"] == "already_setup"
async def test_sensors(hass):
    """Test data coming back from inverter."""
    entry = _mock_config_entry()
    with patch(
        "aurorapy.client.AuroraSerialClient.connect", return_value=None
    ), patch(
        "aurorapy.client.AuroraSerialClient.measure",
        side_effect=_simulated_returns,
    ):
        entry.add_to_hass(hass)
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        # Raw measurements are shown rounded to one decimal place.
        for entity_id, expected_state in (
            ("sensor.power_output", "45.7"),
            ("sensor.temperature", "9.9"),
        ):
            state = hass.states.get(entity_id)
            assert state
            assert state.state == expected_state
async def test_sensor_invalid_type(hass):
    """Test invalid sensor type during setup."""
    created = []
    entry = _mock_config_entry()
    with patch(
        "aurorapy.client.AuroraSerialClient.connect", return_value=None
    ), patch(
        "aurorapy.client.AuroraSerialClient.measure",
        side_effect=_simulated_returns,
    ):
        entry.add_to_hass(hass)
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        client = hass.data[DOMAIN][entry.unique_id]
        entry_data = entry.data
        # Constructing a sensor with an unknown type must raise.
        with pytest.raises(InvalidStateError):
            created.append(
                AuroraSensor(client, entry_data, "WrongSensor", "wrongparameter")
            )
async def test_sensor_dark(hass):
    """Test that darkness (no comms) is handled correctly.

    Cycles the simulated inverter through day -> night -> day -> night.
    At night the inverter stops responding (AuroraError timeout), so the
    sensor should report "unknown" rather than fail.
    """
    mock_entry = _mock_config_entry()
    utcnow = dt_util.utcnow()
    # sun is up
    with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
        "aurorapy.client.AuroraSerialClient.measure", side_effect=_simulated_returns
    ):
        mock_entry.add_to_hass(hass)
        await hass.config_entries.async_setup(mock_entry.entry_id)
        await hass.async_block_till_done()
        power = hass.states.get("sensor.power_output")
        assert power is not None
        assert power.state == "45.7"
    # sunset
    with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
        "aurorapy.client.AuroraSerialClient.measure",
        side_effect=AuroraError("No response after 10 seconds"),
    ):
        # Advance time to trigger a sensor update while comms are down.
        async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
        await hass.async_block_till_done()
        power = hass.states.get("sensor.power_output")
        assert power.state == "unknown"
    # sun rose again
    with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
        "aurorapy.client.AuroraSerialClient.measure", side_effect=_simulated_returns
    ):
        async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
        await hass.async_block_till_done()
        power = hass.states.get("sensor.power_output")
        assert power is not None
        assert power.state == "45.7"
    # sunset
    with patch("aurorapy.client.AuroraSerialClient.connect", return_value=None), patch(
        "aurorapy.client.AuroraSerialClient.measure",
        side_effect=AuroraError("No response after 10 seconds"),
    ):
        async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
        await hass.async_block_till_done()
        power = hass.states.get("sensor.power_output")
        assert power.state == "unknown"  # should this be 'available'?
async def test_sensor_unknown_error(hass):
    """Test other comms error is handled correctly."""
    entry = _mock_config_entry()
    with patch(
        "aurorapy.client.AuroraSerialClient.connect", return_value=None
    ), patch(
        "aurorapy.client.AuroraSerialClient.measure",
        side_effect=AuroraError("another error"),
    ):
        entry.add_to_hass(hass)
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        # An unexpected error prevents the entity from being created.
        assert hass.states.get("sensor.power_output") is None
| {
"content_hash": "3a88b2a98296f572e8b0822c3061c167",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 87,
"avg_line_length": 34.34054054054054,
"alnum_prop": 0.6633086730678419,
"repo_name": "aronsky/home-assistant",
"id": "26486c6a11666bf63bba9dc7116f06981ccd773f",
"size": "6353",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/aurora_abb_powerone/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""Contains routines which will add a backup cron job to prepare Oppia for a
deployment push to https://oppia.org/.
"""
import sys
import utils
# Prefix used to name every generated datastore backup.
_BACKUP_NAME_PREFIX = 'opbkp'
# Task queue that backup jobs are pushed onto, and its processing rate.
_BACKUP_EVENT_QUEUE_NAME = 'backups'
_BACKUP_EVENT_QUEUE_RATE = '5/s'
# Model kinds are packed into buckets so each backup URL stays under this.
_MAX_BACKUP_URL_LENGTH = 2000
_CRON_YAML_FILE_NAME = 'cron.yaml'
# Models excluded from backups (derived/aggregate data that can be rebuilt
# or is not worth backing up).
_OMITTED_MODELS = [
    'JobModel', 'ContinuousComputationModel', 'FeedbackAnalyticsModel',
    'ExplorationRecommendationsModel', 'TopicSimilaritiesModel',
    'ExplorationAnnotationsModel', 'StateAnswersCalcOutputModel',
    'UserRecentChangesBatchModel', 'UserStatsModel']
def generate_backup_url(cloud_storage_bucket_name, module_class_names):
    """Build the datastore-admin backup-creation URL for the given kinds.

    Each model name becomes one ``kind`` query parameter; the backup is
    written to the given Cloud Storage bucket.
    """
    joined_kinds = '&kind='.join(module_class_names)
    return (
        '/_ah/datastore_admin/backup.create?name=%s&kind=%s&queue=%s'
        '&filesystem=gs&gs_bucket_name=%s' % (
            _BACKUP_NAME_PREFIX, joined_kinds,
            _BACKUP_EVENT_QUEUE_NAME, cloud_storage_bucket_name))
def update_cron_dict(cron_dict):
    """Append weekly-backup cron entries to *cron_dict* in place.

    sys.argv[1] is the Cloud Storage bucket name; sys.argv[2:] are model
    class names (those in _OMITTED_MODELS are skipped). Model names are
    greedily packed into buckets so that each generated backup URL stays
    under _MAX_BACKUP_URL_LENGTH characters; one cron entry is added per
    bucket.
    """
    sys_args = sys.argv
    cloud_storage_bucket_name = sys_args[1]
    module_class_names = [
        module_name for module_name in sys_args[2:]
        if module_name not in _OMITTED_MODELS]

    # TODO(bhenning): Consider improving this to avoid generating a backup URL
    # for each tested subset of module_class_names.
    bucketed_module_class_names = []
    backup_urls = []
    for module_class_name in module_class_names:
        latest_bucket = []
        potential_bucket = [module_class_name]
        if bucketed_module_class_names:
            latest_bucket = bucketed_module_class_names[-1]
        # URL that would result from merging this model into the last bucket.
        merged_backup_url = generate_backup_url(
            cloud_storage_bucket_name, latest_bucket + potential_bucket)
        if not bucketed_module_class_names or len(
                merged_backup_url) >= _MAX_BACKUP_URL_LENGTH:
            # Start a new bucket. Bug fix: the stored URL must cover only
            # this bucket's models. The original code appended
            # merged_backup_url here, which (after the first bucket) also
            # contained all of the previous bucket's kinds, duplicating
            # them in the backup.
            bucketed_module_class_names.append(potential_bucket)
            backup_urls.append(generate_backup_url(
                cloud_storage_bucket_name, potential_bucket))
        else:
            bucketed_module_class_names[-1] += potential_bucket
            backup_urls[-1] = merged_backup_url

    for idx, backup_url in enumerate(backup_urls):
        cron_dict['cron'].append({
            'description': 'weekly backup (part %d/%d)' % (
                idx + 1, len(backup_urls)),
            'url': '%s' % backup_url,
            'schedule': 'every thursday 09:00',
            'target': 'ah-builtin-python-bundle'
        })
def get_cron_dict():
    """Load cron.yaml and parse it into a dict."""
    return utils.dict_from_yaml(utils.get_file_contents(_CRON_YAML_FILE_NAME))
def save_cron_dict(cron_dict):
    """Serialize *cron_dict* and write it back to cron.yaml."""
    yaml_contents = utils.yaml_from_dict(cron_dict)
    with open(_CRON_YAML_FILE_NAME, 'wt') as cron_yaml_file:
        cron_yaml_file.write(yaml_contents)
def update_yaml_files():
    """Read cron.yaml, append the backup cron entries, and write it back."""
    cron_dict = get_cron_dict()
    update_cron_dict(cron_dict)
    save_cron_dict(cron_dict)
def _prepare_for_prod():
    """Apply all yaml updates needed before a production deployment push."""
    update_yaml_files()


if __name__ == '__main__':
    _prepare_for_prod()
| {
"content_hash": "75ca7ab7c6621ad17cdb8594260b4285",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 78,
"avg_line_length": 34.01190476190476,
"alnum_prop": 0.6387819390969548,
"repo_name": "himanshu-dixit/oppia",
"id": "be10dfd2fe676cb66dde3de27bd82682a70d5526",
"size": "3480",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/prepare_automatic_backups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "101439"
},
{
"name": "HTML",
"bytes": "899603"
},
{
"name": "JavaScript",
"bytes": "2950299"
},
{
"name": "Python",
"bytes": "3818679"
},
{
"name": "Shell",
"bytes": "47818"
}
],
"symlink_target": ""
} |
from dateutil.relativedelta import relativedelta
from django.db.models import Q
from django.utils import timezone
three_years = relativedelta(years=3)
five_years = relativedelta(years=5)
one_hundred_years = relativedelta(years=100)
one_month = relativedelta(months=1)
now = timezone.now()

# Stouts aged between three and five years.
# Fix 1: close the OR group so the date filter applies to either name match
# (the original expression had an unbalanced parenthesis).
# Fix 2: __range expects (earlier, later); "3 to 5 years old" means produced
# between now - five_years and now - three_years -- the original order was
# reversed and would match nothing.
stout = (
    (Q(style__name__icontains='stout') | Q(style__parent__name__icontains='stout'))
    & Q(date_produced__range=(now - five_years, now - three_years))
)
# Very bitter IPAs brewed within the last month.
ipa = Q(date_produced__gte=(now - one_month)) & Q(ibu__gte=90)
# Long-fermented sours.
sour = Q(time_to_produce__gte=three_years)
# Belgian styles from century-old breweries located in Belgium.
belgian = (
    (Q(style__name__icontains='belgian') | Q(style__parent__name__icontains='belgian'))
    & Q(brewery__country='Belgium') & Q(brewery__established__lte=(now - one_hundred_years))
)
# Highly rated beers from any of the categories above, excluding InBev brands.
best_beers = (
    (stout | ipa | sour | belgian) & Q(avg_rating__gte=4.5)
    & ~Q(brewery__name__icontains='inbev')
)
# NOTE(review): Beer is not imported in this module -- presumably provided by
# the calling context; confirm before running this file standalone.
beers = Beer.objects.filter(best_beers)
| {
"content_hash": "0619428bb4445c7e7a130dabe2372ad4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 92,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6869565217391305,
"repo_name": "djangophx/beer-tracker",
"id": "13c3738694ebccfa05434cf3eb7251d699951260",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracker/complex_query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1457"
},
{
"name": "Python",
"bytes": "12723"
}
],
"symlink_target": ""
} |
"""adodbapi -- a pure Python PEP 249 DB-API package using Microsoft ADO
Adodbapi can be run on CPython version 2.7,
or IronPython version 2.6 and later,
or Python 3.5 and later (after filtering through 2to3.py)
"""
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: SQL
Topic :: Software Development
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Database
"""

NAME = 'adodbapi'
MAINTAINER = "Vernon Cole"
MAINTAINER_EMAIL = "vernondcole@gmail.com"
DESCRIPTION = """A pure Python package implementing PEP 249 DB-API using Microsoft ADO."""
URL = "http://sourceforge.net/projects/adodbapi"
LICENSE = 'LGPL'
# Materialize to a real list: on Python 3 filter() returns a one-shot
# iterator, which would be exhausted after a single use; a list behaves
# the same on Python 2 and 3.
CLASSIFIERS = [line for line in CLASSIFIERS.split('\n') if line]
AUTHOR = "Henrik Ekelund, Vernon Cole, et.al."
AUTHOR_EMAIL = "vernondcole@gmail.com"
PLATFORMS = ["Windows", "Linux"]

VERSION = None  # in case searching for version fails
# Find the version string in the source code. Use a context manager so the
# file handle is closed even if the scan raises.
with open('adodbapi.py') as a:
    for line in a:
        if '__version__' in line:
            VERSION = line.split("'")[1]
            print(('adodbapi version="%s"' % VERSION))
            break
##DOWNLOAD_URL = "http://sourceforge.net/projects/adodbapi/files/adodbapi/" + VERSION.rsplit('.', 1)[0] + '/adodbapi-' + VERSION + '.zip'

import sys
def setup_package():
    """Run distutils setup, translating sources with 2to3 on Python 3."""
    from distutils.core import setup

    if sys.version_info >= (3, 0):
        # Python 3 builds must pass the Python-2 sources through 2to3.
        try:
            from distutils.command.build_py import build_py_2to3 as build_py
        except ImportError:
            raise ImportError("build_py_2to3 not found in distutils - it is required for Python 3.x")
    else:
        from distutils.command.build_py import build_py

    setup_arguments = dict(
        cmdclass={'build_py': build_py},
        name=NAME,
        maintainer=MAINTAINER,
        maintainer_email=MAINTAINER_EMAIL,
        description=DESCRIPTION,
        url=URL,
        keywords='database ado odbc dbapi db-api Microsoft SQL',
        long_description=open('README.txt').read(),
        license=LICENSE,
        classifiers=CLASSIFIERS,
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        platforms=PLATFORMS,
        version=VERSION,
        package_dir={'adodbapi': ''},
        packages=['adodbapi'],
    )
    setup(**setup_arguments)
    return
# Run setup when invoked as a script (python setup.py ...).
if __name__ == '__main__':
    setup_package()
| {
"content_hash": "0520d2a49c45602ff45aadd2b5587a2e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 137,
"avg_line_length": 35.523809523809526,
"alnum_prop": 0.6400804289544236,
"repo_name": "sserrot/champion_relationships",
"id": "d0fc6ae8ef6473ab714b939d2e6d09941480f6ce",
"size": "2984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/adodbapi/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add start/end rating IntegerFields (default 1000) for both players
    on the singlesgame model."""

    dependencies = [
        ('foos', '0004_auto_20170124_1434'),
    ]

    operations = [
        migrations.AddField(
            model_name='singlesgame',
            name='player1_end_rating',
            field=models.IntegerField(default=1000),
        ),
        migrations.AddField(
            model_name='singlesgame',
            name='player1_start_rating',
            field=models.IntegerField(default=1000),
        ),
        migrations.AddField(
            model_name='singlesgame',
            name='player2_end_rating',
            field=models.IntegerField(default=1000),
        ),
        migrations.AddField(
            model_name='singlesgame',
            name='player2_start_rating',
            field=models.IntegerField(default=1000),
        ),
    ]
| {
"content_hash": "d225a99f4005bd76e1096f54c9d78370",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 52,
"avg_line_length": 27.454545454545453,
"alnum_prop": 0.5695364238410596,
"repo_name": "Magicked/tabletracker",
"id": "f459a32779eb3dfd223a36924f16fb40a7919502",
"size": "979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foos/migrations/0005_auto_20170124_1615.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45820"
},
{
"name": "HTML",
"bytes": "11799"
},
{
"name": "JavaScript",
"bytes": "97593"
},
{
"name": "Python",
"bytes": "37241"
}
],
"symlink_target": ""
} |
"""
Radar chart
"""
from __future__ import division
from pygal.graph.line import Line
from pygal.adapters import positive, none_to_zero
from pygal.view import PolarView, PolarLogView
from pygal.util import deg, cached_property, compute_scale, is_major
from math import cos, pi
class Radar(Line):
    """Kiviat (radar/spider) graph: each series is drawn as a closed
    polygon whose vertices are (radius, angle) pairs in a polar view."""

    # Radii must be positive; missing values are treated as zero.
    _adapters = [positive, none_to_zero]

    def __init__(self, *args, **kwargs):
        # Angular positions of the spokes; filled in by _compute().
        self.x_pos = None
        self._rmax = None
        super(Radar, self).__init__(*args, **kwargs)

    def _fill(self, values):
        # The polygon closes on itself (see _self_close in _compute),
        # so no extra points are needed for filling.
        return values

    def _get_value(self, values, i):
        # Only the radius (first element of the (r, theta) pair) is shown.
        return self._format(values[i][0])

    @cached_property
    def _values(self):
        """Radii of all points, from interpolated data when enabled."""
        if self.interpolate:
            return [val[0] for serie in self.series
                    for val in serie.interpolated]
        else:
            return super(Line, self)._values

    def _set_view(self):
        """Install a polar view; log scale gets its own view class."""
        if self.logarithmic:
            view_class = PolarLogView
        else:
            view_class = PolarView

        self.view = view_class(
            self.width - self.margin.x,
            self.height - self.margin.y,
            self._box)

    def _x_axis(self, draw_axes=True):
        """Draw the radial spokes and their rotated labels."""
        if not self._x_labels:
            return

        axis = self.svg.node(self.nodes['plot'], class_="axis x web")
        format_ = lambda x: '%f %f' % x
        center = self.view((0, 0))
        r = self._rmax
        for label, theta in self._x_labels:
            guides = self.svg.node(axis, class_='guides')
            end = self.view((r, theta))
            self.svg.node(
                guides, 'path',
                d='M%s L%s' % (format_(center), format_(end)),
                class_='line')
            # Place the label slightly inside the outer radius.
            r_txt = (1 - self._box.__class__.margin) * self._box.ymax
            pos_text = self.view((r_txt, theta))
            text = self.svg.node(
                guides, 'text',
                x=pos_text[0],
                y=pos_text[1])
            text.text = label
            # Rotate the label along its spoke, flipping it when it would
            # otherwise be upside down (cos(angle) < 0).
            angle = - theta + pi / 2
            if cos(angle) < 0:
                angle -= pi
            text.attrib['transform'] = 'rotate(%f %s)' % (
                deg(angle), format_(pos_text))

    def _y_axis(self, draw_axes=True):
        """Draw the concentric polygonal grid lines with radius labels."""
        if not self._y_labels:
            return

        axis = self.svg.node(self.nodes['plot'], class_="axis y web")

        for label, r in reversed(self._y_labels):
            major = is_major(r)
            guides = self.svg.node(axis, class_='guides')
            # One closed polygon per radius, touching every spoke.
            self.svg.line(
                guides, [self.view((r, theta)) for theta in self.x_pos],
                close=True,
                class_='%sguide line' % (
                    'major ' if major else ''))
            x, y = self.view((r, self.x_pos[0]))
            self.svg.node(
                guides, 'text',
                x=x - 5,
                y=y,
                class_='major' if major else ''
            ).text = label

    def _compute(self):
        """Compute point angles, optional interpolation and axis scales."""
        delta = 2 * pi / self._len if self._len else 0
        # Spokes start at 12 o'clock (pi/2) and go around; the extra
        # position closes the polygon.
        x_pos = [.5 * pi + i * delta for i in range(self._len + 1)]
        for serie in self.series:
            serie.points = [
                (v, x_pos[i])
                for i, v in enumerate(serie.values)]
            if self.interpolate:
                # Pad both ends with wrapped-around points so the
                # interpolation is smooth across the seam.
                extend = 2
                extended_x_pos = (
                    [.5 * pi + i * delta for i in range(-extend, 0)] +
                    x_pos +
                    [.5 * pi + i * delta for i in range(
                        self._len + 1, self._len + 1 + extend)])
                extended_vals = (serie.values[-extend:] +
                                 serie.values +
                                 serie.values[:extend])
                serie.interpolated = self._interpolate(
                    extended_vals, extended_x_pos, polar=True)

        # x labels space
        self._box.margin *= 2
        self._rmin = self.zero
        self._rmax = self._max or 1
        self._box.set_polar_box(self._rmin, self._rmax)

        # NOTE(review): map/zip below are lazy on Python 3; this module
        # appears to target Python 2 -- confirm before porting.
        y_pos = compute_scale(
            self._rmin, self._rmax, self.logarithmic, self.order_min,
            max_scale=8
        ) if not self.y_labels else map(int, self.y_labels)
        self._x_labels = self.x_labels and zip(self.x_labels, x_pos)
        self._y_labels = zip(map(self._format, y_pos), y_pos)
        self.x_pos = x_pos
        self._self_close = True
| {
"content_hash": "0b7e2a5b367048b5d54af6c8d474bd71",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 72,
"avg_line_length": 32.75373134328358,
"alnum_prop": 0.48712690817953974,
"repo_name": "vineethguna/heroku-buildpack-libsandbox",
"id": "54b6e63f8ed4b49ec2a5a9039d882412d9f6ccde",
"size": "5157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/pygal-0.13.0/pygal/graph/radar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "1393"
},
{
"name": "Python",
"bytes": "20214"
},
{
"name": "Ruby",
"bytes": "0"
},
{
"name": "Shell",
"bytes": "13182"
}
],
"symlink_target": ""
} |
"""
A lightweight, pure Python, numpy compliant ndarray class.
The documenation in this module is rather compact. For details on each
function, see the corresponding documentation at:
http://docs.scipy.org/doc/numpy/reference/index.html Be aware that the
behavior of tinynumpy may deviate in some ways from numpy, or that
certain features may not be supported.
"""
# todo: keep track of readonly better
# todo: mathematical operators
# todo: more methods?
# todo: logspace, meshgrid
# todo: Fortran order?
from __future__ import division
import sys
import ctypes
# Python 2/3 compat
if sys.version_info >= (3, ):
xrange = range
# Define version numer
__version__ = '0.0.1dev'
# Define dtypes: struct name, short name, numpy name, ctypes type
_dtypes = [('B', 'b1', 'bool', ctypes.c_bool),
           ('b', 'i1', 'int8', ctypes.c_int8),
           ('B', 'u1', 'uint8', ctypes.c_uint8),
           ('h', 'i2', 'int16', ctypes.c_int16),
           ('H', 'u2', 'uint16', ctypes.c_uint16),
           ('i', 'i4', 'int32', ctypes.c_int32),
           ('I', 'u4', 'uint32', ctypes.c_uint32),
           ('q', 'i8', 'int64', ctypes.c_int64),
           ('Q', 'u8', 'uint64', ctypes.c_uint64),
           ('f', 'f4', 'float32', ctypes.c_float),
           ('d', 'f8', 'float64', ctypes.c_double),
           ]

# Inject common dtype names as module-level string constants, so that
# e.g. tinynumpy.float64 == 'float64' (mirrors numpy's dtype names).
_known_dtypes = [d[2] for d in _dtypes]
for d in _known_dtypes:
    globals()[d] = d

# numpy-compatible alias used in indexing to insert a new axis.
newaxis = None
def _convert_dtype(dtype, to='numpy'):
    """Translate a dtype name to the requested naming scheme.

    ``to`` selects the column of the _dtypes table ('array', 'short',
    'numpy' or 'ctypes'). Unknown names (and None) pass through unchanged.
    """
    if dtype is None:
        return None
    name = str(dtype)
    column = {'array': 0, 'short': 1, 'numpy': 2, 'ctypes': 3}[to]
    found = next((row[column] for row in _dtypes if name in row), None)
    return name if found is None else found
def _ceildiv(a, b):
return -(-a // b)
def _get_step(view):
    """ Return step to walk over array. If 1, the array is fully
    C-contiguous. If 0, the striding is such that one cannot
    step through the array.
    """
    # Strides the array would have if it were C-contiguous.
    cont_strides = _strides_for_shape(view.shape, view.itemsize)

    # Ratio of the actual innermost stride to the contiguous one.
    step = view.strides[-1] // cont_strides[-1]
    corrected_strides = tuple([i * step for i in cont_strides])

    # If every stride is exactly `step` times the contiguous stride, the
    # whole array can be walked as a 1D slice with that step.
    almost_cont = view.strides == corrected_strides
    if almost_cont:
        return step
    else:
        return 0  # not contiguous
def _strides_for_shape(shape, itemsize):
strides = []
stride_product = 1
for s in reversed(shape):
strides.append(stride_product)
stride_product *= s
return tuple([i * itemsize for i in reversed(strides)])
def _size_for_shape(shape):
stride_product = 1
for s in shape:
stride_product *= s
return stride_product
def squeeze_strides(s):
    """ Pop strides for singular dimensions. """
    # Keep the first stride, then only strides that differ from their
    # predecessor (repeated strides come from singleton dimensions).
    kept = [s[0]]
    for previous, current in zip(s, s[1:]):
        if current != previous:
            kept.append(current)
    return tuple(kept)
def _shape_from_object(obj):
shape = []
# todo: make more efficient, use len() etc
def _shape_from_object_r(index, element, axis):
try:
for i, e in enumerate(element):
_shape_from_object_r(i, e, axis+1)
while len(shape) <= axis:
shape.append(0)
l = i + 1
s = shape[axis]
if l > s:
shape[axis] = l
except TypeError:
pass
_shape_from_object_r(0, obj, 0)
return tuple(shape)
def _assign_from_object(array, obj):
    """Copy the elements of nested sequence *obj* into *array* element-wise.

    Recurses through *obj*; when an element is not iterable (TypeError),
    it is assigned at the current multi-dimensional key.
    """
    key = []
    # todo: make more efficient, especially the try-except
    def _assign_from_object_r(element):
        try:
            for i, e in enumerate(element):
                key.append(i)
                _assign_from_object_r(e)
                key.pop()
        except TypeError:
            # Scalar leaf: write it at the accumulated index.
            array[tuple(key)] = element
    _assign_from_object_r(obj)
def _increment_mutable_key(key, shape):
    """Advance *key* (a mutable index list) one step in row-major order.

    Returns True while positions remain, False after the last position.
    """
    # Walk axes from fastest-varying (last) to slowest, carrying overflow.
    for axis in range(len(shape) - 1, -1, -1):
        key[axis] += 1
        if key[axis] < shape[axis]:
            return True
        if axis == 0:
            return False
        key[axis] = 0
def _key_for_index(index, shape):
key = []
cumshape = [1]
for i in reversed(shape):
cumshape.insert(0, cumshape[0] * i)
for s in cumshape[1:-1]:
n = index // s
key.append(n)
index -= n * s
key.append(index)
return tuple(key)
def _zerositer(n):
for i in xrange(n):
yield 0
## Public functions
def array(obj, dtype=None, copy=True, order=None):
    """ array(obj, dtype=None, copy=True, order=None)

    Create a new array. If obj is an ndarray, and copy=False, a view
    of that array is returned. For details see:
    http://docs.scipy.org/doc/numpy/reference/generated/numpy.array.html

    Three sources are supported: an existing ndarray (view/copy/convert),
    any object exposing __array_interface__ (wrapped via a ctypes buffer),
    or a nested Python sequence (shape inferred, elements copied in).
    """
    dtype = _convert_dtype(dtype)

    if isinstance(obj, ndarray):
        # From existing array
        a = obj.view()
        if dtype is not None and dtype != a.dtype:
            a = a.astype(dtype)
        elif copy:
            a = a.copy()
        return a
    if hasattr(obj, '__array_interface__'):
        # From something that looks like an array, we can create
        # the ctypes array for this and use that as a buffer
        D = obj.__array_interface__
        # Get dtype
        dtype_orig = _convert_dtype(D['typestr'][1:])
        # Create array
        if D['strides']:
            # Strided source: size the buffer from the outermost stride.
            itemsize = int(D['typestr'][-1])
            bufsize = D['strides'][0] * D['shape'][0] // itemsize
        else:
            bufsize = _size_for_shape(D['shape'])

        BufType = (_convert_dtype(dtype_orig, 'ctypes') * bufsize)
        buffer = BufType.from_address(D['data'][0])
        a = ndarray(D['shape'], dtype_orig,
                    buffer=buffer, strides=D['strides'], order=order)
        # Convert or copy?
        if dtype is not None and dtype != dtype_orig:
            a = a.astype(dtype)
        elif copy:
            a = a.copy()
        return a
    else:
        # From some kind of iterable
        shape = _shape_from_object(obj)
        # Try to derive dtype
        if dtype is None:
            # Peek at the first leaf element; ints get int64, anything
            # else falls back to the ndarray default (float64).
            el = obj
            while isinstance(el, (tuple, list)) and el:
                el = el[0]
            if isinstance(el, int):
                dtype = 'int64'
        # Create array
        a = ndarray(shape, dtype, order=None)
        _assign_from_object(a, obj)
        return a
def zeros_like(a, dtype=None, order=None):
    """ Return an array of zeros with the same shape and type as a given array.
    """
    if dtype is None:
        dtype = a.dtype
    return zeros(a.shape, dtype, order)
def ones_like(a, dtype=None, order=None):
    """ Return an array of ones with the same shape and type as a given array.
    """
    if dtype is None:
        dtype = a.dtype
    return ones(a.shape, dtype, order)
def empty_like(a, dtype=None, order=None):
    """ Return a new array with the same shape and type as a given array.
    """
    if dtype is None:
        dtype = a.dtype
    return empty(a.shape, dtype, order)
def zeros(shape, dtype=None, order=None):
    """Return a new array of given shape and type, filled with zeros
    """
    # empty() suffices: a freshly constructed ctypes buffer is
    # zero-initialized.
    return empty(shape, dtype, order)
def ones(shape, dtype=None, order=None):
    """Return a new array of given shape and type, filled with ones
    """
    out = empty(shape, dtype, order)
    out.fill(1)
    return out
def empty(shape, dtype=None, order=None):
    """Return a new array of given shape and type, without initializing entries
    """
    return ndarray(shape, dtype, order=order)
def arange(*args, **kwargs):
    """ arange([start,] stop[, step,], dtype=None)

    Return evenly spaced values within a given interval.

    Values are generated within the half-open interval ``[start, stop)``
    (in other words, the interval including `start` but excluding `stop`).
    For integer arguments the function is equivalent to the Python built-in
    `range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
    but returns an ndarray rather than a list.

    When using a non-integer step, such as 0.1, the results will often not
    be consistent. It is better to use ``linspace`` for these cases.

    Note: all positional arguments are truncated to int (see the int()
    casts below), so float starts/stops/steps are not honored.
    """
    # Get dtype
    dtype = kwargs.pop('dtype', None)
    if kwargs:
        x = list(kwargs.keys())[0]
        raise TypeError('arange() got an unexpected keyword argument %r' % x)
    # Parse start, stop, step
    if len(args) == 0:
        raise TypeError('Required argument "start" not found')
    elif len(args) == 1:
        start, stop, step = 0, int(args[0]), 1
    elif len(args) == 2:
        start, stop, step = int(args[0]), int(args[1]), 1
    elif len(args) == 3:
        start, stop, step = int(args[0]), int(args[1]), int(args[2])
    else:
        raise TypeError('Too many input arguments')
    # Init
    iter = xrange(start, stop, step)
    a = empty((len(iter),), dtype=dtype)
    a[:] = list(iter)
    return a
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
    """ linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)

    Return evenly spaced numbers over a specified interval. Returns num
    evenly spaced samples, calculated over the interval [start, stop].
    The endpoint of the interval can optionally be excluded.

    Robustness fix: num == 1 with endpoint=True used to divide by zero
    (num - 1 == 0), as did num == 0; both sizes now return the expected
    arrays ([start] and an empty array respectively).
    """
    # Prepare
    start, stop = float(start), float(stop)
    ra = stop - start
    # Guard the degenerate sizes that previously raised ZeroDivisionError.
    if num <= 0:
        step = 0.0
    elif endpoint:
        step = ra / (num - 1) if num > 1 else ra
    else:
        step = ra / num
    # Create
    a = empty((num,), dtype)
    a[:] = [start + i * step for i in xrange(num)]
    # Return
    if retstep:
        return a, step
    else:
        return a
## The class
class ndarray(object):
""" ndarray(shape, dtype='float64', buffer=None, offset=0,
strides=None, order=None)
Array class similar to numpy's ndarray, implemented in pure Python.
This class can be distinguished from a real numpy array in that
the repr always shows the dtype as a string, and for larger arrays
(more than 100 elements) it shows a short one-line repr.
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type property describes the
format of each element in the array.
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
Parameters
----------
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object contaning data, optional
Used to fill the array with data. If another ndarray is given,
the underlying data is used. Can also be a ctypes.Array or any
object that exposes the buffer interface.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional NOT SUPPORTED
Row-major or column-major order.
Attributes
----------
T : ndarray
Transpose of the array. In tinynumpy only supported for ndim <= 3.
data : buffer
The array's elements, in memory. In tinynumpy this is a ctypes array.
dtype : str
Describes the format of the elements in the array. In tinynumpy
this is a string.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : iterator object
Flattened version of the array as an iterator. In tinynumpy
the iterator cannot be indexed.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
__array_interface__ : dict
Dictionary with low level array information. Used by numpy to
turn into a real numpy array. Can also be used to give C libraries
access to the data via ctypes.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
Notes
-----
There are two modes of creating an array:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
"""
__slots__ = ['_dtype', '_shape', '_strides', '_itemsize',
'_offset', '_base', '_data']
    def __init__(self, shape, dtype='float64', buffer=None, offset=0,
                 strides=None, order=None):
        # Check order
        if order is not None:
            raise RuntimeError('ndarray order parameter is not supported')
        # Check and set shape
        assert isinstance(shape, tuple)
        assert all([isinstance(x, int) for x in shape])
        self._shape = shape
        # Check and set dtype
        dtype = _convert_dtype(dtype) if (dtype is not None) else 'float64'
        if dtype not in _known_dtypes:
            raise TypeError('data type %r not understood' % dtype)
        self._dtype = dtype
        # Itemsize is directly derived from dtype
        # (last char of the short name, e.g. 'f8' -> 8 bytes).
        self._itemsize = int(_convert_dtype(dtype, 'short')[-1])

        if buffer is None:
            # New array: we own the data, so offset/strides must be default.
            self._base = None
            # Check and set offset and strides
            assert offset == 0
            self._offset = 0
            assert strides is None
            self._strides = _strides_for_shape(self._shape, self.itemsize)

        else:
            # Existing array: this instance becomes a view on its buffer.
            if isinstance(buffer, ndarray) and buffer.base is not None:
                # Chain views directly to the root array, never to a view.
                buffer = buffer.base
            # Keep a reference to avoid memory cleanup
            self._base = buffer
            # for ndarray we use the data property
            if isinstance(buffer, ndarray):
                buffer = buffer.data
            # Check and set offset
            assert isinstance(offset, int) and offset >= 0
            self._offset = offset
            # Check and set strides
            if strides is None:
                strides = _strides_for_shape(shape, self.itemsize)
            assert isinstance(strides, tuple)
            assert all([isinstance(x, int) for x in strides])
            assert len(strides) == len(shape)
            self._strides = strides

        # Define our buffer class: size (in elements) spans the outermost
        # dimension plus the view offset.
        # NOTE(review): this indexes _strides[0]/_shape[0], so it appears
        # to assume ndim >= 1 -- confirm 0-d arrays are never constructed.
        buffersize = self._strides[0] * self._shape[0] // self._itemsize
        buffersize += self._offset
        BufferClass = _convert_dtype(dtype, 'ctypes') * buffersize
        # Create buffer
        if buffer is None:
            self._data = BufferClass()
        elif isinstance(buffer, ctypes.Array):
            # Share memory with the given ctypes array (no copy).
            self._data = BufferClass.from_address(ctypes.addressof(buffer))
        else:
            self._data = BufferClass.from_buffer(buffer)
    @property
    def __array_interface__(self):
        """ Allow converting to real numpy array, or pass pointer to C library
        http://docs.scipy.org/doc/numpy/reference/arrays.interface.html
        """
        readonly = False
        # typestr: little-endian marker + short dtype name, e.g. '<f8'.
        typestr = '<' + _convert_dtype(self.dtype, 'short')
        # Pointer: obtain the raw data address from whatever kind of
        # buffer backs this array.
        if isinstance(self._data, ctypes.Array):
            ptr = ctypes.addressof(self._data)
        elif hasattr(self._data, '__array_interface__'):
            ptr, readonly = self._data.__array_interface__['data']
        elif hasattr(self._data, 'buffer_info'):  # Python's array.array
            ptr = self._data.buffer_info()[0]
        elif isinstance(self._data, bytes):
            ptr = ctypes.cast(self._data, ctypes.c_void_p).value
            readonly = True
        else:
            raise TypeError('Cannot get address to underlying array data')
        # Advance past the view offset so consumers see element 0.
        ptr += self._offset * self.itemsize

        return dict(version=3,
                    shape=self.shape,
                    typestr=typestr,
                    descr=[('', typestr)],
                    data=(ptr, readonly),
                    strides=self.strides,
                    #offset=self._offset,
                    #mask=None,
                    )
    def __len__(self):
        # NOTE(review): returns the total element count; numpy's len()
        # returns shape[0] -- a deliberate or accidental deviation, confirm.
        return self.size
    def __getitem__(self, key):
        """Index with ints/slices/None; scalars come back as Python values,
        anything with a remaining shape comes back as a zero-copy view."""
        offset, shape, strides = self._index_helper(key)
        if not shape:
            # Return scalar
            return self._data[offset]
        else:
            # Return view
            return ndarray(shape, self.dtype,
                           offset=offset, strides=strides, buffer=self)
    def __setitem__(self, key, value):
        """Assign a scalar, sequence or array to the selection at *key*."""
        # Get info for view
        offset, shape, strides = self._index_helper(key)

        # Is this easy? A scalar target is a single buffer write.
        if not shape:
            self._data[offset] = value
            return

        # Create view to set data to
        view = ndarray(shape, self.dtype,
                       offset=offset, strides=strides, buffer=self)

        # Get data to set as a list (because getting slices from ctype
        # arrays yield lists anyway). The list is our "contiguous array"
        if isinstance(value, (float, int)):
            # Broadcast a scalar over the whole selection.
            value_list = [value] * view.size
        elif isinstance(value, (tuple, list)):
            value_list = value
        else:
            if not isinstance(value, ndarray):
                value = array(value, copy=False)
            value_list = value._toflatlist()

        # Check if size match
        if view.size != len(value_list):
            raise ValueError('Number of elements in source does not match '
                             'number of elements in target.')

        # Assign data in most efficient way that we can. This code
        # looks for the largest semi-contiguous block: the block that
        # we can access as a 1D array with a stepsize.
        subviews = [view]
        value_index = 0
        count = 0
        while subviews:
            subview = subviews.pop(0)
            step = _get_step(subview)
            if step:
                # Semi-contiguous: write the whole subview via one slice.
                block = value_list[value_index:value_index+subview.size]
                s = slice(subview._offset,
                          subview._offset + subview.size * step,
                          step)
                view._data[s] = block
                value_index += subview.size
                count += 1
            else:
                # Not contiguous: split along the first axis and retry.
                for i in range(subview.shape[0]):
                    subviews.append(subview[i])
        assert value_index == len(value_list)
def __float__(self):
if self.size == 1:
return float(self.data[self._offset])
else:
raise TypeError('Only length-1 arrays can be converted to scalar')
def __int__(self):
if self.size == 1:
return int(self.data[self._offset])
else:
raise TypeError('Only length-1 arrays can be converted to scalar')
    def __repr__(self):
        """Nested-bracket repr for small arrays; one-line summary otherwise."""
        # If more than 100 elements, show short repr
        if self.size > 100:
            shapestr = 'x'.join([str(i) for i in self.shape])
            return '<ndarray %s %s at 0x%x>' % (shapestr, self.dtype, id(self))
        # Otherwise, try to show in nice way
        def _repr_r(s, axis, offset):
            # Deeper axes get less continuation indentation (capped at 2).
            axisindent = min(2, max(0, (self.ndim - axis - 1)))
            if axis < len(self.shape):
                s += '['
                for k_index, k in enumerate(xrange(self.shape[axis])):
                    if k_index > 0:
                        s += ('\n       ' + ' ' * axis) * axisindent
                    # Walk using strides so views print correctly.
                    offset_ = offset + k * self._strides[axis] // self.itemsize
                    s = _repr_r(s, axis+1, offset_)
                    if k_index < self.shape[axis] - 1:
                        s += ', '
                s += ']'
            else:
                # Leaf element: pad floats so columns roughly align and
                # trim a trailing '.0' to '.' (numpy-like).
                r = repr(self.data[offset])
                if '.' in r:
                    r = ' ' + r
                    if r.endswith('.0'):
                        r = r[:-1]
                s += r
            return s

        s = _repr_r('', 0, self._offset)
        return "array(" + s + ", dtype='%s')" % self.dtype
    def __eq__(self, other):
        """Element-wise equality, returning a bool array (numpy-style)."""
        if other.__module__.split('.')[0] == 'numpy':
            # Let the real numpy implementation handle mixed comparisons.
            return other == self
        else:
            # NOTE(review): assumes `other` has __module__ and a `flat`
            # iterator -- i.e. is array-like; plain lists would fail here.
            out = empty(self.shape, 'bool')
            out[:] = [i1==i2 for (i1, i2) in zip(self.flat, other.flat)]
            return out
## Private helper functions
    def _index_helper(self, key):
        """Resolve an index *key* to (offset, shape, strides) for a view.

        Offset is measured in elements (not bytes). An empty shape means
        the key selected a single scalar element.
        """
        # Indexing spec is located at:
        # http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html

        # Promote to tuple.
        if not isinstance(key, tuple):
            key = (key,)

        axis = 0
        shape = []
        strides = []
        offset = self._offset

        for k in key:
            axissize = self._shape[axis]
            if isinstance(k, int):
                # Integer index: consumes the axis, only moves the offset.
                if k >= axissize:
                    raise IndexError('index %i is out of bounds for axis %i '
                                     'with size %s' % (k, axis, axissize))
                offset += k * self._strides[axis] // self.itemsize
                axis += 1
            elif isinstance(k, slice):
                # Slice: keeps the axis with adjusted extent and stride.
                start, stop, step = k.indices(self.shape[axis])
                shape.append(_ceildiv(stop - start, step))
                strides.append(step * self._strides[axis])
                offset += start * self._strides[axis] // self.itemsize
                axis += 1
            elif k is Ellipsis:
                raise TypeError("ellipsis are not supported.")
            elif k is None:
                # newaxis: insert a singleton dimension (stride 0).
                shape.append(1)
                strides.append(0)
            else:
                raise TypeError("key elements must be instaces of int or slice.")

        # Unindexed trailing axes are kept as-is.
        shape.extend(self.shape[axis:])
        strides.extend(self._strides[axis:])

        return offset, tuple(shape), tuple(strides)
    def _toflatlist(self):
        """Return all elements as a flat Python list, in logical order.

        Walks the largest semi-contiguous sub-blocks (see _get_step) so
        that data is pulled from the ctypes buffer in slices rather than
        one element at a time.
        """
        value_list = []
        subviews = [self]
        count = 0
        while subviews:
            subview = subviews.pop(0)
            step = _get_step(subview)
            if step:
                # Semi-contiguous block: one strided slice read.
                s = slice(subview._offset,
                          subview._offset + subview.size * step,
                          step)
                value_list += self._data[s]
                count += 1
            else:
                # Not contiguous: split along the first axis and retry.
                for i in range(subview.shape[0]):
                    subviews.append(subview[i])
        return value_list
## Properties
    @property
    def ndim(self):
        """Number of dimensions."""
        return len(self._shape)

    @property
    def size(self):
        """Total number of elements."""
        return _size_for_shape(self._shape)

    @property
    def nbytes(self):
        """Total buffer size in bytes (itemsize * size)."""
        return _size_for_shape(self._shape) * self.itemsize
    def _get_shape(self):
        """Shape of the array (tuple of ints)."""
        return self._shape

    def _set_shape(self, newshape):
        """In-place reshape; total size must be unchanged.

        Contiguous arrays can take any compatible shape. Non-contiguous
        views only support adding/removing singleton dimensions.
        """
        if newshape == self.shape:
            return
        if self.size != _size_for_shape(newshape):
            raise ValueError('Total size of new array must be unchanged')
        if _get_step(self) == 1:
            # Contiguous, hooray!
            self._shape = tuple(newshape)
            self._strides = _strides_for_shape(self._shape, self.itemsize)
            return

        # Else, try harder ... This code supports adding /removing
        # singleton dimensions. Although it may sometimes be possible
        # to split a dimension in two if the contiguous blocks allow
        # this, we don't bother with such complex cases for now.
        # Squeeze shape / strides
        N = self.ndim
        shape = [self.shape[i] for i in range(N) if self.shape[i] > 1]
        strides = [self.strides[i] for i in range(N) if self.shape[i] > 1]
        # Check if squeezed shapes match: the non-singleton extents must
        # be identical, or the reshape cannot be expressed with strides.
        newshape_ = [newshape[i] for i in range(len(newshape))
                     if newshape[i] > 1]
        if newshape_ != shape:
            raise AttributeError('incompatible shape for non-contiguous array')
        # Modify to make this data work in loop
        strides.append(strides[-1])
        shape.append(1)
        # Form new strides: singleton axes get a stride spanning the next
        # inner block; real axes reuse their existing stride.
        i = -1
        newstrides = []
        try:
            for s in reversed(newshape):
                if s == 1:
                    newstrides.append(strides[i] * shape[i])
                else:
                    i -= 1
                    newstrides.append(strides[i])
        except IndexError:
            # Fail
            raise AttributeError('incompatible shape for non-contiguous array')
        else:
            # Success
            newstrides.reverse()
            self._shape = tuple(newshape)
            self._strides = tuple(newstrides)

    shape = property(_get_shape, _set_shape)  # Python 2.5 compat (e.g. Jython)
    @property
    def strides(self):
        """ Tuple of bytes to step in each dimension when traversing. """
        return self._strides
    @property
    def dtype(self):
        """ Data-type string of the array's elements. """
        return self._dtype
    @property
    def itemsize(self):
        """ Length of one array element in bytes. """
        return self._itemsize
    @property
    def base(self):
        """ Base object if this array is a view; None if it owns its data. """
        return self._base
    @property
    def data(self):
        """ The underlying data buffer. """
        return self._data
@property
def flat(self):
subviews = [self]
count = 0
while subviews:
subview = subviews.pop(0)
step = _get_step(subview)
if step:
s = slice(subview._offset,
subview._offset + subview.size * step,
step)
for i in self._data[s]:
yield i
else:
for i in range(subview.shape[0]):
subviews.append(subview[i])
@property
def T(self):
if self.ndim < 2:
return self
else:
return self.transpose()
    @property
    def flags(self):
        """ Dict with information about the memory layout of the array. """
        # An array is C-contiguous iff its data is one unit-step run.
        c_cont = _get_step(self) == 1
        return dict(C_CONTIGUOUS=c_cont,
                    F_CONTIGUOUS=(c_cont and self.ndim < 2),
                    OWNDATA=(self._base is None),
                    WRITEABLE=True, # todo: fix this
                    ALIGNED=c_cont, # todo: different from contiguous?
                    UPDATEIFCOPY=False, # We don't support this feature
                   )
    ## Methods - management
    def fill(self, value):
        """ Fill the array with the given scalar value.

        NOTE(review): uses assert for input validation, which is stripped
        under -O; left unchanged since callers may rely on AssertionError.
        """
        assert isinstance(value, (int, float))
        self[:] = value
def clip(self, a_min, a_max, out=None):
if out is None:
out = empty(self.shape, self.dtype)
L = self._toflatlist()
L = [min(a_max, max(a_min, x)) for x in L]
out[:] = L
return out
    def copy(self):
        """ Return a contiguous copy of the array. """
        out = empty(self.shape, self.dtype)
        out[:] = self
        return out
    def flatten(self):
        """ Return a flattened (1-D) copy of the array. """
        out = empty((self.size,), self.dtype)
        out[:] = self
        return out
    def ravel(self):
        """ Return a flattened (1-D) array; a view when reshaping allows. """
        return self.reshape((self.size, ))
def repeat(self, repeats, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
out = empty((self.size * repeats,), self.dtype)
for i in range(repeats):
out[i*self.size:(i+1)*self.size] = self
return out
    def reshape(self, newshape):
        """ Return an array with the same data and the given shape.

        Returns a view when the data layout allows it, otherwise a copy.
        """
        out = self.view()
        try:
            out.shape = newshape
        except AttributeError:
            # Non-contiguous data that cannot be reshaped in-place: copy.
            out = self.copy()
            out.shape = newshape
        return out
def transpose(self):
# Numpy returns a view, but we cannot do that since we do not
# support Fortran ordering
ndim = self.ndim
if ndim < 2:
return self.view()
shape = self.shape[::-1]
out = empty(shape, self.dtype)
#
if ndim == 2:
for i in xrange(self.shape[0]):
out[:, i] = self[i, :]
elif ndim == 3:
for i in xrange(self.shape[0]):
for j in xrange(self.shape[1]):
out[:, j, i] = self[i, j, :]
else:
raise ValueError('Tinynumpy supports transpose up to ndim=3')
return out
def astype(self, dtype):
out = empty(self.shape, dtype)
out[:] = self
    def view(self, dtype=None, type=None):
        """ Return a new view of the array sharing the same data.

        If *dtype* differs from the current dtype the buffer is
        reinterpreted, which is only supported for 1-D arrays.
        The *type* argument is accepted for numpy compatibility but unused.

        Raises:
            ValueError: if a dtype change is requested on a >1-D array.
        """
        if dtype is None:
            dtype = self.dtype
        if dtype == self.dtype:
            # Same dtype: plain view with identical geometry.
            return ndarray(self.shape, dtype, buffer=self,
                           offset=self._offset, strides=self.strides)
        elif self.ndim == 1:
            # Reinterpret the raw bytes with the new element size.
            itemsize = int(_convert_dtype(dtype, 'short')[-1])
            size = self.nbytes // itemsize
            offsetinbytes = self._offset * self.itemsize
            offset = offsetinbytes // itemsize
            return ndarray((size, ), dtype, buffer=self, offset=offset)
        else:
            raise ValueError('new type not compatible with array.')
## Methods - statistics
# We use the self.flat generator here. self._toflatlist() would be
# faster, but it might take up significantly more memory.
def all(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
return all(self.flat)
def any(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
return any(self.flat)
def min(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
return min(self.flat)
def max(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
return max(self.flat)
#return max(self._toflatlist()) # almost twice as fast
def sum(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
return sum(self.flat)
def prod(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
p = 1.0
for i in self.flat:
p *= float(i)
return p
def mean(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
return self.sum() / self.size
def argmax(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
r = self[[0 for i in range(self.ndim)]]
r_index = 0
for i_index, i in enumerate(self.flat):
v = float(i)
if v > r:
r = v
r_index = i_index
return r_index
def argmin(self, axis=None):
if axis:
raise (TypeError, "axis argument is not supported")
r = self[[0 for i in range(self.ndim)]]
r_index = 0
for i_index, i in enumerate(self.flat):
v = float(i)
if v < r:
r = v
r_index = i_index
return r_index
def cumprod(self, axis=None, out=None):
if axis:
raise (TypeError, "axis argument is not supported")
if out is None:
out = empty((self.size,), self.dtype)
p = 1.0
L = []
for x in self.flat:
p *= x
L.append(p)
out[:] = L
return out
def cumsum(self, axis=None, out=None):
if axis:
raise (TypeError, "axis argument is not supported")
if out is None:
out = empty((self.size,))
L = []
for x in self.flat:
p += x
L.append(p)
out[:] = L
return out
| {
"content_hash": "b77d42582c83357870b1a49c41b19e45",
"timestamp": "",
"source": "github",
"line_count": 991,
"max_line_length": 81,
"avg_line_length": 32.90817356205853,
"alnum_prop": 0.5425916840426837,
"repo_name": "almarklein/tinynumpy",
"id": "5ff200f5c25f18d464c680d46d4dba50ecff15dc",
"size": "33886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinynumpy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5576"
},
{
"name": "Python",
"bytes": "86136"
},
{
"name": "Shell",
"bytes": "5094"
}
],
"symlink_target": ""
} |
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3CAPModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_alert_list_layout",
#"cap_gis_location_xml_post_parse",
#"cap_gis_location_xml_post_render",
)
import datetime
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ("cap_alert",
"cap_alert_represent",
"cap_warning_priority",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
# http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.amberAlert": T("Child Abduction Emergency"),
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
# http://en.wikipedia.org/wiki/Silver_Alert
"missingPerson.silver": T("Missing Senior Citizen"),
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# ---------------------------------------------------------------------
# CAP alerts
#
# CAP alert Status Code (status)
cap_alert_status_code_opts = OrderedDict([
("Actual", T("Actual - actionable by all targeted recipients")),
("Exercise", T("Exercise - only for designated participants (decribed in note)")),
("System", T("System - for internal functions")),
("Test", T("Test - testing, all recipients disregard")),
("Draft", T("Draft - not actionable in its current form")),
])
# CAP alert message type (msgType)
cap_alert_msgType_code_opts = OrderedDict([
("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
("Update", T("Update: Update and supercede earlier message(s)")),
("Cancel", T("Cancel: Cancel earlier message(s)")),
("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
("Error", T("Error: Indicate rejection of the message(s)")),
])
# CAP alert scope
cap_alert_scope_code_opts = OrderedDict([
("Public", T("Public - unrestricted audiences")),
("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
("Private", T("Private - only to specified addresses (mentioned as recipients)"))
])
# CAP info categories
cap_info_category_opts = OrderedDict([
("Geo", T("Geophysical (inc. landslide)")),
("Met", T("Meteorological (inc. flood)")),
("Safety", T("General emergency and public safety")),
("Security", T("Law enforcement, military, homeland and local/private security")),
("Rescue", T("Rescue and recovery")),
("Fire", T("Fire suppression and rescue")),
("Health", T("Medical and public health")),
("Env", T("Pollution and other environmental")),
("Transport", T("Public and private transportation")),
("Infra", T("Utility, telecommunication, other non-transport infrastructure")),
("CBRNE", T("Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
("Other", T("Other events")),
])
tablename = "cap_alert"
define_table(tablename,
Field("is_template", "boolean",
readable = False,
writable = True,
),
Field("template_id", "reference cap_alert",
label = T("Template"),
ondelete = "RESTRICT",
represent = self.template_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
comment = T("Apply a template"),
),
Field("template_title",
label = T("Template Title"),
),
Field("template_settings", "text",
default = "{}",
readable = False,
),
Field("identifier", unique=True, length=128,
default = self.generate_identifier,
label = T("Identifier"),
),
Field("sender",
label = T("Sender"),
default = self.generate_sender,
# @todo: can not be empty in alerts (validator!)
),
s3_datetime("sent",
default = "now",
writable = False,
),
Field("status",
default = "Draft",
label = T("Status"),
requires = IS_IN_SET(cap_alert_status_code_opts),
),
Field("msg_type",
label = T("Message Type"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_msgType_code_opts)
),
),
Field("source",
label = T("Source"),
default = self.generate_source,
),
Field("scope",
label = T("Scope"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_scope_code_opts)
),
),
# Text describing the restriction for scope=restricted
Field("restriction", "text",
label = T("Restriction"),
),
Field("addresses", "list:string",
label = T("Recipients"),
represent = self.list_string_represent,
#@ToDo: provide a better way to add multiple addresses,
# do not ask the user to delimit it themselves
# this should eventually use the CAP contacts
#widget = S3CAPAddressesWidget,
),
Field("codes", "text",
default = settings.get_cap_codes(),
label = T("Codes"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
Field("note", "text",
label = T("Note"),
),
Field("reference", "list:reference cap_alert",
label = T("Reference"),
represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ",
multiple = True,
),
# @ToDo: This should not be manually entered,
# needs a widget
#widget = S3ReferenceWidget(table,
# one_to_many=True,
# allow_create=False),
),
# @ToDo: Switch to using event_incident_type_id
Field("incidents", "list:string",
label = T("Incidents"),
represent = S3Represent(options = cap_incident_type_opts,
multiple = True),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_incident_type_opts,
multiple = True,
sort = True,
)),
widget = S3MultiSelectWidget(),
),
# approved_on field for recording when the alert was approved
s3_datetime("approved_on",
readable = False,
writable = False,
),
*s3_meta_fields())
filter_widgets = [
S3TextFilter(["identifier",
"sender",
"incidents",
"cap_info.headline",
"cap_info.event_type_id",
],
label = T("Search"),
comment = T("Search for an Alert by sender, incident, headline or event."),
),
S3OptionsFilter("info.category",
label = T("Category"),
options = cap_info_category_opts,
),
S3LocationFilter("location.location_id",
label = T("Location(s)"),
# options = gis.get_countries().keys(),
),
S3OptionsFilter("info.language",
label = T("Language"),
),
]
configure(tablename,
context = {"location": "location.location_id",
},
filter_widgets = filter_widgets,
list_layout = cap_alert_list_layout,
list_orderby = "cap_info.expires desc",
onvalidation = self.cap_alert_form_validation,
# update the approved_on field on approve of the alert
onapprove = self.cap_alert_approve,
)
# Components
add_components(tablename,
cap_area = "alert_id",
cap_area_location = {"name": "location",
"joinby": "alert_id",
},
cap_info = "alert_id",
cap_resource = "alert_id",
)
self.set_method("cap", "alert",
method = "import_feed",
action = CAPImportFeed())
if crud_strings["cap_template"]:
crud_strings[tablename] = crud_strings["cap_template"]
else:
ADD_ALERT = T("Create Alert")
crud_strings[tablename] = Storage(
label_create = ADD_ALERT,
title_display = T("Alert Details"),
title_list = T("Alerts"),
# If already-published, this should create a new "Update"
# alert instead of modifying the original
title_update = T("Edit Alert"),
title_upload = T("Import Alerts"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert created"),
msg_record_modified = T("Alert modified"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No alerts to show"))
alert_represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ")
alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
comment = T("The alert message containing this information"),
label = T("Alert"),
ondelete = "CASCADE",
represent = alert_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
alert_represent)),
)
# ---------------------------------------------------------------------
# CAP info segments
#
cap_info_responseType_opts = OrderedDict([
("Shelter", T("Shelter - Take shelter in place or per instruction")),
("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
("Prepare", T("Prepare - Make preparations per the instruction")),
("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
("Monitor", T("Monitor - Attend to information sources as described in instruction")),
("Assess", T("Assess - Evaluate the information in this message.")),
("AllClear", T("AllClear - The subject event no longer poses a threat")),
("None", T("None - No action recommended")),
])
cap_info_urgency_opts = OrderedDict([
("Immediate", T("Response action should be taken immediately")),
("Expected", T("Response action should be taken soon (within next hour)")),
("Future", T("Responsive action should be taken in the near future")),
("Past", T("Responsive action is no longer required")),
("Unknown", T("Unknown")),
])
cap_info_severity_opts = OrderedDict([
("Extreme", T("Extraordinary threat to life or property")),
("Severe", T("Significant threat to life or property")),
("Moderate", T("Possible threat to life or property")),
("Minor", T("Minimal to no known threat to life or property")),
("Unknown", T("Severity unknown")),
])
cap_info_certainty_opts = OrderedDict([
("Observed", T("Observed: determined to have occurred or to be ongoing")),
("Likely", T("Likely (p > ~50%)")),
("Possible", T("Possible but not likely (p <= ~50%)")),
("Unlikely", T("Not expected to occur (p ~ 0)")),
("Unknown", T("Certainty unknown")),
])
# ---------------------------------------------------------------------
# Warning Priorities for CAP
tablename = "cap_warning_priority"
define_table(tablename,
Field("priority_rank", "integer",
label = T("Priority Rank"),
length = 2,
),
Field("event_code",
label = T("Event Code"),
),
Field("name", notnull = True, length = 64,
label = T("Name"),
),
Field("event_type",
label = T("Event Type"),
),
Field("urgency",
label = T("Urgency"),
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
label = T("Severity"),
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
label = T("Certainty"),
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("color_code",
label = T("Color Code"),
),
*s3_meta_fields())
priority_represent = S3Represent(lookup = tablename)
crud_strings[tablename] = Storage(
label_create = T("Create Warning Priority"),
title_display = T("Warning Priority Details"),
title_list = T("Warning Priorities"),
title_update = T("Edit Warning Priority"),
title_upload = T("Import Warning Priorities"),
label_list_button = T("List Warning Priorities"),
label_delete_button = T("Delete Warning Priority"),
msg_record_created = T("Warning Priority added"),
msg_record_modified = T("Warning Priority updated"),
msg_record_deleted = T("Warning Priority removed"),
msg_list_empty = T("No Warning Priorities currently registered")
)
# ---------------------------------------------------------------------
# CAP info priority
# @ToDo: i18n: Need label=T("")
tablename = "cap_info"
define_table(tablename,
alert_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("template_info_id", "reference cap_info",
ondelete = "RESTRICT",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
widget = S3HiddenWidget(),
),
Field("template_settings", "text",
readable = False,
),
Field("language",
default = "en",
requires = IS_EMPTY_OR(
IS_IN_SET(settings.get_cap_languages())
),
),
Field("category", "list:string",
represent = S3Represent(options = cap_info_category_opts,
multiple = True,
),
required = True,
requires = IS_IN_SET(cap_info_category_opts,
multiple = True,
),
widget = S3MultiSelectWidget(),
), # 1 or more allowed
self.event_type_id(empty = False,
script = '''
$.filterOptionsS3({
'trigger':'event_type_id',
'target':'priority',
'lookupURL':S3.Ap.concat('/cap/priority_get/'),
'lookupResource':'event_type'
})'''
),
Field("response_type", "list:string",
represent = S3Represent(options = cap_info_responseType_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_responseType_opts,
multiple = True),
widget = S3MultiSelectWidget(),
), # 0 or more allowed
Field("priority",
represent = priority_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(
db, "cap_warning_priority.id",
priority_represent
),
),
),
Field("urgency",
required = True,
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
required = True,
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
required = True,
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("audience", "text"),
Field("event_code", "text",
default = settings.get_cap_event_codes(),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
s3_datetime("effective",
default = "now",
),
s3_datetime("onset"),
s3_datetime("expires",
past = 0,
),
Field("sender_name"),
Field("headline"),
Field("description", "text"),
Field("instruction", "text"),
Field("contact", "text"),
Field("web",
requires = IS_EMPTY_OR(IS_URL()),
),
Field("parameter", "text",
default = settings.get_cap_parameters(),
label = T("Parameters"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
*s3_meta_fields())
# @ToDo: Move labels into main define_table (can then be lazy & performs better anyway)
info_labels = cap_info_labels()
for field in info_labels:
db.cap_info[field].label = info_labels[field]
if crud_strings["cap_template_info"]:
crud_strings[tablename] = crud_strings["cap_template_info"]
else:
ADD_INFO = T("Add alert information")
crud_strings[tablename] = Storage(
label_create = ADD_INFO,
title_display = T("Alert information"),
title_list = T("Information entries"),
title_update = T("Update alert information"), # this will create a new "Update" alert?
title_upload = T("Import alert information"),
subtitle_list = T("Listing of alert information items"),
label_list_button = T("List information entries"),
label_delete_button = T("Delete Information"),
msg_record_created = T("Alert information created"),
msg_record_modified = T("Alert information modified"),
msg_record_deleted = T("Alert information deleted"),
msg_list_empty = T("No alert information to show"))
info_represent = S3Represent(lookup = tablename,
fields = ["language", "headline"],
field_sep = " - ")
info_id = S3ReusableField("info_id", "reference %s" % tablename,
label = T("Information Segment"),
ondelete = "CASCADE",
represent = info_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
info_represent)
),
sortby = "identifier",
)
configure(tablename,
#create_next = URL(f="info", args=["[id]", "area"]),
onaccept = self.info_onaccept,
)
# Components
add_components(tablename,
cap_resource = "info_id",
cap_area = "info_id",
)
# ---------------------------------------------------------------------
# CAP Resource segments
#
# Resource elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
tablename = "cap_resource"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
self.super_link("doc_id", "doc_entity"),
Field("resource_desc",
requires = IS_NOT_EMPTY(),
),
Field("mime_type",
requires = IS_NOT_EMPTY(),
),
Field("size", "integer",
writable = False,
),
Field("uri",
# needs a special validation
writable = False,
),
#Field("file", "upload"),
Field("deref_uri", "text",
readable = False,
writable = False,
),
Field("digest",
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Resource"),
title_display = T("Alert Resource"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
subtitle_list = T("List Resources"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently defined for this alert"))
# @todo: complete custom form
crud_form = S3SQLCustomForm(#"name",
"info_id",
"resource_desc",
S3SQLInlineComponent("image",
label=T("Image"),
fields=["file",
],
),
S3SQLInlineComponent("document",
label=T("Document"),
fields=["file",
],
),
)
configure(tablename,
super_entity = "doc_entity",
crud_form = crud_form,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# CAP Area segments
#
# Area elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
# Each <area> can have multiple elements which are one of <polygon>,
# <circle>, or <geocode>.
# <polygon> and <circle> are explicit geometry elements.
# <geocode> is a key-value pair in which the key is a standard
# geocoding system like SAME, FIPS, ZIP, and the value is a defined
# value in that system. The region described by the <area> is the
# union of the areas described by the individual elements, but the
# CAP spec advises that, if geocodes are included, the concrete
# geometry elements should outline the area specified by the geocodes,
# as not all recipients will have access to the meanings of the
# geocodes. However, since geocodes are a compact way to describe an
# area, it may be that they will be used without accompanying geometry,
# so we should not count on having <polygon> or <circle>.
#
# Geometry elements are each represented by a gis_location record, and
# linked to the cap_area record via the cap_area_location link table.
# For the moment, <circle> objects are stored with the center in the
# gis_location's lat, lon, and radius (in km) as a tag "radius" and
# value. ToDo: Later, we will add CIRCLESTRING WKT.
#
# Geocode elements are currently stored as key value pairs in the
# cap_area record.
#
# <area> can also specify a minimum altitude and maximum altitude
# ("ceiling"). These are stored in explicit fields for now, but could
# be replaced by key value pairs, if it is found that they are rarely
# used.
#
# (An alternative would be to have cap_area link to a gis_location_group
# record. In that case, the geocode tags could be stored in the
# gis_location_group's overall gis_location element's tags. The altitude
# could be stored in the overall gis_location's elevation, with ceiling
# stored in a tag. We could consider adding a maximum elevation field.)
tablename = "cap_area"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
Field("name",
label = T("Area description"),
required = True,
),
Field("altitude", "integer"), # Feet above Sea-level in WGS84 (Specific or Minimum is using a range)
Field("ceiling", "integer"), # Feet above Sea-level in WGS84 (Maximum)
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Area"),
title_display = T("Alert Area"),
title_list = T("Areas"),
title_update = T("Edit Area"),
subtitle_list = T("List Areas"),
label_list_button = T("List Areas"),
label_delete_button = T("Delete Area"),
msg_record_created = T("Area added"),
msg_record_modified = T("Area updated"),
msg_record_deleted = T("Area deleted"),
msg_list_empty = T("No areas currently defined for this alert"))
crud_form = S3SQLCustomForm("name",
"info_id",
# Not yet working with default formstyle or multiple=True
#S3SQLInlineComponent("location",
# name = "location",
# label = "",
# multiple = False,
# fields = [("", "location_id")],
# ),
S3SQLInlineComponent("tag",
name = "tag",
label = "",
fields = ["tag",
"value",
],
),
"altitude",
"ceiling",
)
area_represent = S3Represent(lookup=tablename)
configure(tablename,
#create_next = URL(f="area", args=["[id]", "location"]),
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
crud_form = crud_form,
)
# Components
add_components(tablename,
cap_area_location = {"name": "location",
"joinby": "area_id",
},
cap_area_tag = {"name": "tag",
"joinby": "area_id",
},
)
area_id = S3ReusableField("area_id", "reference %s" % tablename,
label = T("Area"),
ondelete = "CASCADE",
represent = area_represent,
requires = IS_ONE_OF(db, "cap_area.id",
area_represent),
)
# ToDo: Use a widget tailored to entering <polygon> and <circle>.
# Want to be able to enter them by drawing on the map.
# Also want to allow selecting existing locations that have
# geometry, maybe with some filtering so the list isn't cluttered
# with irrelevant locations.
tablename = "cap_area_location"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
self.gis_location_id(
widget = S3LocationSelector(points = False,
polygons = True,
show_map = True,
catalog_layers = True,
show_address = False,
show_postcode = False,
),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Location"),
title_display = T("Alert Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
subtitle_list = T("List Locations"),
label_list_button = T("List Locations"),
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No locations currently defined for this alert"))
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# Area Tags
# - Key-Value extensions
# - Used to hold for geocodes: key is the geocode system name, and
# value is the specific value for this area.
# - Could store other values here as well, to avoid dedicated fields
# in cap_area for rarely-used items like altitude and ceiling, but
# would have to distinguish those from geocodes.
#
# ToDo: Provide a mechanism for pre-loading geocodes that are not tied
# to individual areas.
# ToDo: Allow sharing the key-value pairs. Cf. Ruby on Rails tagging
# systems such as acts-as-taggable-on, which has a single table of tags
# used by all classes. Each tag record has the class and field that the
# tag belongs to, as well as the tag string. We'd want tag and value,
# but the idea is the same: There would be a table with tag / value
# pairs, and individual cap_area, event_event, org_whatever records
# would link to records in the tag table. So we actually would not have
# duplicate tag value records as we do now.
tablename = "cap_area_tag"
define_table(tablename,
area_id(),
# ToDo: Allow selecting from a dropdown list of pre-defined
# geocode system names.
Field("tag",
label = T("Geocode Name"),
),
# ToDo: Once the geocode system is selected, fetch a list
# of current values for that geocode system. Allow adding
# new values, e.g. with combo box menu.
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
#configure(tablename,
# deduplicate = self.cap_area_tag_deduplicate,
# )
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(cap_alert_id = alert_id,
cap_alert_represent = alert_represent,
cap_area_represent = area_represent,
cap_info_represent = info_represent,
cap_info_category_opts = cap_info_category_opts
)
# -------------------------------------------------------------------------
@staticmethod
def generate_identifier():
"""
Generate an identifier for a new form
"""
db = current.db
table = db.cap_alert
r = db().select(table.id,
limitby=(0, 1),
orderby=~table.id).first()
_time = datetime.datetime.strftime(datetime.datetime.utcnow(), "%Y%m%d")
if r:
next_id = int(r.id) + 1
else:
next_id = 1
# Format: prefix-time+-timezone+sequence-suffix
settings = current.deployment_settings
prefix = settings.get_cap_identifier_prefix() or current.xml.domain
oid = settings.get_cap_identifier_oid()
suffix = settings.get_cap_identifier_suffix()
return "%s-%s-%s-%03d%s%s" % \
(prefix, oid, _time, next_id, ["", "-"][bool(suffix)], suffix)
# -------------------------------------------------------------------------
@staticmethod
def generate_sender():
"""
Generate a sender for a new form
"""
try:
user_id = current.auth.user.id
except AttributeError:
return ""
return "%s/%d" % (current.xml.domain, user_id)
# -------------------------------------------------------------------------
@staticmethod
def generate_source():
"""
Generate a source for CAP alert
"""
return "%s@%s" % (current.xml.domain,
current.deployment_settings.get_base_public_url())
# -------------------------------------------------------------------------
@staticmethod
def template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
return s3db.cap_alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_form_validation(form):
"""
On Validation for CAP alert form
"""
form_vars = form.vars
if form_vars.get("scope") == "Private" and not form_vars.get("addresses"):
form.errors["addresses"] = \
current.T("'Recipients' field mandatory in case of 'Private' scope")
return
# -------------------------------------------------------------------------
@staticmethod
def info_onaccept(form):
"""
After DB I/O
"""
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
info_id = form_vars.id
if not info_id:
return
db = current.db
atable = db.cap_alert
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
if info:
alert_id = info.alert_id
if alert_id and cap_alert_is_template(alert_id):
db(itable.id == info_id).update(is_template = True)
return True
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_approve(record=None):
"""
Update the approved_on field when alert gets approved
"""
if not record:
return
alert_id = record["id"]
# Update approved_on at the time the alert is approved
if alert_id:
db = current.db
approved_on = record["approved_on"]
db(db.cap_alert.id == alert_id).update(approved_on = current.request.utcnow)
# =============================================================================
def cap_info_labels():
    """
        Labels for CAP info segments

        @return: dict {fieldname: localized label}
    """

    T = current.T
    labels = {"language": T("Language"),
              "category": T("Category"),
              "event_type_id": T("Event"),
              "response_type": T("Response type"),
              "urgency": T("Urgency"),
              "severity": T("Severity"),
              "certainty": T("Certainty"),
              "audience": T("Audience"),
              "event_code": T("Event code"),
              "effective": T("Effective"),
              "onset": T("Onset"),
              "expires": T("Expires at"),
              "sender_name": T("Sender's name"),
              "headline": T("Headline"),
              "description": T("Description"),
              "instruction": T("Instruction"),
              "web": T("URL"),
              "contact": T("Contact information"),
              "parameter": T("Parameters"),
              }
    return labels
# =============================================================================
def cap_alert_is_template(alert_id):
    """
        Tell whether an alert entry is a template

        @param alert_id: the cap_alert record id
        @return: truthy if the alert exists and is a template
    """

    if not alert_id:
        return False

    table = current.s3db.cap_alert
    row = current.db(table.id == alert_id).select(table.is_template,
                                                  limitby=(0, 1)).first()
    # Falsy (None) when the record does not exist
    return row and row.is_template
# =============================================================================
def cap_rheader(r):
    """
        Resource Header for CAP module

        @param r: the S3Request
        @return: the rheader DIV, or None for non-HTML representations
    """

    rheader = None
    if r.representation == "html":
        record = r.record
        if record:
            T = current.T
            s3db = current.s3db
            tablename = r.tablename
            if tablename == "cap_alert":
                record_id = record.id
                # Does the alert have at least one info segment?
                table = s3db.cap_info
                query = (table.alert_id == record_id)
                row = current.db(query).select(table.id,
                                               limitby=(0, 1)).first()
                if record.is_template:
                    # Template alert: reduced tab set, warn if no info item
                    if not (row and row.id):
                        error = DIV(T("An alert needs to contain at least one info item."),
                                    _class="error")
                    else:
                        error = ""

                    tabs = [(T("Template"), None),
                            (T("Information template"), "info"),
                            #(T("Area"), "area"),
                            #(T("Resource Files"), "resource"),
                            ]
                    rheader_tabs = s3_rheader_tabs(r, tabs)

                    rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
                                           TD(A(S3CAPModel.template_represent(record_id, record),
                                                _href=URL(c="cap", f="template",
                                                          args=[record_id, "update"]))),
                                           ),
                                        ),
                                  rheader_tabs,
                                  error
                                  )
                else:
                    # Regular alert: export button only once an info item exists
                    if not (row and row.id):
                        error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
                                    _class="error")
                        export_btn = ""
                    else:
                        error = ""
                        export_btn = A(DIV(_class="export_cap_large"),
                                       _href=URL(c="cap", f="alert", args=["%s.cap" % record_id]),
                                       _target="_blank",
                                       )

                    auth = current.auth
                    # Display 'Submit for Approval' based on permission
                    # and deployment settings
                    if not r.record.approved_by and \
                       current.deployment_settings.get_cap_authorisation() and \
                       auth.s3_has_permission("update", "cap_alert", record_id=r.id):
                        # Get the user ids for the role alert_approver
                        db = current.db
                        agtable = db.auth_group
                        rows = db(agtable.role == "Alert Approver")._select(agtable.id)
                        group_rows = db(agtable.id.belongs(rows)).select(agtable.id)
                        if group_rows:
                            # NOTE(review): pe_ids is reset inside this loop,
                            # so only the last group's members are notified -
                            # confirm whether multiple approver groups exist
                            for group_row in group_rows:
                                group_id = group_row.id
                                user_ids = auth.s3_group_members(group_id) # List of user_ids
                                pe_ids = [] # List of pe_ids
                                for user_id in user_ids:
                                    pe_ids.append(auth.s3_user_pe_id(int(user_id)))

                            submit_btn = A(T("Submit for Approval"),
                                           _href = URL(f = "compose",
                                                       vars = {"cap_alert.id": record.id,
                                                               "pe_ids": pe_ids,
                                                               },
                                                       ),
                                           _class = "action-btn"
                                           )
                        else:
                            submit_btn = None
                    else:
                        submit_btn = None

                    # Only offer a Location tab once an Area exists
                    table = s3db.cap_area
                    query = (table.alert_id == record_id)
                    row = current.db(query).select(table.id,
                                                   limitby=(0, 1)).first()
                    if row:
                        # We have an Area, so we can add Locations
                        location_tab = (T("Location"), "location")
                    else:
                        location_tab = ""
                    tabs = [(T("Alert Details"), None),
                            (T("Information"), "info"),
                            (T("Area"), "area"),
                            location_tab,
                            (T("Resource Files"), "resource"),
                            ]
                    rheader_tabs = s3_rheader_tabs(r, tabs)

                    rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
                                           TD(A(s3db.cap_alert_represent(record_id, record),
                                                _href=URL(c="cap", f="alert",
                                                          args=[record_id, "update"]))),
                                           ),
                                        TR(export_btn)
                                        ),
                                  rheader_tabs,
                                  error
                                  )
                    if submit_btn:
                        rheader.insert(1, TR(submit_btn))

            elif tablename == "cap_area":
                # Shouldn't ever be called
                tabs = [(T("Area"), None),
                        (T("Locations"), "location"),
                        #(T("Geocodes"), "tag"),
                        ]
                rheader_tabs = s3_rheader_tabs(r, tabs)
                rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
                                       TD(A(s3db.cap_alert_represent(record.alert_id),
                                            _href=URL(c="cap", f="alert",
                                                      args=[record.id, "update"])))
                                       ),
                                    TR(TH("%s: " % T("Information")),
                                       TD(A(s3db.cap_info_represent(record.info_id),
                                            _href=URL(c="cap", f="info",
                                                      args=[record.info_id, "update"]))),
                                       ),
                                    TR(TH("%s: " % T("Area")),
                                       TD(A(s3db.cap_area_represent(record.id, record),
                                            _href=URL(c="cap", f="area",
                                                      args=[record.id, "update"]))),
                                       ),
                                    ),
                              rheader_tabs
                              )

            elif tablename == "cap_area_location":
                # Shouldn't ever be called
                # We need the rheader only for the link back to the area.
                rheader = DIV(TABLE(TR(TH("%s: " % T("Area")),
                                       TD(A(s3db.cap_area_represent(record.area_id),
                                            _href=URL(c="cap", f="area",
                                                      args=[record.area_id, "update"]))),
                                       ),
                                    ))

            elif tablename == "cap_info":
                # Shouldn't ever be called
                tabs = [(T("Information"), None),
                        (T("Resource Files"), "resource"),
                        ]

                if cap_alert_is_template(record.alert_id):
                    # Info segment of a template alert
                    rheader_tabs = s3_rheader_tabs(r, tabs)
                    table = r.table
                    rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
                                           TD(A(S3CAPModel.template_represent(record.alert_id),
                                                _href=URL(c="cap", f="template",
                                                          args=[record.alert_id, "update"]))),
                                           ),
                                        TR(TH("%s: " % T("Info template")),
                                           TD(A(s3db.cap_info_represent(record.id, record),
                                                _href=URL(c="cap", f="info",
                                                          args=[record.id, "update"]))),
                                           )
                                        ),
                                  rheader_tabs,
                                  _class="cap_info_template_form"
                                  )
                    # Label used by client-side JS when fields are locked
                    current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
                else:
                    # Info segment of a regular alert
                    tabs.insert(1, (T("Areas"), "area"))
                    rheader_tabs = s3_rheader_tabs(r, tabs)
                    table = r.table

                    rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
                                           TD(A(s3db.cap_alert_represent(record.alert_id),
                                                _href=URL(c="cap", f="alert",
                                                          args=[record.alert_id, "update"]))),
                                           ),
                                        TR(TH("%s: " % T("Information")),
                                           TD(A(s3db.cap_info_represent(record.id, record),
                                                _href=URL(c="cap", f="info",
                                                          args=[record.id, "update"]))),
                                           )
                                        ),
                                  rheader_tabs
                                  )

    return rheader
# =============================================================================
def update_alert_id(tablename):
    """
        On-accept for area and resource records:
        returns a callback which back-fills the alert_id on the new
        record by looking it up via the parent area/info record.

        @param tablename: the table the callback is for
                          ("cap_area_location" resolves via cap_area,
                          anything else via cap_info)
    """

    def func(form):
        # The hook may receive a FORM or a plain record dict
        if "vars" in form:
            form_vars = form.vars
        elif "id" in form:
            form_vars = form
        elif hasattr(form, "vars"):
            form_vars = form.vars
        else:
            form_vars = form

        if form_vars.get("alert_id", None):
            # Nothing to do
            return

        # Look up from the info/area
        _id = form_vars.id
        if not _id:
            return

        db = current.db
        table = db[tablename]

        if tablename == "cap_area_location":
            # Resolve the alert via the parent cap_area record
            area_id = form_vars.get("area_id", None)
            if not area_id:
                # Get the full record
                item = db(table.id == _id).select(table.alert_id,
                                                  table.area_id,
                                                  limitby=(0, 1)).first()
                try:
                    alert_id = item.alert_id
                    area_id = item.area_id
                except:
                    # Nothing we can do
                    return
                if alert_id:
                    # Nothing to do
                    return

            atable = db.cap_area
            area = db(atable.id == area_id).select(atable.alert_id,
                                                   limitby=(0, 1)).first()
            try:
                alert_id = area.alert_id
            except:
                # Nothing we can do
                return
        else:
            # Resolve the alert via the parent cap_info record
            info_id = form_vars.get("info_id", None)
            if not info_id:
                # Get the full record
                item = db(table.id == _id).select(table.alert_id,
                                                  table.info_id,
                                                  limitby=(0, 1)).first()
                try:
                    alert_id = item.alert_id
                    info_id = item.info_id
                except:
                    # Nothing we can do
                    return
                if alert_id:
                    # Nothing to do
                    return

            itable = db.cap_info
            info = db(itable.id == info_id).select(itable.alert_id,
                                                   limitby=(0, 1)).first()
            try:
                alert_id = info.alert_id
            except:
                # Nothing we can do
                return

        db(table.id == _id).update(alert_id = alert_id)

    return func
# =============================================================================
def cap_gis_location_xml_post_parse(element, record):
    """
        UNUSED - done in XSLT

        Convert CAP polygon representation to WKT; extract circle lat lon.

        Latitude and longitude in CAP are expressed as signed decimal values
        in coordinate pairs:
            latitude,longitude
        The circle text consists of:
            latitude,longitude radius
        where the radius is in km.
        Polygon text consists of a space separated sequence of at least 4
        coordinate pairs where the first and last are the same.
            lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
    """

    # @ToDo: Extract altitude and ceiling from the enclosing <area>, and
    # compute an elevation value to apply to all enclosed gis_locations.

    polygon_elements = element.xpath("cap_polygon")
    if polygon_elements:
        # CAP "lat,lon lat,lon ..." -> WKT "POLYGON ((lon lat, lon lat, ...))"
        # Note the opposite conventions: CAP separates pairs with spaces and
        # coordinates with commas (lat first); WKT is the other way round.
        # @ToDo: Should we try interpreting all the points as decimal numbers,
        # and failing validation if they're wrong?
        wkt_points = []
        append = wkt_points.append
        for pair_text in polygon_elements[0].text.split():
            coords = pair_text.split(",")
            append("%s %s" % (coords[1], coords[0]))
        record.wkt = "POLYGON ((%s))" % ", ".join(wkt_points)
        return

    circle_values = element.xpath("resource[@name='gis_location_tag']/data[@field='tag' and text()='cap_circle']/../data[@field='value']")
    if circle_values:
        # CAP circle: "lat,lon radius" with radius in km
        coords, radius = circle_values[0].text.split()
        lat, lon = coords.split(",")
        try:
            # If any of these fail to interpret as numbers, the circle was
            # badly formatted. For now, we don't try to fail validation,
            # but just don't set the lat, lon.
            lat = float(lat)
            lon = float(lon)
            radius = float(radius)
        except ValueError:
            return
        record.lat = lat
        record.lon = lon
        # Add a bounding box for the given radius, if it is not zero.
        if radius > 0.0:
            bbox = current.gis.get_bounds_from_radius(lat, lon, radius)
            record.lat_min = bbox["lat_min"]
            record.lon_min = bbox["lon_min"]
            record.lat_max = bbox["lat_max"]
            record.lon_max = bbox["lon_max"]
# =============================================================================
def cap_gis_location_xml_post_render(element, record):
    """
        UNUSED - done in XSLT

        Convert Eden WKT polygon (and eventually circle) representation to
        CAP format and provide them in the rendered s3xml.

        Not all internal formats have a parallel in CAP, but an effort is made
        to provide a reasonable substitute:
        Polygons are supported.
        Circles that were read in from CAP (and thus carry the original CAP
        circle data) are supported.
        Multipolygons are currently rendered as their bounding box.
        Points are rendered as zero radius circles.

        Latitude and longitude in CAP are expressed as signed decimal values in
        coordinate pairs:
            latitude,longitude
        The circle text consists of:
            latitude,longitude radius
        where the radius is in km.
        Polygon text consists of a space separated sequence of at least 4
        coordinate pairs where the first and last are the same.
            lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1

        @param element: the gis_location element in the s3xml tree
        @param record: the gis_location record (dict-like)
    """

    # @ToDo: Can we rely on gis_feature_type == 3 to tell if the location is a
    # polygon, or is it better to look for POLYGON in the wkt? For now, check
    # both.
    # @ToDo: CAP does not support multipolygons. Do we want to extract their
    # outer polygon if passed MULTIPOLYGON wkt? For now, these are exported
    # with their bounding box as the polygon.
    # @ToDo: What if a point (gis_feature_type == 1) that is not a CAP circle
    # has a non-point bounding box? Should it be rendered as a polygon for
    # the bounding box?

    try:
        from lxml import etree
    except:
        # This won't fail, since we're in the middle of processing xml.
        return

    SubElement = etree.SubElement

    s3xml = current.xml
    TAG = s3xml.TAG
    RESOURCE = TAG["resource"]
    DATA = TAG["data"]
    ATTRIBUTE = s3xml.ATTRIBUTE
    NAME = ATTRIBUTE["name"]
    FIELD = ATTRIBUTE["field"]
    VALUE = ATTRIBUTE["value"]

    # Table / field / tag names used in the emitted gis_location_tag elements
    loc_tablename = "gis_location"
    tag_tablename = "gis_location_tag"
    tag_fieldname = "tag"
    val_fieldname = "value"
    polygon_tag = "cap_polygon"
    circle_tag = "cap_circle"
    # Fallback tags mark geometry synthesized from bbox / point data, so
    # export.xsl can prefer real cap_circle / cap_polygon tags if present
    fallback_polygon_tag = "cap_polygon_fallback"
    fallback_circle_tag = "cap_circle_fallback"

    def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):
        """
            Helper for cap_gis_location_xml_post_render that adds the CAP polygon
            data to the current element in a gis_location_tag element.
        """
        # Make a gis_location_tag.
        tag_resource = SubElement(element, RESOURCE)
        tag_resource.set(NAME, tag_tablename)
        tag_field = SubElement(tag_resource, DATA)
        # Add tag and value children.
        tag_field.set(FIELD, tag_fieldname)
        if fallback:
            tag_field.text = fallback_polygon_tag
        else:
            tag_field.text = polygon_tag
        val_field = SubElement(tag_resource, DATA)
        val_field.set(FIELD, val_fieldname)
        val_field.text = cap_polygon_text

    def __cap_gis_location_add_circle(element, lat, lon, radius, fallback=False):
        """
            Helper for cap_gis_location_xml_post_render that adds CAP circle
            data to the current element in a gis_location_tag element.
        """
        # Make a gis_location_tag.
        tag_resource = SubElement(element, RESOURCE)
        tag_resource.set(NAME, tag_tablename)
        tag_field = SubElement(tag_resource, DATA)
        # Add tag and value children.
        tag_field.set(FIELD, tag_fieldname)
        if fallback:
            tag_field.text = fallback_circle_tag
        else:
            tag_field.text = circle_tag
        val_field = SubElement(tag_resource, DATA)
        val_field.set(FIELD, val_fieldname)
        # Construct a CAP circle string: latitude,longitude radius
        cap_circle_text = "%s,%s %s" % (lat, lon, radius)
        val_field.text = cap_circle_text

    # Sort out the geometry case by wkt, CAP tags, gis_feature_type, bounds,...
    # Check the two cases for CAP-specific locations first, as those will have
    # definite export values. For others, we'll attempt to produce either a
    # circle or polygon: Locations with a bounding box will get a box polygon,
    # points will get a zero-radius circle.

    # Currently wkt is stripped out of gis_location records right here:
    # https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1332
    # https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1426
    # https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L3152
    # Until we provide a way to configure that choice, this will not work for
    # polygons.
    wkt = record.get("wkt", None)

    # WKT POLYGON: Although there is no WKT spec, according to every reference
    # that deals with nested polygons, the outer, enclosing, polygon must be
    # listed first. Hence, we extract only the first polygon, as CAP has no
    # provision for nesting.
    if wkt and wkt.startswith("POLYGON"):
        # ToDo: Is it sufficient to test for adjacent (( to find the start of
        # the polygon, or might there be whitespace between them?
        start = wkt.find("((")
        end = wkt.find(")")
        if start >=0 and end >=0:
            polygon_text = wkt[start + 2 : end]
            points_text = polygon_text.split(",")
            points = [p.split() for p in points_text]
            # Swap WKT "lon lat" into CAP "lat,lon"
            cap_points_text = ["%s,%s" % (point[1], point[0]) for point in points]
            cap_polygon_text = " ".join(cap_points_text)
            __cap_gis_location_add_polygon(element, cap_polygon_text)
            return
        # Fall through if the wkt string was mal-formed.

    # CAP circle stored in a gis_location_tag with tag = cap_circle.
    # If there is a cap_circle tag, we don't need to do anything further, as
    # export.xsl will use it. However, we don't know if there is a cap_circle
    # tag...
    #
    # @ToDo: The export calls xml_post_render after processing a resource's
    # fields, but before its components are added as children in the xml tree.
    # If this were delayed til after the components were added, we could look
    # there for the cap_circle gis_location_tag record. Since xml_post_parse
    # isn't in use yet (except for this), maybe we could look at moving it til
    # after the components?
    #
    # For now, with the xml_post_render before components: We could do a db
    # query to check for a real cap_circle tag record, and not bother with
    # creating fallbacks from bounding box or point...but we don't have to.
    # Instead, just go ahead and add the fallbacks under different tag names,
    # and let the export.xsl sort them out. This only wastes a little time
    # compared to a db query.

    # ToDo: MULTIPOLYGON -- Can stitch together the outer polygons in the
    # multipolygon, but would need to assure all were the same handedness.

    # The remaining cases are for locations that don't have either polygon wkt
    # or a cap_circle tag.

    # Bounding box: Make a four-vertex polygon from the bounding box.
    # This is a fallback, as if there is a circle tag, we'll use that.
    lon_min = record.get("lon_min", None)
    lon_max = record.get("lon_max", None)
    lat_min = record.get("lat_min", None)
    lat_max = record.get("lat_max", None)
    if lon_min and lon_max and lat_min and lat_max and \
       (lon_min != lon_max) and (lat_min != lat_max):
        # Although there is no WKT requirement, arrange the points in
        # counterclockwise order. Recall format is:
        # lat1,lon1 lat2,lon2 ... latN,lonN, lat1,lon1
        cap_polygon_text = \
            "%(lat_min)s,%(lon_min)s %(lat_min)s,%(lon_max)s %(lat_max)s,%(lon_max)s %(lat_max)s,%(lon_min)s %(lat_min)s,%(lon_min)s" \
            % {"lon_min": lon_min,
               "lon_max": lon_max,
               "lat_min": lat_min,
               "lat_max": lat_max}
        __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=True)
        return

    # WKT POINT or location with lat, lon: This can be rendered as a
    # zero-radius circle.
    # Q: Do we put bounding boxes around POINT locations, and are they
    # meaningful?
    lat = record.get("lat", None)
    lon = record.get("lon", None)
    if not lat or not lon:
        # Look for POINT.
        if wkt and wkt.startswith("POINT"):
            start = wkt.find("(")
            end = wkt.find(")")
            if start >=0 and end >=0:
                # NOTE(review): start + 2 assumes the form "POINT (lon lat)"
                # with a space after "(" - "POINT(lon lat)" would lose the
                # first digit; confirm the WKT producer's formatting
                point_text = wkt[start + 2 : end]
                point = point_text.split()
                try:
                    lon = float(point[0])
                    lat = float(point[1])
                except ValueError:
                    pass
    if lat and lon:
        # Add a (fallback) circle with zero radius.
        __cap_gis_location_add_circle(element, lat, lon, 0, True)
        return

    # ToDo: Other WKT.

    # Did not find anything to use. Presumably the area has a text description.
    return
# =============================================================================
def cap_alert_list_layout(list_id, item_id, resource, rfields, record):
    """
        Default dataList item renderer for CAP Alerts on the Home page.

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """

    record_id = record["cap_alert.id"]

    #raw = record._row
    headline = record["cap_info.headline"]
    location = record["cap_area.name"]
    description = record["cap_info.description"]
    sender = record["cap_info.sender_name"]

    # Headline links to the alert, with the affected location appended
    link = A(headline,
             # @ToDo: Link to nicely-formatted version of Display page
             _href = URL(c="cap", f="alert", args=record_id),
             )
    headline = DIV(link,
                   current.T("in %(location)s") % dict(location=location)
                   )

    return DIV(headline,
               P(description),
               P(sender, style="bold"),
               _class="thumbnail",
               _id=item_id,
               )
# =============================================================================
class CAPImportFeed(S3Method):
    """
        Import CAP alerts from a URL
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def apply_method(r, **attr):
        """
            Apply method: render the import form and, on submit, fetch the
            feed URL, transform it via the CAP import stylesheet and import
            the alerts.

            @param r: the S3Request
            @param attr: controller options for this request
        """

        if r.representation == "html":

            T = current.T
            request = current.request
            response = current.response

            title = T("Import from Feed URL")

            # @ToDo: use Formstyle
            form = FORM(
                    TABLE(
                        TR(TD(DIV(B("%s:" % T("URL")),
                                  SPAN(" *", _class="req"))),
                           TD(INPUT(_type="text", _name="url",
                                    _id="url", _value="")),
                           TD(),
                           ),
                        TR(TD(B("%s: " % T("User"))),
                           TD(INPUT(_type="text", _name="user",
                                    _id="user", _value="")),
                           TD(),
                           ),
                        TR(TD(B("%s: " % T("Password"))),
                           # Fix: use a password input so the value is masked
                           TD(INPUT(_type="password", _name="password",
                                    _id="password", _value="")),
                           TD(),
                           ),
                        TR(TD(B("%s: " % T("Ignore Errors?"))),
                           TD(INPUT(_type="checkbox", _name="ignore_errors",
                                    _id="ignore_errors")),
                           TD(),
                           ),
                        TR(TD(),
                           TD(INPUT(_type="submit", _value=T("Import"))),
                           TD(),
                           )
                        )
                    )

            response.view = "create.html"
            output = dict(title=title,
                          form=form)

            if form.accepts(request.vars, current.session):

                form_vars = form.vars
                url = form_vars.get("url", None)
                if not url:
                    response.error = T("URL is required")
                    return output
                # @ToDo: Use these credentials to authenticate against the feed
                # Fix: the form field is named "user", but this previously read
                # the non-existent key "username" (and so was always None)
                username = form_vars.get("user", None)
                password = form_vars.get("password", None)
                try:
                    # Fix: HTTPError is a subclass of URLError, so the former
                    # separate "except urllib2.HTTPError" clause was unreachable
                    content = fetch(url)
                except urllib2.URLError:
                    response.error = str(sys.exc_info()[1])
                    return output

                # ("content" also avoids shadowing the builtin "file")
                File = StringIO(content)
                stylesheet = os.path.join(request.folder, "static", "formats",
                                          "cap", "import.xsl")
                xml = current.xml
                tree = xml.parse(File)

                resource = current.s3db.resource("cap_alert")
                s3xml = xml.transform(tree, stylesheet_path=stylesheet,
                                      name=resource.name)
                try:
                    resource.import_xml(s3xml,
                                        ignore_errors=form_vars.get("ignore_errors", None))
                except:
                    response.error = str(sys.exc_info()[1])
                else:
                    import_count = resource.import_count
                    if import_count:
                        response.confirmation = "%s %s" % \
                            (import_count,
                             T("Alerts successfully imported."))
                    else:
                        response.information = T("No Alerts available.")

            return output

        else:
            raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
| {
"content_hash": "589147eb71f8ec4e2618f4b8dc1f7533",
"timestamp": "",
"source": "github",
"line_count": 1880,
"max_line_length": 142,
"avg_line_length": 45.99627659574468,
"alnum_prop": 0.4463127218900697,
"repo_name": "michaelhowden/eden",
"id": "9da905e4978170587ccf255d378463fea6d50bc6",
"size": "86498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/s3db/cap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "2357611"
},
{
"name": "HTML",
"bytes": "1320631"
},
{
"name": "JavaScript",
"bytes": "20040869"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "29520986"
},
{
"name": "Ruby",
"bytes": "3611"
},
{
"name": "Shell",
"bytes": "5022"
},
{
"name": "XSLT",
"bytes": "2818129"
}
],
"symlink_target": ""
} |
"""
Script for building the application.
Usage:
python setup.py py2app -gx -O2
"""
import os
from distutils.core import setup
import py2app
appname = "FileMaker Layout Eporter"
appnameshort = "FMPLayoutexporter"
version = "V0.2.0"
copyright = u"Copyright 2009-2015 Karsten Wolf"
infostr = appname + u' ' + version + u' ' + copyright
setup(
app=[{
'script': "FMPLayoutexporter.py",
'plist':{
'CFBundleGetInfoString': infostr,
'CFBundleIdentifier': 'org.kw.Layoutexporter',
'CFBundleShortVersionString': version,
'CFBundleDisplayName': appnameshort,
'CFBundleName': appnameshort,
'CFBundleSignature': 'KWLe',
'LSHasLocalizedDisplayName': False,
'NSAppleScriptEnabled': False,
'NSHumanReadableCopyright': copyright}}],
data_files=["English.lproj/MainMenu.nib",
"English.lproj/OutlineWindow.nib",
"Icon.icns"],
options={
'py2app':{
'iconfile': "Icon.icns",
},
},
)
| {
"content_hash": "c605527ec505b2648bd400f6b7b6fb8f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 58,
"avg_line_length": 23.88888888888889,
"alnum_prop": 0.598139534883721,
"repo_name": "karstenw/FMPLayoutExporter",
"id": "d792955adce3f40f6315bb53772eb528c2f89e42",
"size": "1075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "22821"
}
],
"symlink_target": ""
} |
from puzzle.puzzlepedia import puzzle
def get():
return puzzle.Puzzle('Puzzle 1.3: The Missing Painting', SOURCE)
SOURCE = """
name in {Beth, Charles, David, Frank, Jessica, Karen, Taylor}
job in {attorny, banker, composer, decorator, entrepreneur, filmmaker, gerontologist}
start in {boathouse, cottage, garden, lighthouse, mansion, pond, windmill}
status in {crime, innocent}
# Setup: only one crime was committed.
sum([n.crime for n in name]) == 1
#2
boathouse.crime or cottage.crime or lighthouse.crime or windmill.crime
#4
if entrepreneur.innocent:
not mansion.crime
#5
not gerontologist.crime
#6
Karen == decorator
#7
entrepreneur == cottage or entrepreneur == mansion or entrepreneur == pond
filmmaker == cottage or filmmaker == mansion or filmmaker == pond
gerontologist == cottage or gerontologist == mansion or gerontologist == pond
#8
if charles.crime:
not cottage.crime
#9
if beth.innocent:
beth == banker
beth == windmill
#10
if charles.innocent:
charles == gerontologist
not charles.mansion
not charles.pond
#11
if david.innocent:
not boathouse.crime
#12
if frank.innocent:
frank != entrepreneur
entrepreneur != mansion
#13
if jessica.innocent:
mansion.innocent
pond.innocent
jessica != mansion
jessica != pond
#14
if karen.innocent:
karen == lighthouse
boathouse == innocent
mansion == innocent
#15
if taylor.innocent:
taylor == attorny
taylor == garden
if windmill.crime:
jessica.crime
"""
# Expected solution table for SOURCE above (used for verification).
SOLUTION = """
name | job | start | status
Beth | banker | windmill | innocent
Charles | gerontologist | cottage | innocent
David | entrepreneur | pond | innocent
Frank | filmmaker | mansion | innocent
Jessica | composer | boathouse | innocent
Karen | decorator | lighthouse | crime
Taylor | attorny | garden | innocent
"""
| {
"content_hash": "67aa08c1b2987a86d7de8750cdb9bc2e",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 85,
"avg_line_length": 21.0561797752809,
"alnum_prop": 0.695837780149413,
"repo_name": "PhilHarnish/forge",
"id": "62320c91257ed8fca3c7f0979754616019d8945f",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/puzzle/examples/mim/p1_3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "254508"
},
{
"name": "HTML",
"bytes": "12062"
},
{
"name": "JavaScript",
"bytes": "178383"
},
{
"name": "Jupyter Notebook",
"bytes": "1755506"
},
{
"name": "Python",
"bytes": "1033953"
},
{
"name": "Ruby",
"bytes": "800"
},
{
"name": "Shell",
"bytes": "3181"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
class HandJob:
    """Track a colored object (a hand) in webcam video via HSV thresholding.

    The user left-clicks the video window to collect HSV color samples;
    each frame is thresholded around those samples, the largest matching
    contour is located, and its centroid plus frame-to-frame velocity are
    reported by captureImage().
    """

    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        self.samples = []          # HSV samples collected from mouse clicks
        self.oldPosition = [0, 0]  # centroid found in the previous frame
        self.frame_hsv = 0         # placeholder until the first frame arrives
        cv2.namedWindow("Frame")
        cv2.setMouseCallback("Frame", self.getSample)

    def getSample(self, event, x, y, flags, param):
        """Mouse callback: record the HSV pixel under a left click."""
        if event == cv2.EVENT_LBUTTONDOWN:
            print("%d %d" % (x, y))
            print("click")
            self.samples.append(self.frame_hsv[y, x])

    def createMultipleThresholds(self, img):
        """Return a binary mask matching any of the sampled HSV colors."""
        combined_mask = np.zeros((img.shape[0], img.shape[1]), np.uint8)
        # Little flexibility in hue but more in sat & val (b/c lighting).
        range_hsv = np.array([1, 50, 50])
        for sample in self.samples:
            mask = cv2.inRange(img, sample - range_hsv, sample + range_hsv)
            combined_mask = combined_mask + mask
        # Median blur removes speckle noise from the combined mask.
        combined_mask = cv2.medianBlur(combined_mask, 11)
        return combined_mask

    def getLargestContour(self, img):
        """Return the largest contour in a binary image, or None if empty.

        (Previously returned the int 0 as a sentinel, forcing callers to
        test ``type(contour) != int``; None is the idiomatic sentinel.)
        """
        contours, hierarchy = cv2.findContours(
            img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            return None
        largest, largest_size = contours[0], 0
        for contour in contours:
            size = cv2.contourArea(contour)
            if size > largest_size:
                largest, largest_size = contour, size
        return largest

    def getContourMoment(self, contour):
        """Return the [cx, cy] centroid of a contour."""
        m = cv2.moments(contour)
        # Add 0.01 to prevent division by 0 errors on degenerate contours.
        cx = int(m['m10'] / (m['m00'] + 0.01))
        cy = int(m['m01'] / (m['m00'] + 0.01))
        return [cx, cy]

    def captureImage(self):
        """Grab one frame, track the object, return [position, velocity]."""
        position = [0, 0]
        velocity = [0, 0]
        frame = self.cap.read()[1]
        # BUG FIX: ``frame != None`` compares a numpy array elementwise and
        # fails when used as a truth value; identity check is required.
        if frame is not None:
            frame = cv2.flip(frame, 1)
            self.frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            threshold_mask = self.createMultipleThresholds(self.frame_hsv)
            contour = self.getLargestContour(threshold_mask)
            if contour is not None:
                cv2.drawContours(frame, contour, -1, (0, 255, 255), 2)
                position = self.getContourMoment(contour)
                cv2.circle(frame, (position[0], position[1]), 5, (0, 0, 255), -1)
                # Velocity is the centroid delta since the previous frame.
                velocity = [position[0] - self.oldPosition[0],
                            position[1] - self.oldPosition[1]]
            cv2.imshow("Frame", frame)
            cv2.waitKey(10)
        self.oldPosition = position
        return [position, velocity]
| {
"content_hash": "6992fcfbb726ed05894aa9238b94745f",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 111,
"avg_line_length": 29.024390243902438,
"alnum_prop": 0.673109243697479,
"repo_name": "WWPOL/CV-Pong",
"id": "1b825851c0299bccc1fab8b2e3c668994bafae2b",
"size": "2380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/app/hand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2847"
},
{
"name": "Python",
"bytes": "16312"
}
],
"symlink_target": ""
} |
from cannonball.Actor import Actor
from Box2D import *
from pyglet.gl import *
import random
class Smoke(Actor):
    """A short-lived smoke-puff actor that grows and fades out.

    The puff is a small, non-colliding Box2D circle body drawn as an
    expanding, increasingly transparent textured quad.
    """
    # Draw-order depth value.
    z = 0.1
    def __init__(self, level, position, linear_velocity):
        super(Smoke, self).__init__(level)
        self._create_body(position, linear_velocity)
    @property
    def progress(self):
        # Fraction of the puff's lifetime elapsed (0 at creation, 1 at
        # the scheduled destruction time).
        return ((self.level.time - self.creation_time) /
                (self.destruction_time - self.creation_time))
    @property
    def radius(self):
        # Visual radius grows linearly from 1 to 6 over the lifetime.
        return 1 + 5 * self.progress
    @property
    def color(self):
        # White RGBA, fading to fully transparent as the puff ages.
        return 1, 1, 1, 1 - self.progress
    def _create_body(self, position, linear_velocity):
        # Lifetime is randomized between 0.5 and 1.0 seconds; destruction
        # is scheduled with the level up front.
        self.creation_time = self.level.time
        self.destruction_time = self.level.time + 0.5 + 0.5 * random.random()
        self.level.queue_destroy(self, self.destruction_time -
                                 self.creation_time)
        body_def = b2BodyDef()
        body_def.position = position
        body_def.linearDamping = 2
        self.body = self.level.world.CreateBody(body_def)
        self.body.userData = self
        self.body.linearVelocity = linear_velocity
        self.create_shapes()
        # Mass is computed from the shapes, so it must come after them.
        self.body.SetMassFromShapes()
    def create_shapes(self):
        """Attach the physics shape: a small, low-restitution circle."""
        shape_def = b2CircleDef()
        shape_def.radius = 0.2
        shape_def.density = 1
        shape_def.restitution = 0.1
        # Negative group index: smoke particles never collide with each other.
        shape_def.filter.groupIndex = -1
        shape = self.body.CreateShape(shape_def)
        shape.SetUserData({'color': (1, 1, 1)})
    def draw(self):
        super(Smoke, self).draw()
        # Appearance changes every frame, so the cached display list is
        # invalidated after each draw.
        self.dirty_display_list = True
    def draw_geometry(self):
        """Render the puff as a textured quad scaled to the current radius."""
        glColor4d(*self.color)
        texture = self.level.get_texture('../textures/smoke.png')
        glEnable(texture.target)
        glBindTexture(texture.target, texture.id)
        glBegin(GL_QUADS)
        for x, y in [(0, 0), (1, 0), (1, 1), (0, 1)]:
            glTexCoord2d(x, y)
            glVertex2d(self.radius * (2 * x - 1), self.radius * (2 * y - 1))
        glEnd()
        glDisable(texture.target)
| {
"content_hash": "1dad2b165513403b603ad0f1442bea35",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 31.136363636363637,
"alnum_prop": 0.5858880778588808,
"repo_name": "elemel/cannonball",
"id": "134457784f659748b221e570a6c7d5371e847c75",
"size": "2055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/cannonball/actors/Smoke.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46082"
}
],
"symlink_target": ""
} |
import os
try:
from setuptools import find_packages, setup, Command
except ImportError:
from distutils.core import find_packages, setup, Command
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly
    (the original opened the file without ever closing it).
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Short one-line summary shown on PyPI.
DESCRIPTION = 'A lightweight Python crawler framework'
# The long description is the README; note it is read at import time.
LONG_DESCRIPTION = read('README.md')
# Distribution metadata; install_requires pins minimum versions of the
# crawler's runtime dependencies.
setup(
    name='pycrawler',
    version='0.0.1',
    packages=find_packages(),
    url='https://github.com/pengmeng/PyCrawler',
    license='MIT License',
    author='mengpeng',
    author_email='mengp3157@gmail.com',
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    platforms='any',
    install_requires=['beautifulsoup4>=4.3.2',
                      'bitarray>=0.8.1',
                      'eventlet>=0.17.2',
                      'greenlet>=0.4.5',
                      'pybloom>=1.1',
                      'pymongo>=3.0.1',
                      'redis>=2.10.3',
                      'Unidecode>=0.4.17'],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
) | {
"content_hash": "ac9ab5b0b6b8fb7269ed4d4826b97931",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 31.128205128205128,
"alnum_prop": 0.5749588138385503,
"repo_name": "ymero/PyCrawler",
"id": "03358e87288a4fa548dee294dd9b8730b246c260",
"size": "1214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53186"
}
],
"symlink_target": ""
} |
"""
Transformer sub-package for the pyRdfa package. It contains modules with transformer functions; each may be
invoked by pyRdfa to transform the dom tree before the "real" RDFa processing.
@summary: RDFa Transformer package
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: __init__.py,v 1.8 2012/06/12 11:47:19 ivan Exp $
$Date: 2012/06/12 11:47:19 $
"""
__version__ = "3.0"
# Here are the transformer functions that are to be performed for all RDFa files, no matter what
def top_about(root, options, state) :
    """
    Ensure the top level element (and, for (X)HTML hosts under RDFa 1.0,
    the head/body elements) carries an @about attribute so the document
    gets a subject.

    @param root: a DOM node for the top level element
    @param options: invocation options
    @type options: L{Options<pyRdfa.options>}
    @param state: top level execution state
    @type state: L{State<pyRdfa.state>}
    """
    def set_about(node) :
        # BUG FIX: the original tested the enclosing loop variable ``top``
        # instead of the ``node`` argument here.  Every call site happened
        # to pass ``top`` so behavior was unaffected, but the closure was
        # wrong and fragile.
        if has_one_of_attributes(node, "rel", "rev") :
            if not has_one_of_attributes(node, "about", "src") :
                node.setAttribute("about","")
        else :
            if not has_one_of_attributes(node, "href", "resource", "about", "src") :
                node.setAttribute("about","")

    from ..host import HostLanguage
    from ..utils import has_one_of_attributes

    if not has_one_of_attributes(root, "about") :
        # The situation is a bit complicated: if a @resource is present
        # without anything else, then it sets the subject, ie, should be
        # accepted...
        if has_one_of_attributes(root, "resource", "href", "src") :
            if has_one_of_attributes(root, "rel", "rev","property") :
                root.setAttribute("about","")
        else :
            root.setAttribute("about","")

    if options.host_language in [ HostLanguage.xhtml, HostLanguage.html5, HostLanguage.xhtml5 ] :
        if state.rdfa_version >= "1.1" :
            # RDFa 1.1 does not need the head/body adjustment.
            pass
        else :
            for top in root.getElementsByTagName("head") :
                if not has_one_of_attributes(top, "href", "resource", "about", "src") :
                    set_about(top)
            for top in root.getElementsByTagName("body") :
                if not has_one_of_attributes(top, "href", "resource", "about", "src") :
                    set_about(top)
def empty_safe_curie(node, options, state) :
    """
    Remove @about/@resource attributes whose value is the empty safe CURIE
    '[]', recursively over the element tree.

    Wherever a value is removed, an 'artificial' marker attribute
    ('about_pruned' or 'resource_pruned', with an empty value) is added so
    that later processing can tell an ignored safe CURIE was present, and
    a warning is recorded on the options object.

    @param node: a DOM node for the top level element
    @param options: invocation options
    @type options: L{Options<pyRdfa.options>}
    @param state: top level execution state
    @type state: L{State<pyRdfa.state>}
    """
    for attr in ("about", "resource") :
        if node.hasAttribute(attr) and node.getAttribute(attr) == '[]' :
            node.removeAttribute(attr)
            node.setAttribute(attr + '_pruned','')
            msg = "Attribute @%s uses an empty safe CURIE; the attribute is ignored" % attr
            options.add_warning(msg, node=node)
    for child in node.childNodes :
        if child.nodeType == node.ELEMENT_NODE :
            empty_safe_curie(child, options, state)
def vocab_for_role(node, options, state) :
    """
    Expand plain term values of @role attributes into full XHTML vocabulary
    URIs, recursively (per the Role Attribute specification, @role behaves
    as if a @vocab for the XHTML vocabulary were in force).  Values that do
    not match the term syntax are left untouched.

    @param node: a DOM node for the top level element
    @param options: invocation options
    @type options: L{Options<pyRdfa.options>}
    @param state: top level execution state
    @type state: L{State<pyRdfa.state>}
    """
    from ..termorcurie import termname, XHTML_URI

    if node.hasAttribute("role") :
        expanded = []
        for value in node.getAttribute("role").strip().split() :
            if termname.match(value) :
                expanded.append(XHTML_URI + value)
            else :
                expanded.append(value)
        node.setAttribute("role", " ".join(expanded))
    for child in node.childNodes :
        if child.nodeType == node.ELEMENT_NODE :
            vocab_for_role(child, options, state)
| {
"content_hash": "8d7077ca2d2af2b55341aefaa7a669a8",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 210,
"avg_line_length": 36.9327731092437,
"alnum_prop": 0.697155858930603,
"repo_name": "dbs/rdflib",
"id": "bfb88224e1b07f9595dab3538a1af24cd6030b63",
"size": "4420",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rdflib/plugins/parsers/pyRdfa/transform/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "145"
},
{
"name": "HTML",
"bytes": "120202"
},
{
"name": "Jupyter Notebook",
"bytes": "283784"
},
{
"name": "Python",
"bytes": "1470218"
},
{
"name": "Ruby",
"bytes": "28544"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
import uuid
from taskwarrior_blocks_capsule import __version__ as version_string
# Path to the pinned requirements file, next to this setup.py.
requirements_path = os.path.join(
    os.path.dirname(__file__),
    'requirements.txt',
)

try:
    from pip.req import parse_requirements

    requirements = [
        str(req.req) for req in parse_requirements(
            requirements_path,
            session=uuid.uuid1()
        )
    ]
except ImportError:
    # pip >= 10 removed `pip.req`; fall back to reading the file directly,
    # skipping pip options ('-...') and comments ('#...').
    # BUG FIX: the original ran this file-read unconditionally *after* the
    # try/except, silently discarding the pip-parsed result.
    with open(requirements_path, 'r') as in_:
        requirements = [
            req for req in in_.readlines()
            if not req.startswith('-')
            and not req.startswith('#')
        ]
class Tox(TestCommand):
    """setuptools 'test' command that delegates the test run to tox."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # No extra pytest args; let tox drive the run.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Import here, cause outside the eggs aren't loaded.
        import tox
        errno = tox.cmdline(self.test_args)
        sys.exit(errno)
# Distribution metadata.  The entry_points register this capsule with
# taskwarrior-capsules as both a preprocessor and a postprocessor.
setup(
    name='taskwarrior-blocks-capsule',
    version=version_string,
    url='https://github.com/coddingtonbear/taskwarrior-blocks-capsule',
    description=(
        "Allows you to create tasks using 'blocks:' in addition to the "
        "built-in 'depends:' arguments."
    ),
    author='Adam Coddington',
    author_email='me@adamcoddington.net',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
    ],
    install_requires=requirements,
    tests_require=['tox'],
    cmdclass = {'test': Tox},
    packages=find_packages(),
    entry_points={
        'taskwarrior_preprocessor_capsules': [
            'blocks = taskwarrior_blocks_capsule.capsule:Blocks',
        ],
        'taskwarrior_postprocessor_capsules': [
            'blocks = taskwarrior_blocks_capsule.capsule:Blocks',
        ]
    },
)
| {
"content_hash": "6a212758c488db172ed290d2929ee9c3",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 72,
"avg_line_length": 27.76388888888889,
"alnum_prop": 0.6213106553276638,
"repo_name": "coddingtonbear/taskwarrior-blocks-capsule",
"id": "a8f7a34f1e0fc3f2c152d91fa47cc43a2ca784f4",
"size": "1999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5013"
}
],
"symlink_target": ""
} |
from os import environ
from fastapi import Depends, FastAPI, HTTPException, Response, status
from sqlalchemy.orm import Session
from app import crud, schemas
from app.database import SessionLocal
# K_SERVICE / K_REVISION are presumably injected by the Cloud Run runtime
# (names match its contract) — empty defaults apply when running elsewhere.
CLOUD_RUN_SERVICE = environ.get("K_SERVICE", "")
CLOUD_RUN_REVISION = environ.get("K_REVISION", "")
# Serve the interactive API docs UI at the root path.
app = FastAPI(docs_url="/")
def get_db():
    """FastAPI dependency: yield a DB session and always close it."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
@app.get("/health/")
def read_health():
return {"health": "true"}
@app.get("/cloud-run-info/")
def read_cloud_run_info():
return {"service": CLOUD_RUN_SERVICE, "revision": CLOUD_RUN_REVISION}
@app.get("/users/", response_model=list[schemas.Users])
def read_users(skip: int = 0, limit: int = 1000, db: Session = Depends(get_db)):
users = crud.get_users(db, skip=skip, limit=limit)
return users
@app.get("/users/{user_id}/", response_model=schemas.Users)
def read_user(user_id: str, db: Session = Depends(get_db)):
db_user = crud.get_user(db, user_id=user_id)
if db_user is None:
raise HTTPException(status_code=404, detail="User not found")
return db_user
@app.post("/users/", status_code=201, response_model=schemas.Users)
def create_user(user: schemas.UsersBase, db: Session = Depends(get_db)):
return crud.create_user(db=db, user=user)
@app.put("/users/{user_id}/", response_model=schemas.Users)
def update_user(user_id: str, user: schemas.UsersBase, db: Session = Depends(get_db)):
return crud.update_user(db=db, user_id=user_id, user=user)
@app.delete("/users/{user_id}/")
def delete_user(user_id: str, db: Session = Depends(get_db)):
crud.delete_user(db=db, user_id=user_id)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@app.get("/scores/", response_model=list[schemas.Scores])
def read_scores(base_score: int = 9000, skip: int = 0, limit: int = 1000, db: Session = Depends(get_db)):
"""
By default, this api returns scores which have >= 9000.
If you like to get scores which have >= 1000, you should set "base_score: 1000".
If you like to get top 10 scores, you should set "limit: 10".
"""
scores = crud.get_scores(db, base_score=base_score, skip=skip, limit=limit)
return scores
@app.get("/scores/{score_id}/", response_model=schemas.Scores)
def read_score(score_id: str, db: Session = Depends(get_db)):
db_score = crud.get_score(db, score_id=score_id)
if db_score is None:
raise HTTPException(status_code=404, detail="Score not found")
return db_score
@app.post("/scores/", status_code=201, response_model=schemas.Scores)
def create_score(score: schemas.ScoresBase, db: Session = Depends(get_db)):
return crud.create_score(db=db, score=score)
@app.put("/scores/{score_id}/", response_model=schemas.Scores)
def update_score(score_id: str, score: schemas.ScoresBase, db: Session = Depends(get_db)):
return crud.update_score(db=db, score_id=score_id, score=score)
@app.delete("/scores/{score_id}/", status_code=204)
def delete_score(score_id: str, db: Session = Depends(get_db)):
crud.delete_score(db=db, score_id=score_id)
return Response(status_code=status.HTTP_204_NO_CONTENT)
| {
"content_hash": "be3115bd49c356f4e25689ce47277b9d",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 105,
"avg_line_length": 32.618556701030926,
"alnum_prop": 0.6855246523388117,
"repo_name": "cloudspannerecosystem/spanner-sqlalchemy-demo",
"id": "e61330df484a60580a05b10cf3f587741e720a18",
"size": "3740",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "app/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "459"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "25751"
},
{
"name": "Shell",
"bytes": "632"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
__all__ = ["izip", "imap", "iteritems", "xrange"]
try:
    # Python 2: the lazy variants live in itertools.
    from itertools import izip, imap
except ImportError:
    # Python 3: the builtins are already lazy.
    izip = zip
    imap = map

    def iteritems(mapping):
        return mapping.items()
else:
    def iteritems(mapping):
        return mapping.iteritems()
try:
    xrange  # Probe for the Python 2 builtin.
except NameError:
    # Python 3: range is already lazy.
    xrange = range
else:
    # Python 2: re-export the builtin as a module-level name.
    xrange = xrange
| {
"content_hash": "75fc97fe5b2a97a102008d6d02f4c768",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 16.454545454545453,
"alnum_prop": 0.6160220994475138,
"repo_name": "jellis18/emcee3",
"id": "ecb733d3589001fcf45f73bd1442ac5115807238",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emcee3/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "914"
},
{
"name": "Python",
"bytes": "79045"
},
{
"name": "TeX",
"bytes": "181310"
}
],
"symlink_target": ""
} |
import stun
from ethereum import slogging as logging
from raiden.exceptions import STUNUnavailableException
log = logging.getLogger(__name__)
def stun_socket(
        socket,
        source_ip='0.0.0.0',
        source_port=4200,
        stun_host=None,
        stun_port=3478
):
    """Run STUN discovery on `socket`; return (external_ip, external_port, nat).

    The socket timeout is temporarily lowered to 2 seconds for the STUN
    exchange and always restored afterwards.

    BUG FIX: the original only restored the timeout on success, leaking the
    2-second timeout to the caller when STUNUnavailableException was raised.

    Raises STUNUnavailableException when no external IP is discovered.
    """
    timeout = socket.gettimeout()
    socket.settimeout(2)
    try:
        log.debug('Initiating STUN for %s:%s', source_ip, source_port)
        nat_type, nat = stun.get_nat_type(
            socket,
            source_ip,
            source_port,
            stun_host=stun_host,
            stun_port=stun_port
        )
        external_ip = nat['ExternalIP']
        if isinstance(external_ip, tuple):
            external_ip = external_ip[0]
        if external_ip is None:
            log.warning('STUN failed', nat=nat)
            raise STUNUnavailableException()
        external_port = nat['ExternalPort']
        log.debug(
            'STUN-socket ready:',
            external_ip=external_ip,
            external_port=external_port,
            nat_type=nat_type,
            nat=nat,
            internal_ip=socket.getsockname()[0],
            internal_port=socket.getsockname()[1],
        )
        nat['type'] = nat_type
    finally:
        # Restore the caller's original timeout even when STUN fails.
        socket.settimeout(timeout)
    return external_ip, external_port, nat
| {
"content_hash": "fabf30347a81b0bc6be76f82d98433e0",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 66,
"avg_line_length": 26.044444444444444,
"alnum_prop": 0.6279863481228669,
"repo_name": "tomashaber/raiden",
"id": "eefa01e0c3d6818976384830307adbb55c67e772",
"size": "1196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raiden/network/stunsock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4536"
},
{
"name": "HTML",
"bytes": "21998"
},
{
"name": "JavaScript",
"bytes": "1996"
},
{
"name": "Makefile",
"bytes": "5184"
},
{
"name": "Python",
"bytes": "1222610"
},
{
"name": "Shell",
"bytes": "4570"
},
{
"name": "TypeScript",
"bytes": "75150"
}
],
"symlink_target": ""
} |
from opencivicdata.models import (Jurisdiction,
Organization,
Person,
Bill,
VoteEvent,
Event,
Division
)
from .helpers import (PublicListEndpoint,
PublicDetailEndpoint,
get_field_list)
from .serialize import (JURISDICTION_SERIALIZE,
ORGANIZATION_SERIALIZE,
PERSON_SERIALIZE,
VOTE_SERIALIZE,
BILL_SERIALIZE,
EVENT_SERIALIZE,
DIVISION_SERIALIZE
)
from restless.http import HttpError
import datetime
from django.db.models import Q
"""
This module contains the class-based views that we expose over the API.
The common logic for these views are in imago.helpers.*Endpoint
"""
class JurisdictionList(PublicListEndpoint):
    """List endpoint for Jurisdictions."""
    model = Jurisdiction
    serialize_config = JURISDICTION_SERIALIZE
    default_fields = ['id', 'name', 'url', 'classification', 'feature_flags',
                      'division.id', 'division.name']

    def adjust_filters(self, params):
        """Rewrite friendly query params into ORM filter expressions."""
        if 'name' in params:
            # Substring match rather than exact equality.
            value = params.pop('name')
            params['name__icontains'] = value
        if 'feature_flags' in params:
            # feature_flags is an array field; wrap the value for __contains.
            value = params.pop('feature_flags')
            params['feature_flags__contains'] = [value]
        return params
class JurisdictionDetail(PublicDetailEndpoint):
    """Detail endpoint for a single Jurisdiction."""
    model = Jurisdiction
    serialize_config = JURISDICTION_SERIALIZE
    # All model fields except heavyweight relations, plus a division summary.
    default_fields = get_field_list(model, without=[
        'event_locations',
        'events',
        'organizations',
        'division',
        'locked_fields',
        'runs',
    ]) + [
        'division.id', 'division.name'
    ]
class OrganizationList(PublicListEndpoint):
    """List endpoint for Organizations."""
    model = Organization
    serialize_config = ORGANIZATION_SERIALIZE
    default_fields = ['id', 'name', 'image', 'classification',
                      'jurisdiction.id', 'parent.id', 'parent.name',
                      ]
class OrganizationDetail(PublicDetailEndpoint):
    """Detail endpoint for a single Organization."""
    model = Organization
    serialize_config = ORGANIZATION_SERIALIZE
    # All model fields except heavyweight relations, plus summaries of the
    # parent, memberships, children, jurisdiction and posts.
    default_fields = get_field_list(model, without=[
        'memberships_on_behalf_of', 'billactionrelatedentity',
        'eventrelatedentity', 'eventparticipant', 'jurisdiction_id',
        'billsponsorship', 'memberships', 'parent_id', 'children', 'actions',
        'parent', 'posts', 'bills', 'votes', 'locked_fields',
    ]) + [
        'parent.id',
        'parent.name',
        'memberships.start_date',
        'memberships.end_date',
        'memberships.person.id',
        'memberships.person.name',
        'memberships.post.id',
        'children.id',
        'children.name',
        'jurisdiction.id',
        'jurisdiction.name',
        'jurisdiction.division.id',
        'jurisdiction.division.name',
        'posts.id',
        'posts.label',
        'posts.division_id',
        'posts.role',
    ]
class PeopleList(PublicListEndpoint):
    """List endpoint for People, with optional geographic lookup."""
    model = Person
    serialize_config = PERSON_SERIALIZE
    default_fields = [
        'name', 'id', 'sort_name', 'image', 'gender',
        'memberships.organization.id',
        'memberships.organization.name',
        'memberships.organization.classification',
        'memberships.organization.jurisdiction.id',
        'memberships.organization.jurisdiction.name',
        'memberships.post.id',
        'memberships.post.label',
        'memberships.post.role',
    ]

    def adjust_filters(self, params):
        """Translate lat/lon query params into a geometry-containment filter."""
        latitude = params.pop('lat', None)
        longitude = params.pop('lon', None)
        if latitude and longitude:
            point = 'POINT({} {})'.format(longitude, latitude)
            params['memberships__post__division__geometries__boundary__shape__contains'] = point
        elif latitude or longitude:
            # One coordinate without the other is ambiguous.
            raise HttpError(400, "must specify lat & lon together")
        return params
class PersonDetail(PublicDetailEndpoint):
    """Detail endpoint for a single Person."""
    model = Person
    serialize_config = PERSON_SERIALIZE
    # All model fields except heavyweight relations, plus an expanded view
    # of memberships (post, contact details, organization).
    default_fields = get_field_list(model, without=[
        'votes',
        'billactionrelatedentity',
        'eventparticipant',
        'billsponsorship',
        'eventrelatedentity',
        'memberships',
        'locked_fields',
    ]) + [
        'memberships.label',
        'memberships.role',
        'memberships.start_date',
        'memberships.end_date',
        'memberships.post.label',
        'memberships.post.role',
        'memberships.post.id',
        'memberships.post.division.id',
        'memberships.post.division.name',
        'memberships.contact_details.type',
        'memberships.contact_details.value',
        'memberships.contact_details.note',
        'memberships.contact_details.label',
        'memberships.organization.id',
        'memberships.organization.name',
        'memberships.organization.jurisdiction.id',
    ]
class BillList(PublicListEndpoint):
    """List endpoint for Bills."""
    model = Bill
    serialize_config = BILL_SERIALIZE
    default_fields = [
        'id', 'identifier', 'title', 'classification', 'subject',
        'from_organization.name',
        'from_organization.id',
        'from_organization.jurisdiction.id',
        'from_organization.jurisdiction.name',
    ]

    def adjust_filters(self, params):
        """subject/classification are array fields; match via __contains."""
        for field in ('subject', 'classification'):
            if field in params:
                params[field + '__contains'] = [params.pop(field)]
        return params
class VoteList(PublicListEndpoint):
    """List endpoint for VoteEvents."""
    model = VoteEvent
    serialize_config = VOTE_SERIALIZE
    default_fields = [
        'result', 'motion_text', 'created_at', 'start_date', 'updated_at',
        'motion_classification', 'extras', 'id',
        'counts.value',
        'counts.option',
        'bill.identifier', 'bill.id',
        'organization.id', 'organization.name',
    ]

    def adjust_filters(self, params):
        """motion_classification is an array field; match via __contains."""
        if 'motion_classification' in params:
            value = params.pop('motion_classification')
            params['motion_classification__contains'] = [value]
        return params
class VoteDetail(PublicDetailEndpoint):
    """Detail endpoint for a single VoteEvent."""
    model = VoteEvent
    serialize_config = VOTE_SERIALIZE
    # All model fields except heavyweight relations, plus bill and
    # organization summaries.
    default_fields = get_field_list(model, without=[
        'eventrelatedentity',
        'legislative_session_id',
        'bill',
        'legislative_session',
        'organization',
        'organization_id',
        'bill_id',
        'locked_fields',
    ]) + [
        'bill.id',
        'bill.identifier',
        'bill.legislative_session.identifier',
        'organization.id',
        'organization.name',
        'organization.classification',
    ]
class BillDetail(PublicDetailEndpoint):
    """Detail endpoint for a single Bill."""
    model = Bill
    serialize_config = BILL_SERIALIZE
    # All model fields except heavyweight relations, plus expanded views
    # of the sponsoring organization, actions and votes.
    default_fields = get_field_list(model, without=[
        'from_organization_id',
        'eventrelatedentity',
        'related_bills_reverse',
        'legislative_session_id',
        'actions.organization',
        'votes',
        'locked_fields',
    ]) + [
        'from_organization.id',
        'from_organization.name',
        'legislative_session.identifier',
        'actions.description',
        'actions.date',
        'actions.classification',
        'actions.organization.id',
        'actions.organization.name',
        'actions.related_entities.name',
        'actions.related_entities.organization_id',
        'actions.related_entities.person_id',
        'actions.related_entities.entity_type',
        'votes.result',
        'votes.motion_text',
        'votes.start_date',
        'votes.motion_classification',
        'votes.id',
        'votes.counts',
    ]
class EventList(PublicListEndpoint):
    """List endpoint for Events."""
    model = Event
    serialize_config = EVENT_SERIALIZE
    default_fields = [
        'id', 'name', 'description', 'classification', 'start_time',
        'timezone', 'end_time', 'all_day', 'status',
    ]
class EventDetail(PublicDetailEndpoint):
    """Detail endpoint for a single Event."""
    model = Event
    serialize_config = EVENT_SERIALIZE
    # All model fields except the raw FK id and lock metadata.
    default_fields = get_field_list(model, without=[
        'location_id',
        'locked_fields',
    ])
class DivisionList(PublicListEndpoint):
    """List endpoint for Divisions, with point-in-time geographic lookup."""
    model = Division
    serialize_config = DIVISION_SERIALIZE
    default_fields = ['id', 'name', 'country']

    def filter(self, data, **params):
        # The date defaults to today and is only meaningful together with
        # a lat/lon point.
        DATE_FORMAT = "%Y-%m-%d"
        today = datetime.datetime.strftime(datetime.datetime.now(), DATE_FORMAT)
        lat = params.get('lat')
        lon = params.get('lon')
        date = datetime.datetime.strptime(
            params.get('date', today), DATE_FORMAT).date()
        if params.get('date') and not (lat and lon):
            raise HttpError(400, "If date specified, must also provide lat & lon")
        if (lat and lon):
            # Match boundaries whose validity window contains `date`
            # (missing start/end dates count as unbounded) and whose shape
            # contains the point.
            data = data.filter(
                Q(geometries__boundary__set__start_date__lte=date) | Q(geometries__boundary__set__start_date=None),
                Q(geometries__boundary__set__end_date__gte=date) | Q(geometries__boundary__set__end_date=None),
                geometries__boundary__shape__contains='POINT({} {})'.format(lon, lat)
            )
        elif (lat and not lon) or (lon and not lat):
            raise HttpError(400, "Must specify lat & lon together")
        return data
class DivisionDetail(PublicDetailEndpoint):
    """Detail endpoint for a single Division."""
    model = Division
    serialize_config = DIVISION_SERIALIZE
    default_fields = ['id',
                      'name',
                      'country',
                      'jurisdictions',
                      'children',
                      'geometries',
                      'posts.id',
                      'posts.organization.id',
                      'posts.organization.name',
                      'posts.organization.classification',
                      'posts.label',
                      'posts.role']
| {
"content_hash": "ef2e05db7d98909a01eb71323898380e",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 122,
"avg_line_length": 30.854037267080745,
"alnum_prop": 0.5753397081026673,
"repo_name": "opencivicdata/imago",
"id": "8d5ebd8d1bf20a6fb0ad4c1e77d95a7d38081b9c",
"size": "10076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imago/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39010"
}
],
"symlink_target": ""
} |
import sys
import os
import shutil
def cleanup_dump(dumpstr):
    """Strip the leading two fragments and the trailing fragment of a dump.

    Fragments are separated by blank lines; dumps with fewer than four
    fragments yield the empty string.
    """
    sections = dumpstr.split('\n\n')
    if len(sections) < 4:
        return ''
    return '\n\n'.join(sections[2:-1]) + '\n\n'
def identify_checkpoints(basedir, ident):
    """Find sampled-dump files in `basedir` and pair them with checkpoints.

    Matches files named like ``lm_lstm_epoch<E>_<V>.t7.<ident>.<T>.txt``
    (e.g. lm_lstm_epoch25.00_0.3859.t7.output.1.0.txt) and returns a list
    of tuples ``(dump_path, checkpoint_path, (epoch, vloss, temp))``.
    """
    cp_infos = []
    for path in os.listdir(basedir):
        fullpath = os.path.join(basedir, path)
        if not os.path.isfile(fullpath):
            continue
        if not (path.startswith('lm_lstm_epoch') and path.endswith('.txt')):
            continue
        if ident not in path:  # idiom fix: was `not ident in path`
            continue
        # Attempt super hacky parsing of the fixed-layout filename.
        inner = path[13:-4]
        halves = inner.split('_')
        if len(halves) != 2:
            continue
        parts = halves[1].split('.')
        if len(parts) != 6:
            continue
        if parts[3] != ident:
            continue
        epoch = halves[0]
        vloss = '.'.join([parts[0], parts[1]])
        temp = '.'.join([parts[4], parts[5]])
        cpname = 'lm_lstm_epoch' + epoch + '_' + vloss + '.t7'
        cp_infos.append((fullpath, os.path.join(basedir, cpname),
                         (epoch, vloss, temp)))
    return cp_infos
def process_dir(basedir, targetdir, ident, copy_cp = False, verbose = False):
    """Clean dump files found under `basedir` into `targetdir`, recursively.

    Output names are prefixed with the source directory name; optionally
    the matching checkpoints and the directory's command.txt are copied
    alongside.
    """
    (parent, dirname) = os.path.split(basedir)
    if dirname == '':
        # Trailing slash: split again to get the real directory name.
        (parent, dirname) = os.path.split(parent)
    cp_infos = identify_checkpoints(basedir, ident)
    for (dump_path, cp_path, (epoch, vloss, temp)) in cp_infos:
        if verbose:
            print('found dumpfile ' + dump_path)
        dump_name = dirname + '_epoch' + epoch + '_' + vloss + '.' + ident + '.' + temp + '.txt'
        cp_name = dirname + '_epoch' + epoch + '_' + vloss + '.t7'
        target_dump = os.path.join(targetdir, dump_name)
        target_cp = os.path.join(targetdir, cp_name)
        if verbose:
            print('  cpx ' + dump_path + ' ' + target_dump)
        with open(dump_path, 'rt') as infile:
            with open(target_dump, 'wt') as outfile:
                outfile.write(cleanup_dump(infile.read()))
        if copy_cp and os.path.isfile(cp_path):
            if verbose:
                print('  cp ' + cp_path + ' ' + target_cp)
            shutil.copy(cp_path, target_cp)
    if copy_cp and len(cp_infos) > 0:
        cmd_path = os.path.join(basedir, 'command.txt')
        target_cmd = os.path.join(targetdir, dirname + '.command')
        if os.path.isfile(cmd_path):
            if verbose:
                print('  cp ' + cmd_path + ' ' + target_cmd)
            shutil.copy(cmd_path, target_cmd)
    for path in os.listdir(basedir):
        fullpath = os.path.join(basedir, path)
        if os.path.isdir(fullpath):
            process_dir(fullpath, targetdir, ident, copy_cp=copy_cp, verbose=verbose)
def main(basedir, targetdir, ident = 'output', copy_cp = False, verbose = False):
    """Entry point: recursively process `basedir` into `targetdir`."""
    process_dir(basedir, targetdir, ident, copy_cp=copy_cp, verbose=verbose)
if __name__ == '__main__':
    import argparse
    # Command-line interface: two positional directories plus options.
    parser = argparse.ArgumentParser()
    parser.add_argument('basedir',
                        help='base rnn directory, must contain sample.lua')
    parser.add_argument('targetdir',
                        help='checkpoint directory, all subdirectories will be processed')
    parser.add_argument('-c', '--copy_cp', action='store_true',
                        help='copy checkpoints used to generate the output files')
    parser.add_argument('-i', '--ident', action='store', default='output',
                        help='identifier to look for to determine checkpoints')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose output')
    args = parser.parse_args()
    main(args.basedir, args.targetdir, ident=args.ident, copy_cp=args.copy_cp, verbose=args.verbose)
    exit(0)
| {
"content_hash": "eaeeee699cd218bb36739bfc1610b787",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 100,
"avg_line_length": 40.19387755102041,
"alnum_prop": 0.556994160954557,
"repo_name": "billzorn/mtgencode",
"id": "95df0f09ffd774a68c925aeb03474a03ba3d0327",
"size": "3961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/collect_checkpoints.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "110092"
},
{
"name": "Python",
"bytes": "267387"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
} |
from grab.selector import XpathSelector
class Reference(object):
    """Lazy reference into a parsed document.

    Attribute access (``ref.foo``) does not resolve anything immediately: it
    returns a new Reference that remembers ``foo`` as the element name to
    query later via :meth:`_text`.
    """
    def __init__(self, node, query=None, query_args=None):
        # NOTE(review): assigning ``self._node`` here shadows the ``_node``
        # method defined below, making that method unreachable on instances.
        self._node = node
        self._query = query
        # Avoid a shared mutable default by creating the dict per instance.
        self._query_args = {} if query_args is None else query_args
    def __getattr__(self, key):
        # Only called for attributes not found normally: treat the name as
        # an element to look up under the same node.
        return Reference(self._node, query=key)
    def _selector(self):
        """Wrap the underlying node in an XpathSelector."""
        return XpathSelector(self._node)
    def _text(self):
        # Presumably self._query is a tag name set via __getattr__; if the
        # Reference was built without a query this produces './/None' — TODO confirm.
        return self._selector().select('.//%s' % self._query).text()
    def _node(self):
        # NOTE(review): dead code — shadowed by the ``self._node`` attribute
        # set in __init__ (calling ``ref._node()`` invokes the stored node,
        # not this method). Confirm before removing or renaming.
        return self._selector().node()
    #def __call__(self, **kwargs):
        #self.query_args.update(kwargs)
        #return self
| {
"content_hash": "30914784422f3f83f9d3777414f48060",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 28,
"alnum_prop": 0.5978260869565217,
"repo_name": "subeax/grab",
"id": "dad4cd49596fb281b8562ba028e947193fd0c753",
"size": "644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grab/reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "976"
},
{
"name": "Perl",
"bytes": "45"
},
{
"name": "Python",
"bytes": "739023"
},
{
"name": "Shell",
"bytes": "317"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
import time
class CommunityFundProposalVoteListTest(NavCoinTestFramework):
    """Tests the proposalvotelist function of the Community fund."""
    def __init__(self):
        # A single node on a fresh chain is enough to exercise the RPC.
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1
    def setup_network(self, split=False):
        # No network split is needed for this test.
        self.nodes = self.setup_nodes()
        self.is_network_split = split
    def run_test(self):
        """Exercise proposalvotelist through create / vote / revote cycles."""
        self.nodes[0].staking(False)
        activate_cfund(self.nodes[0])
        self.nodes[0].donatefund(1000)
        # Preflight checks: the vote list starts out empty in all buckets.
        assert(len(self.nodes[0].proposalvotelist()["yes"]) == 0)
        assert(len(self.nodes[0].proposalvotelist()["no"]) == 0)
        assert(len(self.nodes[0].proposalvotelist()["null"]) == 0)
        address0 = self.nodes[0].getnewaddress()
        # Create 3 proposals
        proposalid0 = self.nodes[0].createproposal(address0, 1, 3600, "test0")["hash"]
        proposalid1 = self.nodes[0].createproposal(address0, 1, 3600, "test1")["hash"]
        proposalid2 = self.nodes[0].createproposal(address0, 1, 3600, "test2")["hash"]
        slow_gen(self.nodes[0], 1)
        # Verify the proposals are now in the proposal vote list
        # (un-voted proposals land in the "null" bucket).
        assert(len(self.nodes[0].proposalvotelist()["yes"]) == 0)
        assert(len(self.nodes[0].proposalvotelist()["no"]) == 0)
        assert(len(self.nodes[0].proposalvotelist()["null"]) == 3)
        # Vote on the proposals as wished
        self.nodes[0].proposalvote(proposalid0, "yes")
        self.nodes[0].proposalvote(proposalid1, "no")
        self.nodes[0].proposalvote(proposalid2, "yes")
        # "remove" clears the vote, returning proposalid2 to "null".
        self.nodes[0].proposalvote(proposalid2, "remove")
        # Verify the proposal vote list has changed according to the wallet's votes
        assert(len(self.nodes[0].proposalvotelist()["yes"]) == 1)
        assert(len(self.nodes[0].proposalvotelist()["no"]) == 1)
        assert(len(self.nodes[0].proposalvotelist()["null"]) == 1)
        # Verify the hashes are contained in the output vote list
        assert(proposalid0 == self.nodes[0].proposalvotelist()["yes"][0]["hash"])
        assert(proposalid1 == self.nodes[0].proposalvotelist()["no"][0]["hash"])
        assert(proposalid2 == self.nodes[0].proposalvotelist()["null"][0]["hash"])
        # Revote differently
        self.nodes[0].proposalvote(proposalid0, "no")
        self.nodes[0].proposalvote(proposalid1, "yes")
        self.nodes[0].proposalvote(proposalid2, "no")
        # Verify the proposal vote list has changed according to the wallet's votes
        assert(len(self.nodes[0].proposalvotelist()["yes"]) == 1)
        assert(len(self.nodes[0].proposalvotelist()["no"]) == 2)
        assert(len(self.nodes[0].proposalvotelist()["null"]) == 0)
        # Create new proposal
        proposalid3 = self.nodes[0].createproposal(address0, 1, 3600, "test3")["hash"]
        slow_gen(self.nodes[0], 1)
        # Check the new proposal has been added to "null" of proposal vote list
        assert(len(self.nodes[0].proposalvotelist()["null"]) == 1)
        assert(proposalid3 == self.nodes[0].proposalvotelist()["null"][0]["hash"])
if __name__ == '__main__':
    # Allow running this functional test directly from the command line.
    CommunityFundProposalVoteListTest().main()
| {
"content_hash": "795c6c3eaa7cedb1facdfa344e88883d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 86,
"avg_line_length": 42.17948717948718,
"alnum_prop": 0.6340425531914894,
"repo_name": "navcoindev/navcoin-core",
"id": "091e9c11f39233352bc1903d3bfdff745b2bb13e",
"size": "3500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/cfund-proposalvotelist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3655915"
},
{
"name": "C++",
"bytes": "4954999"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "176582"
},
{
"name": "Makefile",
"bytes": "105930"
},
{
"name": "Objective-C",
"bytes": "3771"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "946426"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3792"
},
{
"name": "Shell",
"bytes": "426873"
}
],
"symlink_target": ""
} |
import testtools
from testtools import matchers
from tempest.api.volume import base
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
CONF = config.CONF
class VolumesBackupsTest(base.BaseVolumeTest):
    """Volume backup lifecycle: create, list, restore; attached and bootable cases."""
    @classmethod
    def skip_checks(cls):
        """Skip the whole class when the cinder-backup service is not enabled."""
        super(VolumesBackupsTest, cls).skip_checks()
        if not CONF.volume_feature_enabled.backup:
            raise cls.skipException("Cinder backup feature disabled")
    def restore_backup(self, backup_id):
        """Restore *backup_id* into a new volume and wait for both to settle.

        The restored volume is scheduled for deletion at cleanup.  Returns
        the 'restore' document from the API response.
        """
        # Restore a backup
        restored_volume = self.backups_client.restore_backup(
            backup_id)['restore']
        # Schedule cleanup of the *restored volume* (not the backup itself)
        self.addCleanup(self.volumes_client.delete_volume,
                        restored_volume['volume_id'])
        self.assertEqual(backup_id, restored_volume['backup_id'])
        waiters.wait_for_volume_resource_status(self.backups_client,
                                                backup_id, 'available')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                restored_volume['volume_id'],
                                                'available')
        return restored_volume
    @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
                      'ceph does not support arbitrary container names')
    @decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
    def test_volume_backup_create_get_detailed_list_restore_delete(self):
        """Full backup round-trip; verifies metadata survives the restore."""
        # Create a volume with metadata
        metadata = {"vol-meta1": "value1",
                    "vol-meta2": "value2",
                    "vol-meta3": "value3"}
        volume = self.create_volume(metadata=metadata)
        self.addCleanup(self.volumes_client.delete_volume,
                        volume['id'])
        # Create a backup
        backup_name = data_utils.rand_name(
            self.__class__.__name__ + '-Backup')
        description = data_utils.rand_name("volume-backup-description")
        backup = self.create_backup(volume_id=volume['id'],
                                    name=backup_name,
                                    description=description,
                                    container='container')
        self.assertEqual(backup_name, backup['name'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')
        # Get a given backup
        backup = self.backups_client.show_backup(backup['id'])['backup']
        self.assertEqual(backup_name, backup['name'])
        self.assertEqual(description, backup['description'])
        self.assertEqual('container', backup['container'])
        # Get all backups with detail
        backups = self.backups_client.list_backups(
            detail=True)['backups']
        for backup_info in backups:
            self.assertIn('created_at', backup_info)
            self.assertIn('links', backup_info)
        self.assertIn((backup['name'], backup['id']),
                      [(m['name'], m['id']) for m in backups])
        restored_volume = self.restore_backup(backup['id'])
        restored_volume_metadata = self.volumes_client.show_volume(
            restored_volume['volume_id'])['volume']['metadata']
        # Verify the backups has been restored successfully
        # with the metadata of the source volume.
        self.assertThat(restored_volume_metadata.items(),
                        matchers.ContainsAll(metadata.items()))
    @decorators.idempotent_id('07af8f6d-80af-44c9-a5dc-c8427b1b62e6')
    @utils.services('compute')
    def test_backup_create_attached_volume(self):
        """Test backup create using force flag.

        Cinder allows to create a volume backup, whether the volume status
        is "available" or "in-use".
        """
        # Create a server
        volume = self.create_volume()
        self.addCleanup(self.volumes_client.delete_volume,
                        volume['id'])
        server = self.create_server()
        # Attach volume to instance
        self.attach_volume(server['id'], volume['id'])
        # Create backup using force flag
        backup_name = data_utils.rand_name(
            self.__class__.__name__ + '-Backup')
        backup = self.create_backup(volume_id=volume['id'],
                                    name=backup_name, force=True)
        self.assertEqual(backup_name, backup['name'])
    @decorators.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
    @utils.services('image')
    def test_bootable_volume_backup_and_restore(self):
        """Back up a bootable volume and check the restore stays bootable."""
        # Create volume from image
        img_uuid = CONF.compute.image_ref
        volume = self.create_volume(imageRef=img_uuid)
        volume_details = self.volumes_client.show_volume(
            volume['id'])['volume']
        self.assertEqual('true', volume_details['bootable'])
        # Create a backup
        backup = self.create_backup(volume_id=volume['id'])
        # Restore the backup
        restored_volume_id = self.restore_backup(backup['id'])['volume_id']
        # Verify the restored backup volume is bootable
        restored_volume_info = self.volumes_client.show_volume(
            restored_volume_id)['volume']
        self.assertEqual('true', restored_volume_info['bootable'])
class VolumesBackupsV39Test(base.BaseVolumeTest):
    """Backup tests requiring API microversion 3.9+ (backup update support)."""
    _api_version = 3
    # Microversion 3.9 introduced the backup-update API.
    min_microversion = '3.9'
    max_microversion = 'latest'
    @classmethod
    def skip_checks(cls):
        """Skip the whole class when the cinder-backup service is not enabled."""
        super(VolumesBackupsV39Test, cls).skip_checks()
        if not CONF.volume_feature_enabled.backup:
            raise cls.skipException("Cinder backup feature disabled")
    @decorators.idempotent_id('9b374cbc-be5f-4d37-8848-7efb8a873dcc')
    def test_update_backup(self):
        """Update a backup's name and description and verify via show_backup."""
        # Create volume and backup
        volume = self.create_volume()
        backup = self.create_backup(volume_id=volume['id'])
        # Update backup and assert response body for update_backup method
        update_kwargs = {
            'name': data_utils.rand_name(self.__class__.__name__ + '-Backup'),
            'description': data_utils.rand_name("volume-backup-description")
        }
        update_backup = self.backups_client.update_backup(
            backup['id'], **update_kwargs)['backup']
        self.assertEqual(backup['id'], update_backup['id'])
        self.assertEqual(update_kwargs['name'], update_backup['name'])
        self.assertIn('links', update_backup)
        # Assert response body for show_backup method
        retrieved_backup = self.backups_client.show_backup(
            backup['id'])['backup']
        for key in update_kwargs:
            self.assertEqual(update_kwargs[key], retrieved_backup[key])
| {
"content_hash": "c001b2a5dd6a7da57fda22f5403b796f",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 41.03012048192771,
"alnum_prop": 0.607693437087065,
"repo_name": "Juniper/tempest",
"id": "1e240b8ecf694a2b9379292d4dae0e267f2e0224",
"size": "7440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/volume/test_volumes_backup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4194970"
},
{
"name": "Shell",
"bytes": "19343"
}
],
"symlink_target": ""
} |
import unittest
from lxml import etree
from form import Form
class TestEventIsEmpty(unittest.TestCase):
    """Checks Event.is_empty(): an event is empty iff no field carries a value."""
    def _first_event(self, raw_xml):
        # Parse the XML snippet into a Form and return its first event.
        return Form(etree.fromstring(raw_xml)).events().next()
    def test_is_empty(self):
        # Every <value> element is empty, so the event should be empty.
        raw_xml = """
        <person_form_event>
            <person>
                <study_id>60</study_id>
                <all_form_events>
                    <form>
                        <name>hcv_rna_results</name>
                        <event>
                            <name>1_arm_1</name>
                            <field><name/><value/></field>
                            <field><name>hcv_lbdtc</name><value/></field>
                        </event>
                    </form>
                </all_form_events>
            </person>
        </person_form_event>"""
        self.assertTrue(self._first_event(raw_xml).is_empty())
    def test_is_not_empty(self):
        # One field has a value, so the event should not be empty.
        raw_xml = """
        <person_form_event>
            <person>
                <study_id>60</study_id>
                <all_form_events>
                    <form>
                        <name>hcv_rna_results</name>
                        <event>
                            <name>1_arm_1</name>
                            <field><name/><value>o hai</value></field>
                            <field><name>hcv_lbdtc</name><value/></field>
                        </event>
                    </form>
                </all_form_events>
            </person>
        </person_form_event>"""
        self.assertFalse(self._first_event(raw_xml).is_empty())
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "c9b986058a253ae603977e0a5d904166",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 67,
"avg_line_length": 29.472727272727273,
"alnum_prop": 0.42628007402837753,
"repo_name": "indera/redi",
"id": "8211a7e940b58993b1602b315ba350c973d8e3a6",
"size": "1621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/TestEventIsEmpty.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "10233"
},
{
"name": "PHP",
"bytes": "24097"
},
{
"name": "Perl",
"bytes": "848"
},
{
"name": "Python",
"bytes": "427169"
},
{
"name": "R",
"bytes": "7847"
},
{
"name": "Ruby",
"bytes": "363227"
},
{
"name": "Shell",
"bytes": "14860"
},
{
"name": "XSLT",
"bytes": "15145"
}
],
"symlink_target": ""
} |
import ConfigParser
import csv
import glob
import MySQLdb
import string
from bulkinsert import *
# Read database configuration from config file
config = ConfigParser.ConfigParser()
config.read("oa_alpha_etl.cnf")
username = config.get('database', 'username')
password = config.get('database', 'password')
hostname = config.get('database', 'hostname')
database = config.get('database', 'database')
dbConn = MySQLdb.connect(host=hostname,user=username,passwd=password,db=database)
cur = dbConn.cursor()
query = "TRUNCATE TABLE `OS_Locator`;"
cur.execute(query)
fields = ["Name", "Classification", "Centx", "Centy", "Minx", "Maxx", "Miny", "Maxy", "Settlement", "Locality", "Cou_Unit", "Local Authority", "Tile_10k", "Tile_25k", "Source", "MBR25"]
# basequery = "INSERT INTO OS_Locator(`Name`, `Classification`, `Centx`, `Centy`, `Minx`, `Maxx`, `Miny`, `Maxy`, `Settlement`, `Locality`, `Cou_Unit`, `Local Authority`, `Tile_10k`, `Tile_25k`, `Source`) "
bi = BulkInsert(cur,"OS_Locator",fields)
nrecs = 0
for file in glob.glob("OS*.txt"):
print file
csvfile = open(file, 'rb')
reader = csv.reader(csvfile, delimiter=':', quoting=csv.QUOTE_NONE)
for row in reader:
nrecs += 1
# print row
if (nrecs % 10000) == 0:
print "Records read: " + str(nrecs)
minx = int(row[4]) - 25
maxx = int(row[5]) + 25
miny = int(row[6]) - 25
maxy = int(row[7]) + 25
poly = "GeomFromText('Polygon(("+str(minx)+" "+str(miny)+","+str(minx)+" "+str(maxy)+","+str(maxx)+" "+str(maxy)+","+str(maxx)+" "+str(miny)+","+str(minx)+" "+str(miny)+"))')"
# print poly
row.append(poly)
bi.addRow(row)
# query = basequery + "VALUES(" + string.join(["'" + field.replace("'","\\'") + "'" for field in row],",") + ");"
# print query
# cur.execute(query)
print "Records read: " + str(nrecs)
bi.close()
dbConn.commit()
dbConn.close()
| {
"content_hash": "339d9d694dca6a8f9f4ce7f7b2480297",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 206,
"avg_line_length": 34.40350877192982,
"alnum_prop": 0.6053034166241713,
"repo_name": "OpenAddressesUK/common-ETL",
"id": "ab2226af86f8f17966d72b910687a3ddd1fe3408",
"size": "2231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OS_Locator_ETL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49019"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.conf import settings
from django.contrib.messages.api import error, MessageFailure
from django.shortcuts import redirect
from social_auth.exceptions import SocialAuthBaseException
from social_auth.utils import backend_setting, get_backend_name
class SocialAuthExceptionMiddleware(object):
    """Middleware that handles Social Auth AuthExceptions by providing the user
    with a message, logging an error, and redirecting to some next location.

    By default, the exception message itself is sent to the user and they are
    redirected to the location specified in the LOGIN_ERROR_URL setting.

    This middleware can be extended by overriding the get_message or
    get_redirect_uri methods, which each accept request and exception.
    """
    def process_exception(self, request, exception):
        # Only social-auth exceptions are handled; everything else propagates.
        self.backend = self.get_backend(request, exception)
        if self.raise_exception(request, exception):
            return
        if isinstance(exception, SocialAuthBaseException):
            backend_name = get_backend_name(self.backend)
            message = self.get_message(request, exception)
            url = self.get_redirect_uri(request, exception)
            tags = ['social-auth']
            if backend_name:
                tags.append(backend_name)
            try:
                error(request, message, extra_tags=' '.join(tags))
            except MessageFailure:  # messages app is not installed
                # Fall back to passing the message in the query string.
                # NOTE(review): neither message nor backend_name is
                # URL-encoded here, so values containing '&', '#' or spaces
                # produce a malformed redirect URL — consider urlencoding.
                url += ('?' in url and '&' or '?') + 'message=' + message
                if backend_name:
                    url += '&backend=' + backend_name
            return redirect(url)
    def get_backend(self, request, exception):
        # Cache the backend: prefer the one attached to the request,
        # fall back to the one attached to the exception.
        if not hasattr(self, 'backend'):
            self.backend = (
                getattr(request, 'backend', None)
                or getattr(exception, 'backend', None)
            )
        return self.backend
    def raise_exception(self, request, exception):
        """Return truthy when the backend is configured to re-raise exceptions."""
        backend = self.backend
        return backend and backend_setting(backend, 'SOCIAL_AUTH_RAISE_EXCEPTIONS')
    def get_message(self, request, exception):
        # Python 2 codebase: unicode() is the py2 builtin.
        return unicode(exception)
    def get_redirect_uri(self, request, exception):
        """Backend-specific error URL when set, else the global LOGIN_ERROR_URL."""
        if self.backend is not None:
            return (
                backend_setting(self.backend, 'SOCIAL_AUTH_BACKEND_ERROR_URL')
                or settings.LOGIN_ERROR_URL
            )
        return settings.LOGIN_ERROR_URL
| {
"content_hash": "97246655fe82edd0a4c080f45faca20a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 83,
"avg_line_length": 39.34920634920635,
"alnum_prop": 0.6442113755546591,
"repo_name": "fotinakis/sentry",
"id": "67052db9ab18407f30ff17bcbe7689b67531ad5f",
"size": "2503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/social_auth/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "222885"
},
{
"name": "HTML",
"bytes": "282398"
},
{
"name": "JavaScript",
"bytes": "927323"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5812"
},
{
"name": "Python",
"bytes": "11654397"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
} |
from flask import render_template, session, redirect, url_for, current_app, abort, flash, request, make_response
from flask.ext.login import login_user, logout_user, login_required, current_user
from .. import db
from ..models import User, Role,Post, Permission, Follow, Comment
from ..email import send_email
from . import main
from ..decorators import admin_required, permission_required
from .forms import NameForm, EditProfileForm, EditProfileAdminForm, PostForm, CommentForm
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: new-post form plus a paginated feed.

    The feed shows either all posts or only posts by followed users,
    selected via the 'show_followed' cookie (set by show_all/show_followed).
    """
    form=PostForm()
    if current_user.can(Permission.WRITE_ARTICLES) and form.validate_on_submit():
        post = Post(body=form.body.data, author=current_user._get_current_object())
        db.session.add(post)
        db.session.commit()
        # Redirect so a browser refresh does not re-submit the form.
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    show_followed = False
    if current_user.is_authenticated:
        # Empty cookie value means "show all"; any non-empty value means followed-only.
        show_followed = bool(request.cookies.get('show_followed', ''))
    if show_followed:
        query = current_user.followed_posts
    else:
        query = Post.query
    pagination = query.order_by(Post.timestamp.desc()).paginate(page,
        per_page = current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
    posts = pagination.items
    return render_template('index.html', form=form, posts = posts, pagination=pagination, show_followed = show_followed)
@main.route('/user/<username>', methods=['GET', 'POST'])
def user(username):
    """Profile page: show a user's details and all their posts, newest first."""
    profile = User.query.filter_by(username=username).first()
    if profile is None:
        abort(404)
    profile_posts = profile.posts.order_by(Post.timestamp.desc()).all()
    return render_template('user.html', user=profile, posts=profile_posts)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Single-post page with a paginated comment thread and a comment form."""
    post = Post.query.get_or_404(id)
    form = CommentForm()
    if form.validate_on_submit():
        comment = Comment(body=form.body.data, post = post,
            author = current_user._get_current_object())
        db.session.add(comment)
        db.session.commit()
        flash('your comment has been published')
        # page=-1 is a sentinel meaning "jump to the last page" so the
        # author immediately sees their new comment.
        return redirect(url_for('.post', id=post.id, page=-1))
    page = request.args.get('page', 1, type=int)
    if page == -1:
        # Translate the sentinel into the actual last page number.
        page = (post.comments.count() -1) / current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
    pagination = post.comments.order_by(Comment.timestamp.desc()).paginate(
        page, per_page = current_app.config['FLASKY_COMMENTS_PER_PAGE'], error_out=False)
    comments = pagination.items
    return render_template('post.html', posts=[post], form=form, comments=comments, pagination=pagination)
@main.route('/edit-posts/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_post(id):
    """Let a post's author (or an administrator) edit the post body.

    Fix: the permission check now runs before any form handling, so a
    non-author can no longer fetch the pre-populated edit form via GET
    (previously only the POST submission was rejected with 403).
    """
    post = Post.query.get_or_404(id)
    # Authorization first: only the author or an administrator may edit.
    if current_user != post.author and not current_user.can(Permission.ADMINISTER):
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        post.body = form.body.data
        db.session.add(post)
        db.session.commit()
        # Fixed flash grammar (was "The posts has been update.").
        flash('The post has been updated.')
        return redirect(url_for('.post', id=id))
    # GET (or invalid submit): pre-populate the form with the current body.
    form.body.data = post.body
    return render_template('edit_posts.html', form=form)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit their own name, location and bio."""
    form = EditProfileForm()
    if form.validate_on_submit():
        current_user.name = form.name.data
        current_user.location = form.location.data
        current_user.about_me = form.about_me.data
        db.session.add(current_user)
        db.session.commit()
        flash(' Your profile has been updated ')
        return redirect(url_for('.user', username=current_user.username))
    # GET (or invalid submit): pre-populate the form with the current values.
    form.name.data = current_user.name
    form.location.data = current_user.location
    form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', form = form)
@main.route('/edit-profile/<id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
    """Administrator-only profile editor: can also change email, username,
    confirmation status and role of any user."""
    user = User.query.get_or_404(id)
    form = EditProfileAdminForm(user=user)
    if form.validate_on_submit():
        user.email = form.email.data
        user.username = form.username.data
        user.confirmed = form.confirmed.data
        user.role_id = form.role.data
        user.name = form.name.data
        user.location = form.location.data
        user.about_me = form.about_me.data
        db.session.add(user)
        db.session.commit()
        flash('Your profile has been updated')
        return redirect(url_for('.user', username = user.username))
    # GET (or invalid submit): pre-populate the form with the current values.
    form.email.data = user.email
    form.username.data = user.username
    form.confirmed.data = user.confirmed
    form.role.data = user.role_id
    form.name.data = user.name
    form.location.data = user.location
    form.about_me.data = user.about_me
    return render_template('edit_profile.html', form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
    """Make the current user follow *username*, then redirect to their profile.

    Fixes: the "Invalide user." flash typo (now consistent with unfollow's
    "Invalid user.") and the ungrammatical "You are following the %s."
    message (would render e.g. "You are following the bob.").
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    if current_user.is_following(user):
        # Following is idempotent in the UI: just bounce back with a notice.
        flash('you have already followed this user')
        return redirect(url_for('.user', username=username))
    current_user.follow(user)
    flash('You are following %s.' %username)
    return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
    """Make the current user unfollow *username*, then redirect to their profile.

    Fix: the ungrammatical flash message "You are not follow this user".
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    if not current_user.is_following(user):
        flash('You are not following this user')
        return redirect(url_for('.user', username=username))
    current_user.unfollow(user)
    flash('You are not following %s anymore' %username)
    return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
    """Paginated list of the accounts that follow *username*."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followers.paginate(page,
        per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
    # Flatten Follow association rows into {user, timestamp} dicts for the template.
    follows = [{'user':item.follower, 'timestamp':item.timestamp}
                for item in pagination.items]
    return render_template('followers.html', user=user, title="Followers of",
        endpoint='.followers', pagination=pagination, follows=follows)
@main.route('/followed_by/<username>')
def followed_by(username):
    """Paginated list of the accounts that *username* follows.

    Mirror image of followers(); reuses the same template with a different
    title and endpoint.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1 ,type=int)
    pagination = user.followed.paginate(page,
        per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
    # Flatten Follow association rows into {user, timestamp} dicts for the template.
    follows = [{'user':item.followed, 'timestamp':item.timestamp}
                for item in pagination.items]
    return render_template('followers.html', user=user, title="Followed by",
        endpoint='.followed_by', pagination=pagination, follows=follows)
@main.route('/all')
@login_required
def show_all():
    """Switch the index feed to show every post (clears the cookie flag)."""
    response = make_response(redirect(url_for('.index')))
    # An empty value means "show all"; keep the preference for 30 days.
    thirty_days = 30 * 24 * 60 * 60
    response.set_cookie('show_followed', '', max_age=thirty_days)
    return response
@main.route('/followed')
@login_required
def show_followed():
    """Switch the index feed to followed-users-only (sets the cookie flag)."""
    response = make_response(redirect(url_for('.index')))
    # '1' means followed-only; keep the preference for 30 days.
    thirty_days = 30 * 24 * 60 * 60
    response.set_cookie('show_followed', '1', max_age=thirty_days)
    return response
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate():
    """Moderation dashboard: all comments, newest first, paginated."""
    page = request.args.get('page', 1, type=int)
    pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(page,
        per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'], error_out=False)
    comments = pagination.items
    return render_template('moderate.html', comments=comments, page=page, pagination=pagination)
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_enable(id):
    """Un-hide a comment, then return to the moderator's current page."""
    comment = Comment.query.get_or_404(id)
    comment.disabled = False
    db.session.add(comment)
    db.session.commit()
    # Preserve the page number so the moderator stays where they were.
    return redirect(url_for('.moderate', page=request.args.get('page', 1, type=int)))
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_disable(id):
    """Hide a comment, then return to the moderator's current page."""
    comment = Comment.query.get_or_404(id)
    comment.disabled = True
    db.session.add(comment)
    db.session.commit()
    # Preserve the page number so the moderator stays where they were.
    return redirect(url_for('.moderate', page=request.args.get('page', 1, type=int)))
| {
"content_hash": "7b8a94ce9f755542a983a589cc8f8ad7",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 117,
"avg_line_length": 36.418502202643175,
"alnum_prop": 0.7228740776581589,
"repo_name": "menghao2015/MyBlog",
"id": "4af087c6fe07dd9575e5063c981791baabff31fe",
"size": "8267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myapp/main/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1855"
},
{
"name": "HTML",
"bytes": "14852"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "52302"
}
],
"symlink_target": ""
} |
import logging
import os
import configparser

# Layered configuration: later files in the list override earlier ones,
# so the precedence is default < /etc < per-environment.
config = configparser.RawConfigParser()
HERE = os.path.dirname(__file__)
configfiles = [
    os.path.join(HERE, 'tictactoe.default.cfg'),  # default config
    '/etc/tictactoe.cfg',  # staging/live
    os.path.join(HERE, 'tictactoe.cfg'),  # per-environment config
]
# ConfigParser.read() silently skips missing files and returns the ones it
# actually parsed.
files_read = config.read(configfiles)
# Lazy %-style args: the message is only formatted if DEBUG logging is on.
logging.debug('config files read: %s', files_read)
| {
"content_hash": "469805ef9f993227cafa682930f4b31c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 24.38888888888889,
"alnum_prop": 0.6583143507972665,
"repo_name": "Motiejus/tictactoe",
"id": "85e5a96405d5febd7fda499956e9d0fba3d32a22",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tictactoe/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2248"
},
{
"name": "JavaScript",
"bytes": "3137"
},
{
"name": "Python",
"bytes": "35588"
}
],
"symlink_target": ""
} |
""" Complete the code below with the sklearn Naaive Bayes
classifier to classify the terrain data
The objective of this exercise is to recreate the decision
boundary found in the lesson video, and make a plot that
visually shows the decision boundary """
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture, output_image
from ClassifyNB import classify
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()

### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
# Pair each feature row with its label via zip() instead of indexing by
# position with range(len(...)) — same result, clearer and less error-prone.
grade_fast = [f[0] for f, lbl in zip(features_train, labels_train) if lbl == 0]
bumpy_fast = [f[1] for f, lbl in zip(features_train, labels_train) if lbl == 0]
grade_slow = [f[0] for f, lbl in zip(features_train, labels_train) if lbl == 1]
bumpy_slow = [f[1] for f, lbl in zip(features_train, labels_train) if lbl == 1]

clf = classify(features_train, labels_train)

### draw the decision boundary with the text points overlaid
prettyPicture(clf, features_test, labels_test)
# 'with' closes the handle (the original open() call leaked it).
with open("test.png", "rb") as png:
    output_image("test.png", "png", png.read())
| {
"content_hash": "fd9054747225ef3f5e66257ef63b362e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 99,
"avg_line_length": 34.214285714285715,
"alnum_prop": 0.7167710508002784,
"repo_name": "napjon/moocs_solution",
"id": "78c853e30c18f8c852079dec5c59bfb19a7b6d12",
"size": "1456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml-udacity/naive_bayes/GaussianNB_Deployment_on_Terrain_Data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4046"
},
{
"name": "Jupyter Notebook",
"bytes": "9892144"
},
{
"name": "Matlab",
"bytes": "300578"
},
{
"name": "Python",
"bytes": "441602"
},
{
"name": "R",
"bytes": "7797"
},
{
"name": "Shell",
"bytes": "681"
}
],
"symlink_target": ""
} |
import os
def ck_preprocess(i):
    """Prepare environment variables for the MobileNet/ARMCL benchmark run.

    For a remote (Android) target, weights/labels live next to the executable
    and the files to copy are exposed through ``run_input_files``; otherwise
    absolute host paths are used and nothing is pushed.
    Returns a CK-style dict: {'return': 0, 'new_env': ..., 'run_input_files': ...}.
    """
    def dep_env(dep, var):
        # Look up an env var exported by one of the resolved CK dependencies.
        return i['deps'][dep]['dict']['env'].get(var)

    labels_file = 'labels.txt'
    weights_dir = dep_env('weights', 'CK_ENV_MOBILENET')
    lib_dir = dep_env('library', 'CK_ENV_LIB_ARMCL')
    lib_name = dep_env('library', 'CK_ENV_LIB_ARMCL_DYNAMIC_CORE_NAME')

    new_env = {}
    files_to_push = []
    push_weights_to_remote = True
    push_libs_to_remote = True

    if i['target_os_dict'].get('remote', '') == 'yes':
        # On Android, weights and labels will sit next to the executable.
        new_env['CK_ENV_WEIGHTS_DIR'] = '.'
        new_env['CK_ENV_LABELS_FILE'] = labels_file
        # Files for the device must be exposed as env vars holding full
        # host paths so they are copied flat into the remote program dir.
        if push_libs_to_remote:
            new_env['CK_ENV_LABELS_FILE_PATH'] = os.path.join(os.getcwd(), '..', labels_file)
            new_env['CK_ENV_ARMCL_CORE_LIB_PATH'] = os.path.join(lib_dir, 'lib', lib_name)
            files_to_push.extend([
                "$<<CK_ENV_LABELS_FILE_PATH>>$",
                "$<<CK_ENV_ARMCL_CORE_LIB_PATH>>$",
                "$<<CK_ENV_LIB_STDCPP_DYNAMIC>>$",
            ])
        if push_weights_to_remote:
            # One env var (CK_ENV_WEIGHTS_<n>) per .npy weight file,
            # numbered in os.listdir() order.
            npy_files = (f for f in os.listdir(weights_dir) if f.endswith('.npy'))
            for index, file_name in enumerate(npy_files):
                var_name = 'CK_ENV_WEIGHTS_' + str(index)
                new_env[var_name] = os.path.join(weights_dir, file_name)
                files_to_push.append('$<<' + var_name + '>>$')
    else:
        # Local target: absolute host paths are fine.
        new_env['CK_ENV_WEIGHTS_DIR'] = weights_dir
        new_env['CK_ENV_LABELS_FILE'] = os.path.join('..', labels_file)

    new_env['CK_ENV_RESOLUTION'] = dep_env('weights', 'CK_ENV_MOBILENET_RESOLUTION')
    new_env['CK_ENV_MULTIPLIER'] = dep_env('weights', 'CK_ENV_MOBILENET_MULTIPLIER')

    return {'return': 0, 'new_env': new_env, 'run_input_files': files_to_push}
| {
"content_hash": "903032061f2bb4335bbf953f563978d3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 87,
"avg_line_length": 41.319148936170215,
"alnum_prop": 0.6292481977342945,
"repo_name": "ctuning/ck-math",
"id": "ec9b4b7d83ed80b28636ab50b9842c8b43c0f868",
"size": "2121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "program/armcl-classification-mobilenet/preprocess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "53300"
},
{
"name": "C",
"bytes": "23697"
},
{
"name": "C++",
"bytes": "272261"
},
{
"name": "Jupyter Notebook",
"bytes": "186975"
},
{
"name": "Makefile",
"bytes": "1066"
},
{
"name": "Python",
"bytes": "164188"
},
{
"name": "Roff",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "82402"
}
],
"symlink_target": ""
} |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FrameSoundsForm(object):
    """UI wiring for the FrameSoundsForm dialog (frame index + sound path).

    NOTE(review): this follows the pyuic5-generated layout -- if the form was
    created from a .ui file, regenerate it there instead of editing by hand.
    """

    def setupUi(self, FrameSoundsForm):
        # Top-level window (fixed 311x161 geometry).
        FrameSoundsForm.setObjectName("FrameSoundsForm")
        FrameSoundsForm.resize(311, 161)
        # "frame index" label and its spin box (range 0..100).
        self.label = QtWidgets.QLabel(FrameSoundsForm)
        self.label.setGeometry(QtCore.QRect(20, 10, 41, 16))
        self.label.setObjectName("label")
        self.frame_index_ = QtWidgets.QSpinBox(FrameSoundsForm)
        self.frame_index_.setGeometry(QtCore.QRect(70, 10, 211, 22))
        self.frame_index_.setAlignment(QtCore.Qt.AlignCenter)
        self.frame_index_.setMaximum(100)
        self.frame_index_.setObjectName("frame_index_")
        # "sound" label and a free-text line edit for the sound path.
        self.label_2 = QtWidgets.QLabel(FrameSoundsForm)
        self.label_2.setGeometry(QtCore.QRect(20, 60, 41, 16))
        self.label_2.setObjectName("label_2")
        self.frame_sound_ = QtWidgets.QLineEdit(FrameSoundsForm)
        self.frame_sound_.setGeometry(QtCore.QRect(70, 60, 211, 20))
        self.frame_sound_.setObjectName("frame_sound_")
        # "create" push button.
        self.btnCreate = QtWidgets.QPushButton(FrameSoundsForm)
        self.btnCreate.setGeometry(QtCore.QRect(110, 130, 75, 23))
        self.btnCreate.setObjectName("btnCreate")

        self.retranslateUi(FrameSoundsForm)
        QtCore.QMetaObject.connectSlotsByName(FrameSoundsForm)

    def retranslateUi(self, FrameSoundsForm):
        # Apply the (translatable) display strings to the widgets.
        _translate = QtCore.QCoreApplication.translate
        FrameSoundsForm.setWindowTitle(_translate("FrameSoundsForm", "FrameSoundsForm"))
        self.label.setText(_translate("FrameSoundsForm", "帧序:"))
        self.label_2.setText(_translate("FrameSoundsForm", "音乐:"))
        self.btnCreate.setText(_translate("FrameSoundsForm", "创建"))
| {
"content_hash": "544e87d698882a1502e349f1b26d4af6",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 88,
"avg_line_length": 50.11764705882353,
"alnum_prop": 0.6977699530516432,
"repo_name": "ASMlover/study",
"id": "7ef29b22ba617af8c7c25fa4f266e8827550cf58",
"size": "1953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/editor/frame_sounds_ui.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3055440"
},
{
"name": "Batchfile",
"bytes": "4662"
},
{
"name": "Brainfuck",
"bytes": "571"
},
{
"name": "C",
"bytes": "13569580"
},
{
"name": "C#",
"bytes": "3959"
},
{
"name": "C++",
"bytes": "14741264"
},
{
"name": "CMake",
"bytes": "543917"
},
{
"name": "CSS",
"bytes": "11505"
},
{
"name": "Common Lisp",
"bytes": "114"
},
{
"name": "Emacs Lisp",
"bytes": "6042"
},
{
"name": "Go",
"bytes": "105203"
},
{
"name": "Groovy",
"bytes": "2907"
},
{
"name": "HTML",
"bytes": "911945"
},
{
"name": "Lex",
"bytes": "9370"
},
{
"name": "Lua",
"bytes": "32829"
},
{
"name": "Makefile",
"bytes": "1000611"
},
{
"name": "NASL",
"bytes": "3609"
},
{
"name": "NewLisp",
"bytes": "5805"
},
{
"name": "Perl",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2752752"
},
{
"name": "SWIG",
"bytes": "91"
},
{
"name": "Shell",
"bytes": "9993"
},
{
"name": "Vim script",
"bytes": "92204"
},
{
"name": "Yacc",
"bytes": "6278"
}
],
"symlink_target": ""
} |
"""
Django settings for thebeau project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template

For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# Bug fix: a stray duplicate ``import os`` used to precede the docstring,
# which turned it into a dead string expression (not the module __doc__).
import os

import dj_database_url

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Prefer the environment; the literal remains only as a development fallback
# so existing deployments keep working.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    "8dlzqhz3zmzw)!ob^mt880fs0)_cqgtes+&z7gqlo%f*of#rw@")

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Rich-text editing support.
    'django_wysiwyg',
    'tinymce',
    # django-storages: S3 backend for static/media files (configured below).
    'storages',
    # Project apps.
    'construction',
    'master',
    'home',
)

# Use TinyMCE as the django-wysiwyg editor backend.
DJANGO_WYSIWYG_FLAVOR = "tinymce"

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'thebeau.urls'

TEMPLATES = (
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'debug': DEBUG,
        },
    },
)

# Outgoing mail via the 1and1 SMTP relay.  The password must be supplied in
# the MAIL_PW environment variable (import raises KeyError otherwise).
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.1and1.com'
EMAIL_HOST_USER = 'noreply@chateaudebeau.com'
EMAIL_HOST_PASSWORD = os.environ['MAIL_PW']
EMAIL_PORT = 587
EMAIL_USE_TLS = True
#EMAIL_USE_SSL = True

WSGI_APPLICATION = 'thebeau.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Local default is SQLite; overridden by $DATABASE_URL further below.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
AUTH_PASSWORD_VALIDATORS = (
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
)
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
"""
MEDIA_ROOT = "thebeau/static/"
#MEDIA_URL = "master/images/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
"""
AWS_STORAGE_BUCKET_NAME = os.environ['S3_BUCKET']
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
# Tell django-storages that when coming up with the URL for an item in S3 storage, keep
# it simple - just use this domain plus the path. (If this isn't set, things get complicated).
# This controls how the `static` template tag from `staticfiles` gets expanded, if you're using it.
# We also use it in the next setting.
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
# This is used by the `static` template tag from `static`, if you're using that. Or if anything else
# refers directly to STATIC_URL. So it's safest to always set it.
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = "https://%s/static/" % AWS_S3_CUSTOM_DOMAIN
# Tell the staticfiles app to use S3Boto storage when writing the collected static files (when
# you run `collectstatic`).
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
| {
"content_hash": "6505ae02673b7fc3838cdd2c3caee1f8",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 104,
"avg_line_length": 31.301136363636363,
"alnum_prop": 0.7028498820112543,
"repo_name": "j-windsor/thebeau",
"id": "3d3c1c362f7ff3b8a0f7499cc45579f11dec938b",
"size": "5509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thebeau/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86781"
},
{
"name": "HTML",
"bytes": "15007"
},
{
"name": "JavaScript",
"bytes": "16209"
},
{
"name": "Python",
"bytes": "15005"
}
],
"symlink_target": ""
} |
"""
WSGI config for webtox project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
sys.path.append('/home/shooz/webtox')
sys.path.append('/home/shooz/webtox/.env/lib/python3.5/site-packages')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webtox.settings")
application = get_wsgi_application()
| {
"content_hash": "1e181d06d632b166b71e7f3b94cb1cea",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 25.55,
"alnum_prop": 0.7651663405088063,
"repo_name": "SHooZ/WebTox",
"id": "0a21a757c898f00e91a3e2a647ba0ab03ea63067",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webtox/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3223"
},
{
"name": "JavaScript",
"bytes": "9039"
},
{
"name": "Makefile",
"bytes": "233"
},
{
"name": "Python",
"bytes": "13992"
}
],
"symlink_target": ""
} |
import os, sys, math, random
from collections import defaultdict
# Python 2/3 compatibility: alias the removed ``xrange`` builtin to ``range``
# on Python 3 so the rest of the script can use ``xrange`` uniformly.
if sys.version_info[0] >= 3:
    xrange = range
def exit_with_help(argv):
    """Print the usage message for *argv[0]* and terminate with status 1."""
    print("""\
Usage: {0} [options] dataset subset_size [output1] [output2]

This script randomly selects a subset of the dataset.

options:
-s method : method of selection (default 0)
     0 -- stratified selection (classification only)
     1 -- random selection

output1 : the subset (optional)
output2 : rest of the data (optional)

If output1 is omitted, the subset will be printed on the screen.""".format(argv[0]))
    # sys.exit instead of the bare ``exit`` builtin: ``exit`` is injected by
    # the ``site`` module and is not guaranteed to exist (e.g. with -S).
    sys.exit(1)
def process_options(argv):
    """Parse the command line.

    Returns (dataset, subset_size, method, subset_file, rest_file):
    method is 0 (stratified, the default) or 1 (random); subset_file
    defaults to sys.stdout and rest_file to None when the corresponding
    output path is omitted.  Exits with the usage message on bad input.
    """
    argc = len(argv)
    if argc < 3:
        exit_with_help(argv)

    method = 0          # stratified selection by default
    subset_file = sys.stdout
    rest_file = None

    # Consume leading "-flag value" pairs; stop at the first positional arg.
    pos = 1
    while pos < argc:
        if argv[pos][0] != "-":
            break
        if argv[pos] == "-s":
            pos = pos + 1
            method = int(argv[pos])
            if method not in [0, 1]:
                print("Unknown selection method {0}".format(method))
                exit_with_help(argv)
        pos = pos + 1

    dataset = argv[pos]
    subset_size = int(argv[pos + 1])
    if pos + 2 < argc:
        subset_file = open(argv[pos + 2], 'w')
    if pos + 3 < argc:
        rest_file = open(argv[pos + 3], 'w')

    return dataset, subset_size, method, subset_file, rest_file
def random_selection(dataset, subset_size):
    """Return *subset_size* sorted line indices sampled uniformly (without
    replacement) from the file *dataset*.

    Raises ValueError if subset_size exceeds the number of lines.
    """
    # Bug fix: use a context manager so the file handle is closed
    # deterministically (the original relied on garbage collection).
    with open(dataset, 'r') as f:
        total = sum(1 for _ in f)
    # ``range`` works on both Python 2 and 3; random.sample only indexes it.
    return sorted(random.sample(range(total), subset_size))
def stratified_selection(dataset, subset_size):
    """Return sorted line indices forming a class-stratified subset.

    Each label keeps roughly its original proportion, with at least one
    instance per class; exits with an error message when that is impossible
    (e.g. regression labels).
    """
    labels = [line.split(None, 1)[0] for line in open(dataset)]
    by_label = defaultdict(list)
    for lineno, label in enumerate(labels):
        by_label[label].append(lineno)

    total = len(labels)
    remaining = subset_size
    selected = []

    # Classes with fewer data are sampled first; otherwise some rare
    # classes may not be selected at all.
    for label in sorted(by_label, key=lambda lab: len(by_label[lab])):
        linenums = by_label[label]
        class_size = len(linenums)
        # Proportional share, but at least one instance per class and never
        # more than the remaining budget.
        share = int(min(remaining, max(1, math.ceil(class_size * (float(subset_size) / total)))))
        if share == 0:
            sys.stderr.write('''\
Error: failed to have at least one instance per class
1. You may have regression data.
2. Your classification data is unbalanced or too small.
Please use -s 1.
''')
            sys.exit(-1)
        remaining -= share
        selected += [linenums[idx] for idx in random.sample(range(class_size), share)]
    return sorted(selected)
def main(argv=sys.argv):
    """Entry point: select a subset and write it (and optionally the rest).

    Reads the dataset once, routing each line either to the subset output
    or to the rest output, in the original order.
    """
    dataset, subset_size, method, subset_file, rest_file = process_options(argv)
    # uncomment the following line to fix the random seed
    # random.seed(0)

    selected_lines = []
    if method == 0:
        selected_lines = stratified_selection(dataset, subset_size)
    elif method == 1:
        selected_lines = random_selection(dataset, subset_size)

    # select instances based on selected_lines
    data = open(dataset, 'r')
    prev_selected_linenum = -1
    for lineno in selected_lines:
        # Lines between the previous selection and this one go to "rest".
        for _ in range(lineno - prev_selected_linenum - 1):
            line = data.readline()
            if rest_file:
                rest_file.write(line)
        subset_file.write(data.readline())
        prev_selected_linenum = lineno
    # Bug fix: do not close sys.stdout when no output file was given
    # (the original closed subset_file unconditionally).
    if subset_file is not sys.stdout:
        subset_file.close()

    if rest_file:
        for line in data:
            rest_file.write(line)
        rest_file.close()
    data.close()


if __name__ == '__main__':
    main(sys.argv)
| {
"content_hash": "02fb9ef51e704cad96c05564a9e25043",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 89,
"avg_line_length": 28.822580645161292,
"alnum_prop": 0.6088416340235031,
"repo_name": "kbiscanic/apt_project",
"id": "a8ae7d3920818a9221e36be1d5c23e2e756efef1",
"size": "3597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apt/libsvm/tools/subset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "71688"
},
{
"name": "C++",
"bytes": "96272"
},
{
"name": "HTML",
"bytes": "77794"
},
{
"name": "Java",
"bytes": "100815"
},
{
"name": "Makefile",
"bytes": "3593"
},
{
"name": "Matlab",
"bytes": "777"
},
{
"name": "Perl",
"bytes": "1281"
},
{
"name": "Python",
"bytes": "79626"
},
{
"name": "TeX",
"bytes": "36678"
}
],
"symlink_target": ""
} |
from collections import deque
import json
import gevent
import app as app_module
from flask_sockets import Sockets
from geventwebsocket.exceptions import WebSocketError
# Outgoing push messages buffered until a websocket client drains them.
message_queue = deque()


def handle_socket_push(**message):
    """Queue one push message (as a dict of keyword args) for delivery."""
    message_queue.append(message)
# Install the queue-backed handler as the app's push hook and wire up
# flask-sockets on the shared Flask app.
app_module.socket_push = handle_socket_push

sockets = Sockets(app_module.app)


@sockets.route('/display/socket')
def echo(ws):
    """Drain queued push messages to a connected websocket client as JSON."""
    # Busy-polls the queue; gevent.sleep() yields to other greenlets between
    # iterations.  NOTE(review): with no pending messages this loop spins --
    # a blocking queue would be cheaper; confirm before changing.
    while not ws.closed:
        if len(message_queue) > 0:
            blob = json.dumps(message_queue.popleft())
            try:
                ws.send(blob)
            except WebSocketError:
                # Client went away mid-send; stop serving this socket.
                break
        gevent.sleep()


app = app_module.app
| {
"content_hash": "aede2d0d01d9e2e5782e2a623b98b035",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 54,
"avg_line_length": 20.59375,
"alnum_prop": 0.6631259484066768,
"repo_name": "Queens-Hacks/thoughtboard",
"id": "9b38c1ba20efe26f630bfd07a8357b5c2441bfb4",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "socketed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "40947"
},
{
"name": "Python",
"bytes": "18349"
}
],
"symlink_target": ""
} |
import errno
import os
import uuid
import exceptions
from nose.plugins.attrib import attr
from manager_rest.test import base_test
from manager_rest import manager_exceptions
from manager_rest.constants import DEFAULT_TENANT_NAME
from manager_rest.constants import FILE_SERVER_DEPLOYMENTS_FOLDER
from cloudify_rest_client.exceptions import CloudifyClientError
#
TEST_PACKAGE_NAME = 'cloudify-script-plugin'
TEST_PACKAGE_VERSION = '1.2'
@attr(client_min_version=1, client_max_version=base_test.LATEST_API_VERSION)
class DeploymentsTestCase(base_test.BaseServerTestCase):
DEPLOYMENT_ID = 'deployment'
    def test_get_empty(self):
        """A fresh manager has no deployments."""
        result = self.client.deployments.list()
        self.assertEquals(0, len(result))
    def test_put(self):
        """Creating a deployment echoes its id, blueprint and timestamps."""
        (blueprint_id,
         deployment_id,
         blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        self.assertEquals(deployment_id, self.DEPLOYMENT_ID)
        self.assertEquals(blueprint_id, deployment_response['blueprint_id'])
        self.assertIsNotNone(deployment_response['created_at'])
        self.assertIsNotNone(deployment_response['updated_at'])
    @attr(client_min_version=3,
          client_max_version=base_test.LATEST_API_VERSION)
    def test_sort_list(self):
        """Deployments can be listed sorted by created_at, both directions."""
        self.put_deployment(deployment_id='0', blueprint_id='0')
        self.put_deployment(deployment_id='1', blueprint_id='1')

        deployments = self.client.deployments.list(sort='created_at')
        self.assertEqual(2, len(deployments))
        self.assertEqual('0', deployments[0].id)
        self.assertEqual('1', deployments[1].id)

        deployments = self.client.deployments.list(
            sort='created_at', is_descending=True)
        self.assertEqual(2, len(deployments))
        self.assertEqual('1', deployments[0].id)
        self.assertEqual('0', deployments[1].id)
    @attr(client_min_version=2.1,
          client_max_version=base_test.LATEST_API_VERSION)
    def test_put_scaling_groups(self):
        """Scaling groups from the blueprint appear on the deployment."""
        _, _, _, deployment_response = self.put_deployment(
            self.DEPLOYMENT_ID,
            blueprint_file_name='modify3-scale-groups.yaml')
        self.assertIn('group', deployment_response['scaling_groups'])
    def test_delete_blueprint_which_has_deployments(self):
        """Deleting a blueprint that still has deployments fails with 400."""
        (blueprint_id,
         deployment_id,
         blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        resp = self.delete('/blueprints/{0}'.format(blueprint_id))
        self.assertEqual(400, resp.status_code)
        self.assertTrue('There exist deployments for this blueprint' in
                        resp.json['message'])
        self.assertEquals(
            resp.json['error_code'],
            manager_exceptions.DependentExistsError
            .DEPENDENT_EXISTS_ERROR_CODE)
    def test_deployment_already_exists(self):
        """Re-creating an existing deployment id returns a 409 conflict."""
        (blueprint_id,
         deployment_id,
         blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        deployment_response = self.put(
            '/deployments/{0}'.format(self.DEPLOYMENT_ID),
            {'blueprint_id': blueprint_id})
        self.assertTrue('already exists' in
                        deployment_response.json['message'])
        self.assertEqual(409, deployment_response.status_code)
        self.assertEqual(deployment_response.json['error_code'],
                         manager_exceptions.ConflictError.CONFLICT_ERROR_CODE)
    def test_get_by_id(self):
        """GET /deployments/<id> returns the created deployment's fields."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        single_deployment = self.get('/deployments/{0}'
                                     .format(deployment_id)).json
        self.assertEquals(deployment_id, single_deployment['id'])
        self.assertEquals(deployment_response['blueprint_id'],
                          single_deployment['blueprint_id'])
        self.assertEquals(deployment_response['id'],
                          single_deployment['id'])
        self.assertEquals(deployment_response['created_at'],
                          single_deployment['created_at'])
        # A freshly created deployment has updated_at == created_at.
        self.assertEquals(deployment_response['created_at'],
                          single_deployment['updated_at'])
    def test_get(self):
        """Listing deployments returns the single created deployment."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        get_deployments_response = self.client.deployments.list()
        self.assertEquals(1, len(get_deployments_response))
        single_deployment = get_deployments_response[0]
        self.assertEquals(deployment_id, single_deployment['id'])
        self.assertEquals(deployment_response['blueprint_id'],
                          single_deployment['blueprint_id'])
        self.assertEquals(deployment_response['id'],
                          single_deployment['id'])
        self.assertEquals(deployment_response['created_at'],
                          single_deployment['created_at'])
        # A freshly created deployment has updated_at == created_at.
        self.assertEquals(deployment_response['created_at'],
                          single_deployment['updated_at'])
    def test_get_executions_of_deployment(self):
        """A started workflow shows up in the deployment's execution list."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        execution = self.client.executions.start(deployment_id, 'install')
        self.assertEquals('install', execution.workflow_id)
        self.assertEquals(blueprint_id, execution['blueprint_id'])
        self.assertEquals(deployment_id, execution.deployment_id)
        self.assertIsNotNone(execution.created_at)
        executions = self.client.executions.list(deployment_id=deployment_id)

        # expecting two executions - 'install' and
        # 'create_deployment_environment'
        self.assertEquals(2, len(executions))
        self.assertIn(execution['id'],
                      [executions[0]['id'], executions[1]['id']])
        self.assertIn('create_deployment_environment',
                      [executions[1]['workflow_id'],
                       executions[0]['workflow_id']])
def test_executing_nonexisting_workflow(self):
(blueprint_id, deployment_id, blueprint_response,
deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
try:
self.client.executions.start(deployment_id,
'nonexisting-workflow-id')
self.fail()
except CloudifyClientError, e:
self.assertEqual(400, e.status_code)
error = manager_exceptions.NonexistentWorkflowError
self.assertEquals(error.NONEXISTENT_WORKFLOW_ERROR_CODE,
e.error_code)
def test_listing_executions_for_nonexistent_deployment(self):
try:
self.client.executions.list(deployment_id='doesnotexist')
self.fail()
except CloudifyClientError, e:
self.assertEqual(404, e.status_code)
self.assertEquals(
manager_exceptions.NotFoundError.NOT_FOUND_ERROR_CODE,
e.error_code)
    def test_get_workflows_of_deployment(self):
        """Deployment exposes the blueprint's workflows with parameters."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(
            self.DEPLOYMENT_ID, 'blueprint_with_workflows.yaml')

        resource_path = '/deployments/{0}'.format(deployment_id)
        workflows = self.get(resource_path).json['workflows']
        self.assertEquals(8, len(workflows))
        workflow = next((workflow for workflow in workflows if
                         workflow['name'] == 'mock_workflow'), None)
        self.assertIsNotNone(workflow)
        self.assertTrue('created_at' in workflow)
        # Declared parameters are returned verbatim, including defaults.
        parameters = {
            'optional_param': {'default': 'test_default_value'},
            'mandatory_param': {},
            'mandatory_param2': {},
            'nested_param': {
                'default': {
                    'key': 'test_key',
                    'value': 'test_value'
                }
            }
        }
        self.assertEquals(parameters, workflow['parameters'])
    def test_delete_deployment_verify_nodes_deletion(self):
        """Deleting a deployment also deletes its node instances."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        nodes = self.client.node_instances.list(deployment_id=deployment_id)
        self.assertTrue(len(nodes) > 0)
        nodes_ids = [node['id'] for node in nodes]

        delete_deployment_response = self.delete(
            '/deployments/{0}'.format(deployment_id),
            query_params={'ignore_live_nodes': 'true'}).json
        self.assertEquals(deployment_id, delete_deployment_response['id'])

        # verifying deletion of deployment nodes and executions
        for node_id in nodes_ids:
            resp = self.get('/node-instances/{0}'.format(node_id))
            self.assertEquals(404, resp.status_code)
    def test_delete_deployment_with_live_nodes_without_ignore_flag(self):
        """Deleting a deployment with live nodes (and no ignore flag) fails."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)

        # modifying a node's state so there'll be a node in a state other
        # than 'uninitialized'
        nodes = self.client.node_instances.list(deployment_id=deployment_id)
        resp = self.patch('/node-instances/{0}'.format(nodes[0]['id']), {
            'version': 1,
            'state': 'started'
        })
        self.assertEquals(200, resp.status_code)

        # attempting to delete the deployment - should fail because there
        # are live nodes for the deployment
        delete_deployment_response = self.delete('/deployments/{0}'.format(
            deployment_id))
        self.assertEquals(400, delete_deployment_response.status_code)
        self.assertEquals(delete_deployment_response.json['error_code'],
                          manager_exceptions.DependentExistsError
                          .DEPENDENT_EXISTS_ERROR_CODE)
    def test_delete_deployment_with_uninitialized_nodes(self):
        """Deleting right after creation (all nodes uninitialized) succeeds."""
        # simulates a deletion of a deployment right after its creation
        # (i.e. all nodes are still in 'uninitialized' state because no
        # execution has yet to take place)
        self._test_delete_deployment_with_nodes_in_certain_state(
            'uninitialized')
    def test_delete_deployment_without_ignore_flag(self):
        """Deleting after uninstall (all nodes 'deleted') succeeds."""
        # simulates a deletion of a deployment after the uninstall workflow
        # has completed (i.e. all nodes are in 'deleted' state)
        self._test_delete_deployment_with_nodes_in_certain_state('deleted')
    def _test_delete_deployment_with_nodes_in_certain_state(self, state):
        """Helper: force every node into *state*, then delete and verify."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        nodes = self.client.node_instances.list(deployment_id=deployment_id)

        # modifying nodes states
        for node in nodes:
            resp = self.patch('/node-instances/{0}'.format(node['id']), {
                'version': 1,
                'state': state
            })
            self.assertEquals(200, resp.status_code)

        # deleting the deployment
        delete_deployment_response = self.delete('/deployments/{0}'.format(
            deployment_id))
        self.assertEquals(200, delete_deployment_response.status_code)
        self.assertEquals(deployment_id,
                          delete_deployment_response.json['id'])

        # verifying deletion of deployment
        resp = self.get('/deployments/{0}'.format(deployment_id))
        self.assertEquals(404, resp.status_code)
    def test_delete_deployment_with_live_nodes_and_ignore_flag(self):
        """ignore_live_nodes=true allows deletion even with live nodes."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        delete_deployment_response = self.delete(
            '/deployments/{0}'.format(deployment_id),
            query_params={'ignore_live_nodes': 'true'}).json
        self.assertEquals(deployment_id, delete_deployment_response['id'])

        # verifying deletion of deployment
        resp = self.get('/deployments/{0}'.format(deployment_id))
        self.assertEquals(404, resp.status_code)
    def test_delete_nonexistent_deployment(self):
        """Deleting an unknown deployment id returns 404 not-found."""
        # trying to delete a nonexistent deployment
        resp = self.delete('/deployments/nonexistent-deployment')
        self.assertEquals(404, resp.status_code)
        self.assertEquals(
            resp.json['error_code'],
            manager_exceptions.NotFoundError.NOT_FOUND_ERROR_CODE)
def test_get_nodes_of_deployment(self):
(blueprint_id, deployment_id, blueprint_response,
deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
nodes = self.client.node_instances.list(deployment_id=deployment_id)
self.assertEquals(2, len(nodes))
def assert_node_exists(starts_with):
self.assertTrue(any(map(
lambda n: n['id'].startswith(starts_with), nodes)),
'Failed finding node with prefix {0}'.format(starts_with))
assert_node_exists('vm')
assert_node_exists('http_web_server')
    def test_delete_deployment_folder_from_file_server(self):
        """Deleting a deployment removes its file-server folder."""
        (blueprint_id, deployment_id, blueprint_response,
         deployment_response) = self.put_deployment(self.DEPLOYMENT_ID)
        config = self.server_configuration
        deployment_folder = os.path.join(config.file_server_root,
                                         FILE_SERVER_DEPLOYMENTS_FOLDER,
                                         DEFAULT_TENANT_NAME,
                                         deployment_id)
        # Create the folder if needed; tolerate it already existing.
        try:
            os.makedirs(deployment_folder)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(deployment_folder):
                pass
            else:
                raise
        # Drop a resource file inside so deletion is non-trivial.
        deployment_resource_path = os.path.join(deployment_folder, 'test.txt')
        print('Creating deployment resource: {0}'.format(
            deployment_resource_path))
        with open(deployment_resource_path, 'w') as f:
            f.write('deployment resource')
        self.client.deployments.delete(deployment_id)
        self.assertFalse(os.path.exists(deployment_folder))
def test_inputs(self):
self.put_deployment(
blueprint_file_name='blueprint_with_inputs.yaml',
blueprint_id='5566',
deployment_id=self.DEPLOYMENT_ID,
inputs={'http_web_server_port': '8080'})
node = self.client.nodes.get(self.DEPLOYMENT_ID, 'http_web_server')
self.assertEqual('8080', node.properties['port'])
try:
self.put_deployment(
blueprint_file_name='blueprint_with_inputs.yaml',
blueprint_id='1122',
deployment_id=self.DEPLOYMENT_ID,
inputs='illegal')
except CloudifyClientError, e:
self.assertTrue('inputs parameter is expected' in str(e))
try:
self.put_deployment(
blueprint_id='3344',
blueprint_file_name='blueprint_with_inputs.yaml',
deployment_id=self.DEPLOYMENT_ID,
inputs={'some_input': '1234'})
except CloudifyClientError, e:
self.assertIn('were not specified', str(e))
try:
self.put_deployment(
blueprint_id='7788',
blueprint_file_name='blueprint_with_inputs.yaml',
deployment_id=self.DEPLOYMENT_ID,
inputs={
'http_web_server_port': '1234',
'unknown_input': 'yey'
})
except CloudifyClientError, e:
self.assertTrue('Unknown input' in str(e))
    def test_outputs(self):
        """Deployment outputs are evaluated from node runtime properties."""
        id_ = str(uuid.uuid4())
        self.put_deployment(
            blueprint_file_name='blueprint_with_outputs.yaml',
            blueprint_id=id_,
            deployment_id=id_)
        instances = self.client.node_instances.list(deployment_id=id_)

        # Seed runtime properties the outputs are computed from.
        vm = [x for x in instances if x.node_id == 'vm'][0]
        vm_props = {'ip': '10.0.0.1'}
        self.client.node_instances.update(vm.id, runtime_properties=vm_props)

        ws = [x for x in instances if x.node_id == 'http_web_server'][0]
        ws_props = {'port': 8080}
        self.client.node_instances.update(ws.id, runtime_properties=ws_props)

        response = self.client.deployments.outputs.get(id_)
        self.assertEqual(id_, response.deployment_id)
        outputs = response.outputs
        self.assertTrue('ip_address' in outputs)
        self.assertTrue('port' in outputs)
        self.assertEqual('10.0.0.1', outputs['ip_address'])
        self.assertEqual(80, outputs['port'])

        # Output descriptions are also exposed on the deployment itself.
        dep = self.client.deployments.get(id_)
        self.assertEqual('Web site IP address.',
                         dep.outputs['ip_address']['description'])
        self.assertEqual('Web site port.', dep.outputs['port']['description'])

        endpoint = outputs['endpoint']
        self.assertEqual('http', endpoint['type'])
        self.assertEqual('10.0.0.1', endpoint['ip'])
        self.assertEqual(8080, endpoint['port'])
        self.assertEqual(81, outputs['port2'])
def test_illegal_output(self):
id_ = str(uuid.uuid4())
self.put_deployment(
blueprint_file_name='blueprint_with_illegal_output.yaml',
blueprint_id=id_,
deployment_id=id_)
try:
self.client.deployments.outputs.get(id_)
self.fail()
except CloudifyClientError, e:
self.assertEqual(400, e.status_code)
self.assertEqual(
manager_exceptions.DeploymentOutputsEvaluationError.ERROR_CODE,
e.error_code)
@attr(client_min_version=3.1,
client_max_version=base_test.LATEST_API_VERSION)
def test_creation_failure_when_plugin_not_found_central_deployment(self):
from cloudify_rest_client.exceptions import DeploymentPluginNotFound
id_ = str(uuid.uuid4())
try:
self.put_deployment(
blueprint_file_name='deployment_with_source_plugin.yaml',
blueprint_id=id_,
deployment_id=id_)
raise exceptions.AssertionError(
"Expected DeploymentPluginNotFound error")
except DeploymentPluginNotFound, e:
self.assertEqual(412, e.status_code)
self.assertEqual(manager_exceptions.DeploymentPluginNotFound.
ERROR_CODE,
e.error_code)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_failure_when_plugin_not_found_host_agent(self):
    """Deployment creation fails with HTTP 412 when a host-agent
    source plugin is not available on the manager."""
    from cloudify_rest_client.exceptions import DeploymentPluginNotFound
    id_ = str(uuid.uuid4())
    try:
        self.put_deployment(
            blueprint_file_name='deployment_'
                                'with_source_plugin_host_agent.yaml',
            blueprint_id=id_,
            deployment_id=id_)
        # self.fail raises AssertionError without relying on the
        # Python-2-only 'exceptions' builtin module.
        self.fail("Expected DeploymentPluginNotFound error")
    except DeploymentPluginNotFound as e:
        self.assertEqual(412, e.status_code)
        self.assertEqual(
            manager_exceptions.DeploymentPluginNotFound.ERROR_CODE,
            e.error_code)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_success_when_source_plugin_exists_on_manager(self):
    """Validation passes when the source plugin is already uploaded."""
    self.upload_plugin(TEST_PACKAGE_NAME, TEST_PACKAGE_VERSION).json
    deployment_id = str(uuid.uuid4())
    self.put_deployment(
        blueprint_file_name='deployment_with_existing_plugin_on_manager.yaml',
        blueprint_id=deployment_id,
        deployment_id=deployment_id)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_success_when_source_plugin_with_address_exists(self):
    """A source plugin referenced by address passes validation when
    the plugin exists on the manager."""
    self.upload_plugin(TEST_PACKAGE_NAME, TEST_PACKAGE_VERSION).json
    deployment_id = str(uuid.uuid4())
    self.put_deployment(
        blueprint_file_name='deployment_with_source_address.yaml',
        blueprint_id=deployment_id,
        deployment_id=deployment_id)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_success_when_plugin_not_found_with_new_flag(self):
    """skip_plugins_validation=True lets creation succeed even though
    the source plugin is missing."""
    deployment_id = str(uuid.uuid4())
    self.put_deployment(
        blueprint_file_name='deployment_with_source_plugin.yaml',
        blueprint_id=deployment_id,
        deployment_id=deployment_id,
        skip_plugins_validation=True)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_failure_with_invalid_flag_argument(self):
    """A non-boolean skip_plugins_validation value is rejected with
    HTTP 400 / bad-parameters error code."""
    id_ = str(uuid.uuid4())
    try:
        self.put_deployment(
            blueprint_file_name='deployment_with_source_plugin.yaml',
            blueprint_id=id_,
            deployment_id=id_,
            skip_plugins_validation='invalid_arg')
        # self.fail raises AssertionError without relying on the
        # Python-2-only 'exceptions' builtin module.
        self.fail("Expected CloudifyClientError")
    except CloudifyClientError as e:
        self.assertEqual(400, e.status_code)
        self.assertEqual(
            manager_exceptions.BadParametersError.BAD_PARAMETERS_ERROR_CODE,
            e.error_code)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_failure_without_skip_plugins_validation_argument(self):
    """Omitting skip_plugins_validation keeps validation enabled, so a
    missing source plugin yields HTTP 412."""
    dep_id = str(uuid.uuid4())
    self.put_blueprint(
        'mock_blueprint', 'deployment_with_source_plugin.yaml', dep_id)
    response = self.put(
        '/deployments/{}'.format(dep_id), {'blueprint_id': dep_id})
    self.assertEqual(412, response.status_code)
    self.assertEqual('412 PRECONDITION FAILED', response.status)
    self.assertEqual('deployment_plugin_not_found',
                     response.json['error_code'])
@attr(client_min_version=1, client_max_version=3)
def test_creation_success_when_plugin_not_found_central_deployment_agent(
        self):
    """Pre-3.1 API versions do not validate source plugins."""
    dep_id = str(uuid.uuid4())
    self.put_deployment(
        blueprint_file_name='deployment_with_source_plugin.yaml',
        blueprint_id=dep_id,
        deployment_id=dep_id)
@attr(client_min_version=1, client_max_version=3)
def test_creation_success_when_plugin_not_found_host_agent(self):
    """Pre-3.1 API versions do not validate host-agent source plugins."""
    dep_id = str(uuid.uuid4())
    self.put_deployment(
        blueprint_file_name='deployment_with_source_plugin_host_agent.yaml',
        blueprint_id=dep_id,
        deployment_id=dep_id)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_success_when_diamond_plugin_in_blueprint(self):
    """The diamond plugin used as a source plugin does not block
    deployment creation."""
    dep_id = str(uuid.uuid4())
    self.put_deployment(
        blueprint_file_name='deployment_with_diamond_as_source_plugin.yaml',
        blueprint_id=dep_id,
        deployment_id=dep_id)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_success_when_diamond_as_host_agent_in_blueprint(self):
    """The diamond plugin used as a host agent plugin does not block
    deployment creation."""
    dep_id = str(uuid.uuid4())
    self.put_deployment(
        blueprint_file_name='deployment_with_diamond_as_host_agent.yaml',
        blueprint_id=dep_id,
        deployment_id=dep_id)
@attr(client_min_version=3.1,
      client_max_version=base_test.LATEST_API_VERSION)
def test_creation_success_when_install_plugin_is_False(self):
    """A plugin marked install: false is not validated."""
    dep_id = str(uuid.uuid4())
    self.put_deployment(
        blueprint_file_name='deployment_with_install_plugin_False.yaml',
        blueprint_id=dep_id,
        deployment_id=dep_id)
| {
"content_hash": "b5374da53e850f7b86f8424ac2e8c2f9",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 79,
"avg_line_length": 43.03900709219858,
"alnum_prop": 0.6087171459174425,
"repo_name": "isaac-s/cloudify-manager",
"id": "d096d8871fa697740200c934ba929188cf28f5f7",
"size": "24912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest-service/manager_rest/test/endpoints/test_deployments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Mako",
"bytes": "541"
},
{
"name": "Python",
"bytes": "1793118"
},
{
"name": "Ruby",
"bytes": "40193"
},
{
"name": "Shell",
"bytes": "41526"
}
],
"symlink_target": ""
} |
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core.modules.vistrails_module import Module
from vistrails.core.modules.basic_modules import Constant
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.system import get_elementtree_library
from vistrails.core.utils.color import ColorByName
from vistrails.gui.modules.constant_configuration import ConstantWidgetMixin
import vtk
import math
import pickle
import copy
import StringIO
import unittest
ElementTree = get_elementtree_library()
################################################################################
# etc
def clamp(v, mn, mx, eps=0.0):
    """Clamp *v* into [mn, mx]; values within *eps* of either bound
    snap to that bound."""
    if v < mn + eps:
        return mn
    if v > mx - eps:
        return mx
    return v
# Because of a Qt bug see
# http://bugreports.qt.nokia.com/browse/QTBUG-17985
# We cannot set the scene from 0 to 1. In this case we will set it
# 4000 x 4000 with GLOBAL_SCALE. When the bug is fixed, just set it to 1.0
GLOBAL_SCALE = 4000.0
##############################################################################
# Transfer Function object
class TransferFunction(object):
    """A 1-D transfer function: a sorted list of (scalar, opacity,
    (r, g, b)) knots.

    Knot scalars are normalized to [0, 1]; the [_min_range,
    _max_range] scalar range is applied when converting to VTK
    opacity/color functions.
    """

    def __init__(self):
        self._min_range = 0.0
        self._max_range = 1.0
        self._pts = []  # sorted list of (scalar, opacity, (r, g, b))

    def set_range(self, mn, mx):
        """Set the scalar range the normalized knots map onto."""
        self._min_range = mn
        self._max_range = mx

    def set_on_vtk_volume_property(self, vtk_volume_property):
        """Install this transfer function on a vtkVolumeProperty."""
        # Builds the opacity and color functions
        of = vtk.vtkPiecewiseFunction()
        cf = vtk.vtkColorTransferFunction()
        vp = vtk_volume_property
        for pt in self._pts:
            (scalar, opacity, color) = pt
            # Map normalized scalar into the configured range
            s = self._min_range + (self._max_range - self._min_range) * scalar
            of.AddPoint(s, opacity)
            cf.AddRGBPoint(s, color[0], color[1], color[2])
        vp.SetScalarOpacity(of)
        vp.SetColor(cf)

    def get_vtk_transfer_functions(self):
        """Return (vtkPiecewiseFunction, vtkColorTransferFunction)
        equivalents of this transfer function."""
        of = vtk.vtkPiecewiseFunction()
        cf = vtk.vtkColorTransferFunction()
        for pt in self._pts:
            (scalar, opacity, color) = pt
            # Map normalized scalar into the configured range
            s = self._min_range + (self._max_range - self._min_range) * scalar
            of.AddPoint(s, opacity)
            cf.AddRGBPoint(s, color[0], color[1], color[2])
        return (of, cf)

    def add_point(self, scalar, opacity, color):
        """Insert a knot, keeping the knot list sorted by scalar."""
        self._pts.append((scalar, opacity, color))
        self._pts.sort()

    def get_value(self, scalar):
        """get_value(scalar): returns the opacity and color
        linearly interpolated at the value. Useful for
        adding knots.

        Scalars outside the knot range clamp to the first/last knot.
        """
        # BUG FIX: advance to the first knot at or above 'scalar'.
        # The original condition ('> scalar') stopped immediately for
        # any in-range scalar and always returned the first knot.
        ix = 0
        while ix < len(self._pts) and self._pts[ix][0] < scalar:
            ix += 1
        if ix == 0:
            return (self._pts[0][1], self._pts[0][2])
        elif ix == len(self._pts):
            return (self._pts[-1][1], self._pts[-1][2])
        else:
            # Interpolation parameter: 0 at knot ix-1, 1 at knot ix
            # (the original formula was inverted as well).
            u = ((scalar - self._pts[ix - 1][0]) /
                 (self._pts[ix][0] - self._pts[ix - 1][0]))
            do = self._pts[ix][1] - self._pts[ix - 1][1]
            dr = self._pts[ix][2][0] - self._pts[ix - 1][2][0]
            dg = self._pts[ix][2][1] - self._pts[ix - 1][2][1]
            db = self._pts[ix][2][2] - self._pts[ix - 1][2][2]
            return (self._pts[ix - 1][1] + u * do,
                    (self._pts[ix - 1][2][0] + u * dr,
                     self._pts[ix - 1][2][1] + u * dg,
                     self._pts[ix - 1][2][2] + u * db))

    def __copy__(self):
        result = TransferFunction()
        result._min_range = self._min_range
        result._max_range = self._max_range
        result._pts = copy.copy(self._pts)
        return result

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        if self._min_range != other._min_range:
            return False
        if self._max_range != other._max_range:
            return False
        # BUG FIX: zip() truncates to the shorter list, so without an
        # explicit length check a function compared equal to any
        # prefix of itself.
        if len(self._pts) != len(other._pts):
            return False
        for my_pt, other_pt in zip(self._pts, other._pts):
            if my_pt != other_pt:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def serialize(self, node=None):
        """serialize(node: ElementTree.Element) -> str
        Convert this object to an XML representation in string format.
        """
        if node is None:
            node = ElementTree.Element('transfer_function')
        node.set('min_range', str(self._min_range))
        node.set('max_range', str(self._max_range))
        for pt in self._pts:
            ptNode = ElementTree.SubElement(node, 'point')
            ptNode.set('scalar', str(pt[0]))
            ptNode.set('opacity', str(pt[1]))
            color = pt[2]
            colorNode = ElementTree.SubElement(ptNode, 'color')
            colorNode.set('R', str(color[0]))
            colorNode.set('G', str(color[1]))
            colorNode.set('B', str(color[2]))
        return ElementTree.tostring(node)

    @staticmethod
    def parse(strNode):
        """parse(strNode: str) -> TransferFunction
        Parses a string representing a TransferFunction and returns a
        TransferFunction object
        """
        try:
            node = ElementTree.fromstring(strNode)
        except SyntaxError:
            # Not XML: it was serialized using the legacy hex-encoded
            # pickle format; unpickle with a module-path fixup.
            class FixUnpickler(pickle.Unpickler):
                def find_class(self, module, name):
                    if module == 'packages.vtk.tf_widget':
                        module = 'vistrails.packages.vtk.tf_widget'
                    return pickle.Unpickler.find_class(self, module, name)
            tf = FixUnpickler(StringIO.StringIO(strNode.decode('hex'))).load()
            tf._pts.sort()
            return tf
        if node.tag != 'transfer_function':
            return None
        # read attributes
        tf = TransferFunction()
        tf._min_range = float(node.get('min_range', "0.0"))
        tf._max_range = float(node.get('max_range', "1.0"))
        # list(node) iterates the children; Element.getchildren() was
        # removed in Python 3.9 and list(node) is equivalent on 2.x.
        for ptNode in list(node):
            if ptNode.tag == 'point':
                scalar = float(ptNode.get('scalar', '-1.0'))
                opacity = float(ptNode.get('opacity', '1.0'))
                for colorNode in list(ptNode):
                    if colorNode.tag == 'color':
                        color = (float(colorNode.get('R', '0.0')),
                                 float(colorNode.get('G', '0.0')),
                                 float(colorNode.get('B', '0.0')))
                        break
                else:
                    # BUG FIX: the original 'assert "..."' asserted a
                    # non-empty string, which is always true; fail
                    # loudly instead of appending an undefined color.
                    raise ValueError("'point' node has no 'color' child")
                tf._pts.append((scalar, opacity, color))
        tf._pts.sort()
        return tf
##############################################################################
# Graphics Items
class TransferFunctionPoint(QtGui.QGraphicsEllipseItem):
    """A draggable knot of the transfer function curve.

    Stores a normalized (scalar, opacity) position plus a color and is
    doubly linked to its neighbors through TransferFunctionLine
    objects (_left_line / _right_line).  The leftmost/rightmost knots
    have no left/right line respectively and cannot be removed.
    """

    # Outline pen per selection state: a thick goldenrod pen when
    # selected, the default pen otherwise.
    selection_pens = {True: QtGui.QPen(QtGui.QBrush(
        QtGui.QColor(*(ColorByName.get_int('goldenrod_medium')))), GLOBAL_SCALE * 0.012),
        False: QtGui.QPen()}

    def __init__(self, scalar, opacity, color, parent=None):
        QtGui.QGraphicsEllipseItem.__init__(self, parent)
        self._scalar = scalar
        self._opacity = opacity
        # color arrives as (r, g, b) floats in [0, 1]
        self._color = QtGui.QColor(color[0]*255,
                                   color[1]*255,
                                   color[2]*255)
        self.setPen(QtGui.QPen(QtGui.QColor(0, 0, 0)))
        self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)
        self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable)
        self.setFlag(QtGui.QGraphicsItem.ItemIsFocusable)
        if QtCore.QT_VERSION >= 0x40600:
            self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges)
        self.setZValue(2.0)
        # current view scale factors (kept in sync by update_scale)
        self._sx = 1.0
        self._sy = 1.0
        # fixed scale (see GLOBAL_SCALE / QTBUG-17985 note above)
        self._fsx = GLOBAL_SCALE
        self._fsy = GLOBAL_SCALE
        # Linked-list hooks; endpoints keep None on the outer side.
        self._left_line = None
        self._right_line = None
        self._point = QtCore.QPointF(scalar * self._fsx, opacity * self._fsy)
        self.refresh()
        self.setToolTip("Double-click to change color\n"
                        "Right-click to remove point\n"
                        "Scalar: %.5f, Opacity: %.5f" % (self._scalar,
                                                         self._opacity))

    def keyPressEvent(self, event):
        # Backspace/Delete removes this knot (endpoints refuse).
        if event.key() == QtCore.Qt.Key_Backspace or \
                event.key() == QtCore.Qt.Key_Delete:
            self.remove_self()

    def refresh(self):
        """Recompute ellipse geometry, brush and position."""
        # Radii are divided by the view scale so knots keep a constant
        # on-screen size.
        dx = self._fsx * 0.025 / self._sx
        dy = self._fsy * 0.025 / self._sy
        self.setBrush(QtGui.QBrush(self._color))
        self.setRect(-dx,
                     -dy,
                     2 * dx, 2 * dy)
        self.setPos(self._fsx * self._scalar,
                    self._fsy * self._opacity)
        self.update()

    def update_scale(self, sx, sy):
        self._sx = sx
        self._sy = sy
        self.refresh()

    def itemChange(self, change, value):
        """Track selection changes and constrain dragging.

        Vertical motion is clamped to the opacity range; horizontal
        motion is clamped between the neighboring knots, while the two
        endpoint knots are pinned at scalar 0 and 1.
        """
        if change == QtGui.QGraphicsItem.ItemSelectedChange:
            self.setPen(self.selection_pens[value])
        if change == QtGui.QGraphicsItem.ItemPositionChange:
            # moves point
            # value is now a QPointF, not a QPoint so no conversion needed
            pt = value
            pt.setY(clamp(pt.y(), 0.0, 1.0 * self._fsy))
            self._opacity = pt.y() / self._fsy
            self._point.setY(pt.y())
            if not self._left_line:
                # leftmost knot: pinned to scalar 0
                pt.setX(0.0)
            elif not self._right_line:
                # rightmost knot: pinned to scalar 1
                pt.setX(1.0 * self._fsx)
            else:
                assert self._left_line._point_right == self
                assert self._right_line._point_left == self
                # keep knots ordered; eps prevents exact overlap
                pt.setX(clamp(pt.x(),
                              self._left_line._point_left._point.x(),
                              self._right_line._point_right._point.x(),
                              1e-6))
            self._point.setX(pt.x())
            self._scalar = pt.x() / self._fsx
            if self._left_line:
                self._left_line.refresh()
            if self._right_line:
                self._right_line.refresh()
            if self.parentItem():
                self.parentItem()._tf_poly.setup()
            self.setToolTip("Double-click to change color\n"
                            "Right-click to remove point\n"
                            "Scalar: %.5f, Opacity: %.5f" % (self._scalar,
                                                             self._opacity))
            # pass the clamped position on to Qt
            return QtGui.QGraphicsItem.itemChange(self, change, pt)
        return QtGui.QGraphicsItem.itemChange(self, change, value)

    def remove_self(self):
        """Unlink this knot (and its right line) from the curve."""
        if not self._left_line or not self._right_line:
            # Ignore, self is a corner node that can't be removed
            return
        # Removes the right line and self, re-ties data structure
        self._left_line._point_right = self._right_line._point_right
        self._left_line._point_right._left_line = self._left_line
        # be friends with garbage collector
        self._right_line._point_left = None
        self._right_line._point_right = None
        self.parentItem()._tf_poly.setup()
        self.scene().removeItem(self._right_line)
        self.scene().removeItem(self)
        self._left_line.refresh()

    def mouseDoubleClickEvent(self, event):
        """Let the user pick a new color for this knot."""
        new_color = QtGui.QColorDialog.getColor(self._color)
        if not new_color.isValid():
            return
        self._color = new_color
        if self._left_line:
            self._left_line.refresh()
        if self._right_line:
            self._right_line.refresh()
        self.refresh()
        # sometimes the graphicsitem gets recreated, and we need to abort
        if self.parentItem():
            self.parentItem()._tf_poly.setup()
        QtGui.QGraphicsEllipseItem.mouseDoubleClickEvent(self, event)

    def mousePressEvent(self, event):
        # Right button deletes the knot; anything else selects/drags.
        if event.button() == QtCore.Qt.RightButton:
            event.accept()
            self.remove_self()
        else:
            QtGui.QGraphicsEllipseItem.mousePressEvent(self, event)

    def paint(self, painter, option, widget=None):
        """ paint(painter: QPainter, option: QStyleOptionGraphicsItem,
        widget: QWidget) -> None
        Perform painting of the point without the ugly default
        dashed-line black square
        """
        painter.setBrush(self.brush())
        painter.setPen(self.pen())
        painter.drawEllipse(self.rect())

    def add_self_to_transfer_function(self, tf):
        """Append this knot's (scalar, opacity, color) to *tf*."""
        tf.add_point(self._scalar,
                     self._opacity,
                     (self._color.redF(),
                      self._color.greenF(),
                      self._color.blueF()))
class TransferFunctionPolygon(QtGui.QGraphicsPolygonItem):
    """Filled polygon under the curve, shaded with a horizontal
    gradient built from the knot colors and opacities."""

    def __init__(self, parent=None):
        QtGui.QGraphicsPolygonItem.__init__(self, parent)

    def setup(self):
        """Rebuild the polygon and its gradient from the knot list."""
        # This inspects the scene, finds the left-most point, and
        # then builds the polygon traversing the linked list structure
        pt = self.parentItem().get_leftmost_point()
        if not pt:
            return
        self.setZValue(1.25)
        g = QtGui.QLinearGradient()
        g.setStart(0.0, 0.5)
        g.setFinalStop(1.0, 0.5)
        g.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
        p = QtGui.QPen()
        p.setStyle(QtCore.Qt.NoPen)  # fill only, no outline
        # first vertex closes the polygon down to the x axis
        pts = [QtCore.QPointF(pt.x(), 0)]
        self.setPen(p)
        while 1:
            c = QtGui.QColor(pt._color)
            c.setAlphaF(pt._opacity)
            g.setColorAt(pt._scalar, c)
            pts.append(pt._point)
            # move cursor fwd
            if pt._right_line:
                pt = pt._right_line._point_right
            else:
                break
        self.setBrush(QtGui.QBrush(g))
        # close the polygon at the right end
        pts.append(QtCore.QPointF(pt._point.x(), 0))
        polygon = QtGui.QPolygonF(pts)
        self.setPolygon(polygon)
class TransferFunctionLine(QtGui.QGraphicsPolygonItem):
    """Thick gradient-colored segment between two adjacent knots.

    Drawn as a quad around the segment so it is easy to hit with the
    mouse; double-clicking it inserts a new knot at that position.
    """

    def __init__(self, point_left, point_right, parent=None):
        assert point_right._scalar >= point_left._scalar
        QtGui.QGraphicsPolygonItem.__init__(self, parent)
        self._point_left = point_left
        self._point_right = point_right
        # tie both endpoints back to this line
        self._point_left._right_line = self
        self._point_right._left_line = self
        self.setup(1.0, 1.0)
        self._sx = 1.0
        self._sy = 1.0
        # fixed scale
        self._fsx = GLOBAL_SCALE
        self._fsy = GLOBAL_SCALE
        self.setToolTip('')

    def setup(self, sx, sy):
        """Rebuild the quad and its fill/outline gradients for the
        given view scale (sx, sy)."""
        # unit normal of the segment, scaled to half the quad width
        d = self._point_right._point - self._point_left._point
        d_normal = QtCore.QPointF(d.y(), -d.x())
        l = math.sqrt(d.x() * d.x() + d.y() * d.y())
        if l != 0.0:
            d_normal /= l
            d_normal *= GLOBAL_SCALE * 0.010
        # compensate for the view scale so thickness stays constant
        d_normal.setX(d_normal.x() / sx)
        d_normal.setY(d_normal.y() / sy)
        ps = [self._point_left._point + d_normal,
              self._point_right._point + d_normal,
              self._point_right._point - d_normal,
              self._point_left._point - d_normal]
        self.setPolygon(QtGui.QPolygonF(ps))
        self.setZValue(1.5)
        # Gradient for filling
        g = QtGui.QLinearGradient()
        g.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
        g.setStart(self._point_left._scalar, self._point_left._opacity)
        g.setFinalStop(self._point_right._scalar, self._point_right._opacity)
        g.setColorAt(0.0, self._point_left._color)
        g.setColorAt(1.0, self._point_right._color)
        self.setBrush(QtGui.QBrush(g))
        # Gradient for outlining (darkened endpoint colors)
        g = QtGui.QLinearGradient()
        g.setStart(self._point_left._point)
        g.setFinalStop(self._point_right._point)
        dark_pl = QtGui.QColor(self._point_left._color.red() * 0.5,
                               self._point_left._color.green() * 0.5,
                               self._point_left._color.blue() * 0.5)
        dark_pr = QtGui.QColor(self._point_right._color.red() * 0.5,
                               self._point_right._color.green() * 0.5,
                               self._point_right._color.blue() * 0.5)
        g.setColorAt(0.0, dark_pl)
        g.setColorAt(1.0, dark_pr)
        p = QtGui.QPen()
        p.setBrush(QtGui.QBrush(g))
        self.setPen(p)

    def update_scale(self, sx, sy):
        self._sx = sx
        self._sy = sy
        self.refresh()

    def refresh(self):
        self.setup(self._sx, self._sy)

    def mouseDoubleClickEvent(self, event):
        """Insert a new knot at the clicked position, splitting this
        line in two; the new knot's color is interpolated between the
        two endpoint colors."""
        p = event.pos()
        c_left = self._point_left._color
        c_right = self._point_right._color
        # interpolation parameter along the segment's x extent
        u = ((p.x() - self._point_left._point.x()) /
             (self._point_right._point.x() - self._point_left._point.x()))
        new_c = (u * c_right.redF() + (1-u) * c_left.redF(),
                 u * c_right.greenF() + (1-u) * c_left.greenF(),
                 u * c_right.blueF() + (1-u) * c_left.blueF())
        new_point = TransferFunctionPoint(p.x() / self._fsx, p.y() / self._fsy, new_c, self.parentItem())
        self.parentItem()._tf_items.append(new_point)
        new_line = TransferFunctionLine(new_point, self._point_right, self.parentItem())
        self.parentItem()._tf_items.append(new_line)
        # re-tie the linked list around the new knot
        new_point._left_line = self
        self._point_right = new_point
        new_line.update_scale(self._point_left._sx,
                              self._point_left._sy)
        new_point.update_scale(self._point_left._sx,
                               self._point_left._sy)
        new_point.refresh()
        self.refresh()

    def mousePressEvent(self, event):
        # This needs to be here, otherwise mouseDoubleClickEvent does
        # not get called.
        event.accept()
##############################################################################
# Scene, view, widget
class QGraphicsTransferFunction(QtGui.QGraphicsWidget, ConstantWidgetMixin):
    """Graphics-scene widget that edits a TransferFunction.

    Renders knots/lines/polygon inside a GLOBAL_SCALE-sized square
    (see the QTBUG-17985 note above) and implements the constant
    widget contract (contents / setContents).
    """

    contentsChanged = QtCore.pyqtSignal(tuple)

    def __init__(self, param, parent=None):
        QtGui.QGraphicsWidget.__init__(self, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        self.setAcceptHoverEvents(True)
        # fall back to the default transfer function for empty values
        if not param.strValue:
            self._tf = copy.copy(default_tf)
        else:
            self._tf = TransferFunction.parse(param.strValue)
        self._tf_items = []
        poly = TransferFunctionPolygon(self)
        poly.setup()
        self._tf_poly = poly
        self.create_tf_items(self._tf)
        self._tf_poly.setup()
        # current scale
        self._sx = 1.0
        self._sy = 1.0
        # Add outlines
        line_color = QtGui.QColor(200, 200, 200)
        pen = QtGui.QPen(line_color)
        ps = [QtCore.QPointF(0.0, 0.0),
              QtCore.QPointF(GLOBAL_SCALE, 0.0),
              QtCore.QPointF(GLOBAL_SCALE, GLOBAL_SCALE),
              QtCore.QPointF(0.0, GLOBAL_SCALE)]
        polygon = QtGui.QGraphicsPolygonItem(QtGui.QPolygonF(ps), self)
        polygon.setPen(pen)
        # 50x50 background grid lines
        for i in xrange(51):
            u = GLOBAL_SCALE * float(i) / 50.0
            line = QtGui.QGraphicsLineItem(QtCore.QLineF(u, 0.0, u, GLOBAL_SCALE), self)
            line.setPen(pen)
            line = QtGui.QGraphicsLineItem(QtCore.QLineF(0.0, u, GLOBAL_SCALE, u), self)
            line.setPen(pen)
        self.setGeometry(self.boundingRect())
        # restore y axis inversion
        self.setTransform(QtGui.QTransform(1, 0, 0, -1, 0, GLOBAL_SCALE))
        self.setTransformOriginPoint(0, GLOBAL_SCALE)
        self.reset_transfer_function(self._tf)

    def boundingRect(self):
        return QtCore.QRectF(0.0, 0.0, GLOBAL_SCALE, GLOBAL_SCALE)

    def reset_transfer_function(self, tf):
        """Replace the edited curve with *tf* and redraw."""
        self.create_tf_items(tf)
        self.update_scale(self._sx, self._sy)
        self._tf_poly.setup()

    def create_tf_items(self, tf):
        """(Re)build the point/line graphics items from *tf*'s knots."""
        if self._tf_items and not self.scene():  # not added to scene yet
            return
        items = copy.copy(self._tf_items)
        for item in items:
            self.scene().removeItem(item)
        self._tf_items = []
        if len(tf._pts) == 0:
            # empty function: a flat zero-opacity black segment
            pt_left = TransferFunctionPoint(0.0, 0.0, (0.0, 0.0, 0.0), self)
            self._tf_items.append(pt_left)
            pt_right = TransferFunctionPoint(1.0, 0.0, (0.0, 0.0, 0.0), self)
            self._tf_items.append(pt_right)
            self._tf_items.append(TransferFunctionLine(pt_left, pt_right, self))
        else:
            pts = [TransferFunctionPoint(*pt, parent=self)
                   for pt in tf._pts]
            self._tf_items.extend(pts)
            # one line between each pair of consecutive knots
            lns = [TransferFunctionLine(pt_l, pt_r, self)
                   for (pt_l, pt_r) in zip(pts[:-1], pts[1:])]
            self._tf_items.extend(lns)

    def add_knot(self, scalar, opacity):
        # not implemented; knots are added by double-clicking a line
        pass

    def update_scale(self, sx, sy):
        for item in self._tf_items:
            item.update_scale(sx, sy)
        self._sx = sx
        self._sy = sy

    def get_leftmost_point(self):
        """Return the knot with no left line (scalar 0), if any."""
        pt = None
        for item in self._tf_items:
            if hasattr(item, '_left_line') and not item._left_line:
                pt = item
                break
        return pt

    def get_transfer_function(self):
        """Build a TransferFunction by walking the knot linked list
        from left to right."""
        result = TransferFunction()
        pt = self.get_leftmost_point()
        while 1:
            pt.add_self_to_transfer_function(result)
            if pt._right_line:
                pt = pt._right_line._point_right
            else:
                break
        return result

    def contents(self):
        return self.get_transfer_function().serialize()

    def setContents(self, strValue, silent=True):
        if not strValue:
            self._tf = copy.copy(default_tf)
        else:
            self._tf = TransferFunction.parse(strValue)
        self.reset_transfer_function(self._tf)
        if not silent:
            self.update_parent()

    def hoverLeaveEvent(self, event):
        # notify the parent when the mouse leaves the widget
        # (update_parent comes from ConstantWidgetMixin)
        self.update_parent()
        QtGui.QGraphicsWidget.hoverLeaveEvent(self, event)
class TransferFunctionScene(QtGui.QGraphicsScene):
    """Scene holding a single QGraphicsTransferFunction item."""

    def __init__(self, param, parent=None):
        QtGui.QGraphicsScene.__init__(self, parent)
        self.tf = QGraphicsTransferFunction(param)
        self.addItem(self.tf)
class TransferFunctionView(QtGui.QGraphicsView):
    """Antialiased, scrollbar-free view of a TransferFunctionScene."""

    def __init__(self, parent=None):
        QtGui.QGraphicsView.__init__(self, parent)
        self.setRenderHint(QtGui.QPainter.Antialiasing)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)

    def resizeEvent(self, event):
        # Refit the GLOBAL_SCALE-sized scene into the new viewport;
        # the 10/9 factor appears to leave a margin around the square
        # (TODO confirm the intent of the magic constants).
        self.resetMatrix()
        self.setMatrix(QtGui.QMatrix(event.size().width() / (GLOBAL_SCALE * 10.0/9), 0,
                                     0, event.size().height() / (GLOBAL_SCALE * 10.0/9), GLOBAL_SCALE, 0))
        self.scene().tf.update_scale(event.size().width() / (2000.0/9), event.size().height() / (2000.0/9))

    def focusOutEvent(self, event):
        # notify the parent widget when focus leaves the view
        self.parent().update_parent()
        QtGui.QGraphicsView.focusOutEvent(self, event)
# Default transfer function: fully transparent black across the range.
default_tf = TransferFunction()
default_tf.add_point(0.0, 0.0, (0.0, 0.0, 0.0))
default_tf.add_point(1.0, 0.0, (0.0, 0.0, 0.0))
class TransferFunctionWidget(QtGui.QWidget, ConstantWidgetMixin):
    """Plain QWidget wrapper: embeds a TransferFunctionView/Scene and
    forwards the constant-widget API to the scene's tf item."""

    contentsChanged = QtCore.pyqtSignal(tuple)
    # graphics-item class used for this constant's scene editing
    GraphicsItem = QGraphicsTransferFunction

    def __init__(self, param, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self._scene = TransferFunctionScene(param, self)
        # route the item's parent-notification through this widget
        self._scene.tf.update_parent = self.update_parent
        layout = QtGui.QVBoxLayout()
        self.setLayout(layout)
        self._view = TransferFunctionView(self)
        self._view.setScene(self._scene)
        self._view.setMinimumSize(200, 200)
        self._view.setMaximumHeight(280)
        self._view.show()
        self._view.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                 QtGui.QSizePolicy.Expanding)
        # TODO remove this
        self._view.setMatrix(QtGui.QMatrix(1, 0, 0, -1, GLOBAL_SCALE, 0))
        self.setMinimumSize(260, 240)
        caption = QtGui.QLabel("Double-click on the line to add a point")
        font = QtGui.QFont('Arial', 11)
        font.setItalic(True)
        caption.setFont(font)
        layout.addWidget(self._view)
        layout.addWidget(caption)

    def contents(self):
        return self._scene.tf.contents()

    def setContents(self, strValue, silent=True):
        self._scene.tf.setContents(strValue, silent)

    def set_last_contents(self, contents):
        self._scene.tf._last_contents = contents

    def get_last_contents(self):
        return self._scene.tf._last_contents

    # delegate _last_contents storage to the graphics item
    _last_contents = property(get_last_contents, set_last_contents)
##############################################################################
# Helper module to adjust range
class vtkScaledTransferFunction(Module):
    """Rescale a TransferFunction to the scalar range of its input.

    The range is taken, in order of precedence, from the 'Input'
    algorithm output, the 'Dataset' data object, or the explicit
    'Range' pair; the rescaled transfer function and its VTK
    opacity/color functions are set on the output ports.
    """
    _input_ports = [
        ['Input', 'vtkAlgorithmOutput'],
        ['Dataset', 'vtkDataObject'],
        ['Range', '(basic:Float, basic:Float)'],
        ['TransferFunction', 'TransferFunction']]
    _output_ports = [
        ['TransferFunction', 'TransferFunction'],
        ['vtkPiecewiseFunction', 'vtkPiecewiseFunction'],
        ['vtkColorTransferFunction', 'vtkColorTransferFunction']]

    def compute(self):
        reg = get_module_registry()
        tf = self.get_input('TransferFunction')
        # work on a copy so the input transfer function stays intact
        new_tf = copy.copy(tf)
        if self.has_input('Input'):
            port = self.get_input('Input')
            algo = port.GetProducer()
            output = algo.GetOutput(port.GetIndex())
            (new_tf._min_range, new_tf._max_range) = output.GetScalarRange()
        elif self.has_input('Dataset'):
            algo = self.get_input('Dataset')
            output = algo
            (new_tf._min_range, new_tf._max_range) = output.GetScalarRange()
        else:
            (new_tf._min_range, new_tf._max_range) = self.get_input('Range')
        self.set_output('TransferFunction', new_tf)
        (of, cf) = new_tf.get_vtk_transfer_functions()
        # BUG FIX: the port name was misspelled 'vtkPicewiseFunction',
        # so the declared 'vtkPiecewiseFunction' output was never set.
        self.set_output('vtkPiecewiseFunction', of)
        self.set_output('vtkColorTransferFunction', cf)
class TransferFunctionConstant(Constant):
    """VisTrails Constant type wrapping a TransferFunction value."""

    default_value = default_tf

    @staticmethod
    def translate_to_python(x):
        """Deserialize an XML (or legacy pickle) string."""
        return TransferFunction.parse(x)

    @staticmethod
    def translate_to_string(x):
        """Serialize a TransferFunction to its XML string form."""
        return x.serialize()

    @staticmethod
    def validate(x):
        """A value is valid iff it is a TransferFunction instance."""
        return isinstance(x, TransferFunction)

    @staticmethod
    def get_widget_class():
        """Use the custom transfer-function editor widget."""
        return TransferFunctionWidget
##############################################################################
class TestTransferFunction(unittest.TestCase):
    """Round-trip tests for TransferFunction serialization."""

    def test_serialization(self):
        """Both the legacy hex-encoded pickle format and the XML
        format must parse back to an equal TransferFunction."""
        tf = TransferFunction()
        tf._min_range = 0.1
        tf._max_range = 2.0
        tf._pts.append((0.3, 0.5, (1.0, 1.0, 1.0)))
        tf._pts.append((0.6, 0.7, (1.0, 0.5, 1.0)))
        tf._pts.append((0.2, 0.8, (1.0, 0.0, 1.0)))
        tf._pts.sort()
        # simulate old serialization method (Python-2-only
        # str.encode('hex'); exercises the pickle fallback in parse)
        ser1 = pickle.dumps(tf).encode('hex')
        ser2 = tf.serialize()
        tf1 = TransferFunction.parse(ser1)
        tf2 = TransferFunction.parse(ser2)
        assert tf == tf1
        assert tf == tf2
        assert tf1 == tf2
# Expose the constant under the public name "TransferFunction".
TransferFunctionConstant.__name__ = "TransferFunction"

# Modules registered by this package.
_modules = [TransferFunctionConstant, vtkScaledTransferFunction]

if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "01cecb83d7e4da5c97bab76f1739cf89",
"timestamp": "",
"source": "github",
"line_count": 739,
"max_line_length": 104,
"avg_line_length": 37.529093369418135,
"alnum_prop": 0.5534722723011466,
"repo_name": "hjanime/VisTrails",
"id": "f2d86888793796af9782665679ad0273cffe9d6c",
"size": "29762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/packages/vtk/tf_widget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
"""UniFi Network sensor platform tests."""
from datetime import datetime
from unittest.mock import patch
from aiounifi.models.message import MessageKey
import pytest
from homeassistant.components.device_tracker import DOMAIN as TRACKER_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.unifi.const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
DOMAIN as UNIFI_DOMAIN,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import EntityCategory
import homeassistant.util.dt as dt_util
from .test_controller import setup_unifi_integration
async def test_no_clients(hass, aioclient_mock):
    """Test the update_clients function when no clients are found."""
    options = {
        CONF_ALLOW_BANDWIDTH_SENSORS: True,
        CONF_ALLOW_UPTIME_SENSORS: True,
    }
    await setup_unifi_integration(hass, aioclient_mock, options=options)

    # With no clients there is nothing to create sensors for.
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 0
async def test_bandwidth_sensors(hass, aioclient_mock, mock_unifi_websocket):
    """Verify that bandwidth sensors are working as expected."""
    # One wired and one wireless client, each with rx/tx rate counters
    # in bytes (the sensors report them divided by 1e6).
    wired_client = {
        "hostname": "Wired client",
        "is_wired": True,
        "mac": "00:00:00:00:00:01",
        "oui": "Producer",
        "wired-rx_bytes-r": 1234000000,
        "wired-tx_bytes-r": 5678000000,
    }
    wireless_client = {
        "is_wired": False,
        "mac": "00:00:00:00:00:02",
        "name": "Wireless client",
        "oui": "Producer",
        "rx_bytes-r": 2345000000,
        "tx_bytes-r": 6789000000,
    }
    options = {
        CONF_ALLOW_BANDWIDTH_SENSORS: True,
        CONF_ALLOW_UPTIME_SENSORS: False,
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
    }
    config_entry = await setup_unifi_integration(
        hass,
        aioclient_mock,
        options=options,
        clients_response=[wired_client, wireless_client],
    )

    # rx/tx per client -> 4 sensors (5 states in total)
    assert len(hass.states.async_all()) == 5
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 4
    assert hass.states.get("sensor.wired_client_rx").state == "1234.0"
    assert hass.states.get("sensor.wired_client_tx").state == "5678.0"
    assert hass.states.get("sensor.wireless_client_rx").state == "2345.0"
    assert hass.states.get("sensor.wireless_client_tx").state == "6789.0"

    ent_reg = er.async_get(hass)
    assert (
        ent_reg.async_get("sensor.wired_client_rx").entity_category
        is EntityCategory.DIAGNOSTIC
    )

    # Verify state update
    wireless_client["rx_bytes-r"] = 3456000000
    wireless_client["tx_bytes-r"] = 7891000000

    mock_unifi_websocket(message=MessageKey.CLIENT, data=wireless_client)
    await hass.async_block_till_done()

    assert hass.states.get("sensor.wireless_client_rx").state == "3456.0"
    assert hass.states.get("sensor.wireless_client_tx").state == "7891.0"

    # Disable option: all bandwidth sensors are removed
    options[CONF_ALLOW_BANDWIDTH_SENSORS] = False
    hass.config_entries.async_update_entry(config_entry, options=options.copy())
    await hass.async_block_till_done()

    assert len(hass.states.async_all()) == 1
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 0
    assert hass.states.get("sensor.wireless_client_rx") is None
    assert hass.states.get("sensor.wireless_client_tx") is None
    assert hass.states.get("sensor.wired_client_rx") is None
    assert hass.states.get("sensor.wired_client_tx") is None

    # Enable option: the sensors come back
    options[CONF_ALLOW_BANDWIDTH_SENSORS] = True
    hass.config_entries.async_update_entry(config_entry, options=options.copy())
    await hass.async_block_till_done()

    assert len(hass.states.async_all()) == 5
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 4
    assert hass.states.get("sensor.wireless_client_rx")
    assert hass.states.get("sensor.wireless_client_tx")
    assert hass.states.get("sensor.wired_client_rx")
    assert hass.states.get("sensor.wired_client_tx")

    # Try to add the sensors again, using a signal; no duplicates
    clients_connected = {wired_client["mac"], wireless_client["mac"]}
    devices_connected = set()

    controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]

    async_dispatcher_send(
        hass,
        controller.signal_update,
        clients_connected,
        devices_connected,
    )
    await hass.async_block_till_done()

    assert len(hass.states.async_all()) == 5
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 4
@pytest.mark.parametrize(
    "initial_uptime,event_uptime,new_uptime",
    [
        # Uptime listed in epoch time should never change
        (1609462800, 1609462800, 1612141200),
        # Uptime counted in seconds increases with every event
        (60, 64, 60),
    ],
)
async def test_uptime_sensors(
    hass,
    aioclient_mock,
    mock_unifi_websocket,
    initial_uptime,
    event_uptime,
    new_uptime,
):
    """Verify that uptime sensors are working as expected.

    Parametrized over the two uptime formats the controller can report:
    an absolute epoch timestamp (must never move between events) and a
    relative seconds counter (a changed value only shifts the sensor state
    when enough wall-clock time has passed — see the two websocket events
    below).
    """
    # Single tracked client; its "uptime" value is the parametrized format.
    uptime_client = {
        "mac": "00:00:00:00:00:01",
        "name": "client1",
        "oui": "Producer",
        "uptime": initial_uptime,
    }
    # Only uptime sensors enabled, so the entity count stays predictable.
    options = {
        CONF_ALLOW_BANDWIDTH_SENSORS: False,
        CONF_ALLOW_UPTIME_SENSORS: True,
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
    }

    # Freeze "now" so the uptime timestamp computed at setup is deterministic.
    now = datetime(2021, 1, 1, 1, 1, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.now", return_value=now):
        config_entry = await setup_unifi_integration(
            hass,
            aioclient_mock,
            options=options,
            clients_response=[uptime_client],
        )

    # One uptime sensor plus the config-entry-level entity.
    assert len(hass.states.async_all()) == 2
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 1
    assert hass.states.get("sensor.client1_uptime").state == "2021-01-01T01:00:00+00:00"

    # Uptime sensors are diagnostic entities.
    ent_reg = er.async_get(hass)
    assert (
        ent_reg.async_get("sensor.client1_uptime").entity_category
        is EntityCategory.DIAGNOSTIC
    )

    # Verify normal new event doesn't change uptime
    # 4 seconds has passed
    uptime_client["uptime"] = event_uptime
    now = datetime(2021, 1, 1, 1, 1, 4, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.now", return_value=now):
        mock_unifi_websocket(message=MessageKey.CLIENT, data=uptime_client)
        await hass.async_block_till_done()

    assert hass.states.get("sensor.client1_uptime").state == "2021-01-01T01:00:00+00:00"

    # Verify new event change uptime
    # 1 month has passed
    uptime_client["uptime"] = new_uptime
    now = datetime(2021, 2, 1, 1, 1, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.now", return_value=now):
        mock_unifi_websocket(message=MessageKey.CLIENT, data=uptime_client)
        await hass.async_block_till_done()

    assert hass.states.get("sensor.client1_uptime").state == "2021-02-01T01:00:00+00:00"

    # Disable option
    options[CONF_ALLOW_UPTIME_SENSORS] = False
    hass.config_entries.async_update_entry(config_entry, options=options.copy())
    await hass.async_block_till_done()

    # Disabling the option must remove the uptime sensor.
    assert len(hass.states.async_all()) == 1
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 0
    assert hass.states.get("sensor.client1_uptime") is None

    # Enable option
    options[CONF_ALLOW_UPTIME_SENSORS] = True
    with patch("homeassistant.util.dt.now", return_value=now):
        hass.config_entries.async_update_entry(config_entry, options=options.copy())
        await hass.async_block_till_done()

    # Re-enabling restores the sensor.
    assert len(hass.states.async_all()) == 2
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 1
    assert hass.states.get("sensor.client1_uptime")

    # Try to add the sensors again, using a signal
    # A controller update signal for an already-known client must not
    # create duplicate entities.
    clients_connected = {uptime_client["mac"]}
    devices_connected = set()

    controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
    async_dispatcher_send(
        hass,
        controller.signal_update,
        clients_connected,
        devices_connected,
    )
    await hass.async_block_till_done()

    assert len(hass.states.async_all()) == 2
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 1
async def test_remove_sensors(hass, aioclient_mock, mock_unifi_websocket):
    """Verify removing of clients work as expected.

    Sets up one wired and one wireless client with both bandwidth and
    uptime sensors enabled, then removes the wired client via a websocket
    CLIENT_REMOVED message and checks only its entities disappear.
    """
    wired_client = {
        "hostname": "Wired client",
        "is_wired": True,
        "mac": "00:00:00:00:00:01",
        "oui": "Producer",
        "wired-rx_bytes": 1234000000,
        "wired-tx_bytes": 5678000000,
        "uptime": 1600094505,
    }
    wireless_client = {
        "is_wired": False,
        "mac": "00:00:00:00:00:02",
        "name": "Wireless client",
        "oui": "Producer",
        "rx_bytes": 2345000000,
        "tx_bytes": 6789000000,
        "uptime": 60,
    }

    await setup_unifi_integration(
        hass,
        aioclient_mock,
        options={
            CONF_ALLOW_BANDWIDTH_SENSORS: True,
            CONF_ALLOW_UPTIME_SENSORS: True,
        },
        clients_response=[wired_client, wireless_client],
    )

    # 3 sensors (rx, tx, uptime) and 1 tracker entity per client.
    assert len(hass.states.async_all()) == 9
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 6
    assert len(hass.states.async_entity_ids(TRACKER_DOMAIN)) == 2
    assert hass.states.get("sensor.wired_client_rx")
    assert hass.states.get("sensor.wired_client_tx")
    assert hass.states.get("sensor.wired_client_uptime")
    assert hass.states.get("sensor.wireless_client_rx")
    assert hass.states.get("sensor.wireless_client_tx")
    assert hass.states.get("sensor.wireless_client_uptime")

    # Remove wired client
    mock_unifi_websocket(message=MessageKey.CLIENT_REMOVED, data=wired_client)
    await hass.async_block_till_done()

    # Only the wired client's entities are gone; wireless ones survive.
    assert len(hass.states.async_all()) == 5
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 3
    assert len(hass.states.async_entity_ids(TRACKER_DOMAIN)) == 1
    assert hass.states.get("sensor.wired_client_rx") is None
    assert hass.states.get("sensor.wired_client_tx") is None
    assert hass.states.get("sensor.wired_client_uptime") is None
    assert hass.states.get("sensor.wireless_client_rx")
    assert hass.states.get("sensor.wireless_client_tx")
    assert hass.states.get("sensor.wireless_client_uptime")
| {
"content_hash": "fa53b1f3e072a2f5e9db0b1cb6869fdd",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 88,
"avg_line_length": 33.77524429967427,
"alnum_prop": 0.6595621564278137,
"repo_name": "mezz64/home-assistant",
"id": "100918a93dae574f68e38c4f8e3b138b77e49831",
"size": "10369",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/unifi/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import re
class ToDo(object):
    """A single TODO/DONE entry stored inside a resource's note field.

    A todo is serialized between "###" boundary markers as::

        ### TODO:
        <note text>
        ###

    with ``DONE`` in place of ``TODO`` once completed.
    """

    todo_label = u"TODO"
    done_label = u"DONE"
    boundry = "###"
    # TODO: If pattern used a backreference the boundary could be randomized
    pattern = "#{3} (%s|%s):\n(.*?)\n#{3}" % (todo_label, done_label)
    skeleton = "%s %s:\n%s\n%s\n\n"
    single_regexp = re.compile(pattern, re.U + re.S)

    def __init__(self, source, raw):
        """Wrap the raw serialized todo *raw* belonging to *source*.

        *source* is a font- or glyph-like object (see Resource).
        """
        self.source = source
        self.block = Block(source)
        self.raw = raw
        self.note = ToDo.parse(raw)
        self.completed = self.isComplete()
        # A todo is "new" until its raw form exists inside the source's block.
        self.newRecord = not self.block.hasToDo(self.raw)
        self.glyph = self.block.resource.getGlyph()
        # Snapshot of attributes at load time; isUpdated() compares against it.
        self._original_state = dict(self.__dict__)

    def save(self):
        """Serialize this todo into the source's note block.

        Inserts when the record is new, replaces the previous raw form when
        it changed, then refreshes the change-tracking snapshot.
        """
        self.block.setup()
        note = self.note.strip()
        todo = self.skeleton % (
            self.boundry,
            self.done_label if self.completed else self.todo_label,
            note,
            self.boundry,
        )
        if self.newRecord:
            self.block.insert(todo)
        elif self.isUpdated():
            self.block.replace(todo, self.raw)
        self.newRecord = False
        self._original_state = dict(self.__dict__)
        self.raw = todo

    def isUpdated(self):
        """Return True if the completed flag or note text changed since load.

        Fixed to compare by value: the previous implementation used
        ``is not``, which is an identity check and unreliable for strings.
        """
        return (
            self._original_state['completed'] != self.completed
            or self._original_state['note'] != self.note
        )

    def destroy(self):
        """Remove this todo from the source's note block (no-op if unsaved)."""
        if not self.newRecord:
            self.block.erase(self.raw)

    def isComplete(self):
        """Return True if the raw serialized todo is labeled DONE."""
        match = self.single_regexp.match(self.raw)
        # str.find returns -1 when absent; compare by value, not identity
        # ("is not -1" only worked through CPython's small-int cache).
        return bool(match) and match.group(1).find(self.done_label) != -1

    def toCell(self):
        """Return a dict representation used to populate a UI list cell."""
        return {
            "completed": self.completed,
            "glyph": self.glyph,
            "note": self.note,
            "todo": self
        }

    @staticmethod
    def parse(raw):
        """Extract the note text from a raw serialized todo.

        Returns None for None input, the captured note text for a
        well-formed todo, and the input unchanged otherwise.
        """
        if raw is None:
            return None
        match = ToDo.single_regexp.match(raw)
        if match:
            return match.group(2)
        return raw
class Resource(object):
    """Adapter giving fonts and glyphs a uniform note-carrying interface.

    A font exposes its metadata through ``source.info`` while a glyph is
    its own info object; ``type`` records which kind was wrapped.
    """

    def __init__(self, source):
        """Wrap *source*, a font-like (has ``.info``) or glyph-like object.

        Raises AttributeError when *source* is None.
        """
        if source is None:
            raise AttributeError
        self.source = source
        # Fonts carry an ``info`` attribute; glyphs do not.
        self.type = 'font' if hasattr(source, 'info') else 'glyph'
        self.info = self.getInfo()

    def getInfo(self):
        """Return the object that carries the editable note field."""
        # Compare by value: ``is 'font'`` relied on string interning.
        if self.type == 'font':
            return self.source.info
        return self.source

    def getGlyph(self):
        """Return the glyph name, or "*" when wrapping a whole font."""
        if self.type == 'glyph':
            return self.source.name
        return "*"
class Block(object):
    """Manages the "### RoboToDo ###" region inside a resource's note text.

    All todos for a font or glyph live between the ``begin`` and ``end``
    markers appended to the resource's note string.
    """

    begin = "### RoboToDo ###\n\n"
    end = "#####"
    block_regexp = re.compile(r"%s(.*)%s" % (begin, end), re.S)

    def __init__(self, source):
        """Bind to *source*'s note and parse any existing todos from it."""
        self.resource = Resource(source)
        self.lines = Block.parse(self.resource.info.note)

    def setup(self):
        """Ensure the note contains a RoboToDo block; return the note text."""
        if not self.resource.info.note:
            self.resource.info.note = ""
        if not self.match():
            # Separate the block from any pre-existing note text.
            # Compare by value: ``is not ""`` is an identity check and
            # unreliable for strings.
            if self.resource.info.note != "":
                self.resource.info.note += "\n"
            self.resource.info.note += self.begin + self.end
        return self.resource.info.note

    def teardown(self):
        """Remove the block markers once no todos remain in the note."""
        lines = Block.parse(self.resource.info.note)
        if not lines:
            self.resource.info.note = self.resource.info.note.replace(self.begin, '')
            self.resource.info.note = self.resource.info.note.replace(self.end, '')
            self.resource.info.note = self.resource.info.note.rstrip()

    def match(self):
        """Return the regex match for the block in the note, or None."""
        if self.resource.info.note is None:
            return None
        return self.block_regexp.search(self.resource.info.note)

    def insert(self, todo):
        """Append the serialized *todo* at the end of the block's contents."""
        # \1 re-inserts the existing block contents before the new todo.
        # NOTE(review): backslashes in *todo* would be misread by re.sub's
        # replacement parsing — callers currently never produce any.
        inserted_block = r"%s\1%s%s" % (self.begin, todo, self.end)
        self.resource.info.note = re.sub(self.block_regexp, inserted_block, self.resource.info.note)

    def replace(self, todo, raw):
        """Substitute the old serialized form *raw* with the new form *todo*."""
        self.resource.info.note = self.resource.info.note.replace(raw, todo)

    def erase(self, raw):
        """Delete *raw* from the note and drop the block if now empty."""
        self.resource.info.note = self.resource.info.note.replace(raw, '')
        self.teardown()

    def hasToDo(self, raw):
        """Return True if *raw* occurs inside the block."""
        match = self.match()
        if match:
            return match.group().find(raw) >= 0
        return False

    def contents(self):
        """Return the text between the block markers, or None if absent."""
        match = self.match()
        if match:
            return match.group(1)
        return None

    @staticmethod
    def parse(raw):
        """Return the list of raw serialized todos found inside *raw*'s block."""
        if raw is None:
            return []
        block = Block.block_regexp.search(raw)
        if not block:
            return []
        raw_note = block.group(1)
        return [match.group() for match in ToDo.single_regexp.finditer(raw_note)]
| {
"content_hash": "785cad0057a9c352d598fb8b79e20cd7",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 121,
"avg_line_length": 30.943037974683545,
"alnum_prop": 0.5412149723869912,
"repo_name": "jackjennings/RoboToDo",
"id": "dcef3ba136c1f30e28ce4e330b1ed98942747356",
"size": "4889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RoboToDo.roboFontExt/lib/todo/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "930"
},
{
"name": "Python",
"bytes": "15535"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.