| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
AunShiLord/sympy | sympy/logic/boolalg.py | Python | bsd-3-clause | 49,069 | 0.000346 |
"""
Boolean algebra module for SymPy
"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import combinations, product
from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.core import C
from sympy.core.numbers import Number
from sympy.core.decorators import deprecated
from sympy.core.operations import LatticeOp
from sympy.core.function import Application
from sympy.core.compatibility import ordered, xrange, with_metaclass
from sympy.core.sympify import converter, _sympify, sympify
from sympy.core.singleton import Singleton, S
class Boolean(Basic):
"""A boolean object is an object for which logic operations make sense."""
__slots__ = []
def __and__(self, other):
"""Overloading for & operator"""
return And(self, other)
__rand__ = __and__
def __or__(self, other):
"""Overloading for |"""
return Or(self, other)
__ror__ = __or__
def __invert__(self):
"""Overloading for ~"""
return Not(self)
def __rshift__(self, other):
"""Overloading for >>"""
return Implies(self, other)
def __lshift__(self, other):
"""Overloading for <<"""
return Implies(other, self)
__rrshift__ = __lshift__
__rlshift__ = __rshift__
def __xor__(self, other):
return Xor(self, other)
__rxor__ = __xor__
def equals(self, other):
"""
Returns if the given formulas have the same truth table.
For two formulas to be equal they must have the same literals.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy.logic.boolalg import And, Or, Not
>>> (A >> B).equals(~B >> ~A)
True
>>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C)))
False
>>> Not(And(A, Not(A))).equals(Or(B, Not(B)))
False
"""
from sympy.logic.inference import satisfiable
from sympy.core.relational import Relational
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
not satisfiable(Not(Equivalent(self, other)))
# Developer note: There is liable to be some confusion as to when True should
# be used and when S.true should be used in various contexts throughout SymPy.
# An important thing to remember is that sympify(True) returns S.true. This
# means that for the most part, you can just use True and it will
# automatically be converted to S.true when necessary, similar to how you can
# generally use 1 instead of S.One.
# The rule of thumb is:
# "If the boolean in question can be replaced by an arbitrary symbolic
# Boolean, like Or(x, y) or x > 1, use S.true. Otherwise, use True"
# In other words, use S.true only on those contexts where the boolean is being
# used as a symbolic representation of truth. For example, if the object ends
# up in the .args of any expression, then it must necessarily be S.true
# instead of True, as elements of .args must be Basic. On the other hand, ==
# is not a symbolic operation in SymPy, since it always returns True or False,
# and does so in terms of structural equality rather than mathematical, so it
# should return True. The assumptions system should use True and False. Aside
# from not satisfying the above rule of thumb, the assumptions system uses a
# three-valued logic (True, False, None), whereas S.true and S.false represent
# a two-valued logic. When in doubt, use True.
# 2. "S.true == True" is True.
# While "S.true is True" is False, "S.true == True" is True, so if there is
# any doubt over whether a function or expression will return S.true or True,
# just use "==" instead of "is" to do the comparison, and it will work in
# either case. Finally, for boolean flags, it's better to just use "if x"
# instead of "if x is True". To quote PEP 8:
# Don't compare boolean values to True or False using ==.
# Yes: if greeting:
# No: if greeting == True:
# Worse: if greeting is True:
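# As a quick illustration of the note above (a hedged sketch, not part of the
# original module): sympify, S.true and S.false are real SymPy names, and the
# interactive session below only restates the rule of thumb.
# >>> from sympy import sympify, S
# >>> sympify(True) is S.true        # bools are converted to the singletons
# True
# >>> S.true == True                 # structural == still compares equal to True
# True
# >>> (~S.true) is S.false           # symbolic operators stay in the Boolean world
# True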
class BooleanAtom(Boolean):
"""
Base class of BooleanTrue and BooleanFalse.
"""
@property
def canonical(self):
return self
class BooleanTrue(with_metaclass(Singleton, BooleanAtom)):
"""
SymPy version of True, a singleton that can be accessed via S.true.
This is the SymPy version of True, for use in the logic module. The
primary advantage of using true instead of True is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
True they act bitwise on 1. Functions in the logic module will return this
class when they evaluate to true.
Examples
========
>>> from sympy import sympify, true, Or
>>> sympify(True)
True
>>> ~true
False
>>> ~True
-2
>>> Or(True, False)
True
See Also
========
sympy.logic.boolalg.BooleanFalse
"""
def __nonzero__(self):
return True
__bool__ = __nonzero__
def __hash__(self):
return hash(True)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import true
>>> true.as_set()
UniversalSet()
"""
return S.UniversalSet
class BooleanFalse(with_metaclass(Singleton, BooleanAtom)):
"""
SymPy version of False, a singleton that can be accessed via S.false.
This is the SymPy version of False, for use in the logic module. The
primary advantage of using false instead of False is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
False they act bitwise on 0. Functions in the logic module will return this
class when they evaluate to false.
Examples
========
>>> from sympy import sympify, false, Or, true
>>> sympify(False)
False
>>> false >> false
True
>>> False >> False
0
>>> Or(True, False)
True
See Also
========
sympy.logic.boolalg.BooleanTrue
"""
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __hash__(self):
return hash(False)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import false
>>> false.as_set()
EmptySet()
"""
from sympy.sets.sets import EmptySet
return EmptySet()
true = BooleanTrue()
false = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
converter[bool] = lambda x: S.true if x else S.false
class BooleanFunction(Application, Boolean):
"""Boolean function is a function that lives in a boolean space
It is used as base class for And, Or, Not, etc.
"""
is_Boolean = True
def __call__(self, *args):
return self.func(*[arg(*args) for arg in self.args])
def _eval_simplify(self, ratio, measure):
return simplify_logic(self)
def to_nnf(self, simplify=True):
return self._to_nnf(*self.args, simplify=simplify)
@classmethod
def _to_nnf(cls, *args, **kwargs):
simplify = kwargs.get('simplify', True)
argset = set([])
for arg in args:
if not is_literal(arg):
arg = arg.to_nnf(simplify)
if simplify:
if isinstance(arg, cls):
arg = arg.args
else:
arg = (arg,)
for a in arg:
if Not(a) in argset:
return cls.zero
argset.add(a)
else:
argset.add(arg)
return cls(*argset)
class And(LatticeOp, BooleanFunction):
"""
Logical AND function.
eunchong/build | scripts/slave/recipe_modules/auto_bisect/config_validation.py | Python | bsd-3-clause | 3,647 | 0.007952 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
# Note: this module is tested by a unit test config_validation_test.py,
# rather than recipe simulation tests.
_BISECT_CONFIG_SCHEMA = {
'command': {'type': 'string', 'required': True},
'good_revision': {'type': 'revision', 'required': True},
'bad_revision': {'type': 'revision', 'required': True},
'bisect_bot': {'type': 'string'},
'metric': {'type': 'string'},
'bug_id': {'type': 'integer'},
'repeat_count': {'type': 'integer'},
'max_time_minutes': {'type': 'integer'},
'bisect_mode': {'type': 'string',
'choices': ['mean', 'return_code', 'std_dev']},
'gs_bucket': {'type': 'string'},
'builder_host': {'type': 'string'},
'builder_port': {'type': 'integer'},
'test_type': {'type': 'string'},
'improvement_direction': {'type': 'integer'},
'recipe_tester_name': {'type': 'string'},
'try_job_id': {'type': 'integer'},
}
class ValidationFail(Exception):
"""An exception class that represents a failure to validate."""
def validate_bisect_config(config, schema=None):
"""Checks the correctness of the given bisect job config."""
schema = _BISECT_CONFIG_SCHEMA if schema is None else schema
for key in set(schema):
validate_key(config, schema, key)
if 'good_revision' in schema and 'bad_revision' in schema:
_validate_revisions(config.get('good_revision'), config.get('bad_revision'))
if 'bisect_mode' in schema and 'metric' in schema:
_validate_metric(config.get('bisect_mode'), config.get('metric'))
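# A minimal usage sketch (assumed, not part of the original recipe module):
# exercising validate_bisect_config against the schema above; the field values
# below are made up for illustration.
def _example_validate():  # hypothetical helper, illustration only
    good_config = {
        'command': 'tools/perf/run_benchmark sunspider',
        'good_revision': '306475',
        'bad_revision': '306478',
        'bisect_mode': 'mean',
        'metric': 'Total/Total',
    }
    validate_bisect_config(good_config)  # passes silently
    try:
        validate_bisect_config({'command': 'foo'})  # required revisions missing
    except ValidationFail as e:
        print(e)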
def validate_key(config, schema, key): # pragma: no cover
"""Checks the correctness of the given field in a config."""
if schema[key].get('required') and config.get(key) is None:
raise ValidationFail('Required key "%s" missing.' % key)
if config.get(key) is None:
return # Optional field.
value = config[key]
field_type = schema[key].get('type')
if field_type == 'string':
_validate_string(value, key)
elif field_type == 'integer':
_validate_integer(value, key)
elif field_type == 'revision':
_validate_revision(value, key)
elif field_type == 'boolean':
_validate_boolean(value, key)
if 'choices' in schema[key] and value not in schema[key]['choices']:
_fail(value, key)
def _fail(value, key):
raise ValidationFail('Invalid value %r for "%s".' % (value, key))
def _validate_string(value, key): # pragma: no cover
if not isinstance(value, basestring):
_fail(value, key)
def _validate_revision(value, key): # pragma: no cover
s = str(value)
if not (s.isdigit() or re.match('^[0-9A-Fa-f]{40}$', s)):
_fail(value, key)
def _validate_integer(value, key): # pragma: no cover
try:
int(value)
except ValueError:
_fail(value, key)
def _validate_boolean(value, key): # pragma: no cover
if value not in (True, False):
_fail(value, key)
def _validate_revisions(good_revision, bad_revision): # pragma: no cover
try:
earlier = int(good_revision)
later = int(bad_revision)
except ValueError:
return # The revisions could be sha1 hashes.
if earlier >= later:
raise ValidationFail('Order of good_revision (%d) and bad_revision(%d) '
'is reversed.' % (earlier, later))
def _validate_metric(bisect_mode, metric): # pragma: no cover
if bisect_mode not in ('mean', 'std_dev'):
return
if not (isinstance(metric, basestring) and metric.count('/') == 1):
raise ValidationFail('Invalid value for "metric": %s' % metric)
LyzardKing/ubuntu-make | umake/frameworks/dart.py | Python | gpl-3.0 | 5,057 | 0.002768 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Dartlang module"""
from contextlib import suppress
from gettext import gettext as _
import logging
import os
import re
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.tools import add_env_to_user, MainLoop, get_current_arch, ChecksumType
from umake.ui import UI
logger = logging.getLogger(__name__)
_supported_archs = ['i386', 'amd64']
class DartCategory(umake.frameworks.BaseCategory):
def __init__(self):
super().__init__(name="Dart", description=_("Dartlang Development Environment"), logo_path=None)
class DartLangEditorRemoval(umake.frameworks.baseinstaller.BaseInstaller):
def __init__(self, **kwargs):
super().__init__(name="Dart Editor", description=_("Dart SDK with editor (not supported upstream anymore)"),
download_page=None, only_on_archs=_supported_archs, only_for_removal=True, **kwargs)
class DartLang(umake.frameworks.baseinstaller.BaseInstaller):
def __init__(self, **kwargs):
super().__init__(name="Dart SDK", description=_("Dart SDK (default)"), is_category_default=True,
only_on_archs=_supported_archs,
download_page="https://raw.githubusercontent.com/dart-lang/sdk/master/CHANGELOG.md",
dir_to_decompress_in_tarball="dart-sdk",
checksum_type=ChecksumType.sha256,
required_files_path=[os.path.join("bin", "dart")],
**kwargs)
arch_trans = {
"amd64": "x64",
"i386": "ia32"
# TODO: add arm
}
def parse_download_link(self, line, in_download):
"""Parse Dart SDK download links"""
in_download = False
p = re.search(r"^##\s(\d\S+)", line)
if p is not None:
in_download = True
else:
in_download = False
if in_download:
with suppress(AttributeError):
self.new_download_url = "https://storage.googleapis.com/dart-archive/channels/stable/" +\
"release/{}/sdk/".format(p.group(1)) +\
"dartsdk-linux-{}-release.zip".format(self.arch_trans[get_current_arch()]) +\
".sha256sum"
return ((None, None), in_download)
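# Worked example (hypothetical CHANGELOG line, assumed amd64 host): a line such
# as "## 2.10.4" matches the regex above, so new_download_url becomes
# "https://storage.googleapis.com/dart-archive/channels/stable/release/2.10.4/sdk/dartsdk-linux-x64-release.zip.sha256sum"
# and the method returns ((None, None), True); the actual archive URL is only
# derived later in get_sha_and_start_download by stripping ".sha256sum".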
@MainLoop.in_mainloop_thread
def get_sha_and_start_download(self, download_result):
res = download_result[self.new_download_url]
checksum = res.buffer.getvalue().decode('utf-8').split()[0]
# you get and store self.download_url
url = re.sub('.sha256sum', '', self.new_download_url)
self.check_data_and_start_download(url, checksum)
def post_install(self):
"""Add go necessary env variables"""
add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}})
UI.delayed_display(DisplayMessage(self.RELOGIN_REQUIRE_MSG.format(self.name)))
class FlutterLang(umake.frameworks.baseinstaller.BaseInstaller):
def __init__(self, **kwargs):
super().__init__(name="Flutter SDK", description=_("Flutter SDK"),
only_on_archs=_supported_archs,
download_page="https://api.flutter.dev/flutter/footer.js",
dir_to_decompress_in_tarball="flutter",
required_files_path=[os.path.join("bin", "flutter")],
**kwargs)
def parse_download_link(self, line, in_download):
"""Parse Flutter SDK download links"""
url = None
in_download = False
if 'Flutter ' in line:
p = re.search(r"Flutter\s(\S+)", line)
if p is not None:
in_download = True
if in_download:
with suppress(AttributeError):
url = "https://storage.googleapis.com/flutter_infra/releases/stable/linux/" +\
"flutter_linux_v{}-stable.tar.xz".format(p.group(1))
return ((url, None), in_download)
def post_install(self):
"""Add flutter necessary env variables"""
add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}})
UI.delayed_display(DisplayMessage(self.RELOGIN_REQUIRE_MSG.format(self.name)))
niksoc/srmconnect | app/migrations/0018_auto_20160822_1047.py | Python | gpl-3.0 | 4,536 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-22 10:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0017_auto_20160821_1833'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='answer',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='available',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='available',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='comment_answer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='comment_answer',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='comment_available',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='comment_available',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='comment_event',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='comment_event',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='comment_question',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='comment_question',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='comment_story',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='comment_story',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='comment_wanted',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='comment_wanted',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='event',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='event',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='project',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='project',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='question',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='question',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='story',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='story',
name='modified',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='wanted',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='wanted',
name='modified',
field=models.DateTimeField(blank=True),
),
]
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kio/KUrlComboBox.py | Python | gpl-2.0 | 1,586 | 0.010719 |
# encoding: utf-8
# module PyKDE4.kio
# from /usr/lib/python3/dist-packages/PyKDE4/kio.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class KUrlComboBox(__PyKDE4_kdeui.KComboBox):
# no doc
def addDefaultUrl(self, *args, **kwargs): # real signature unknown
pass
def maxItems(self, *args, **kwargs): # real signature unknown
pass
def mouseMoveEvent(self, *args, **kwargs): # real signature unknown
pass
def mousePressEvent(self, *args, **kwargs): # real signature unknown
pass
def removeUrl(self, *args, **kwargs): # real signature unknown
pass
def setCompletionObject(self, *args, **kwargs): # real signature unknown
pass
def setDefaults(self, *args, **kwargs): # real signature unknown
pass
def setMaxItems(self, *args, **kwargs): # real signature unknown
pass
def setUrl(self, *args, **kwargs): # real signature unknown
pass
def setUrls(self, *args, **kwargs): # real signature unknown
pass
def urlActivated(self, *args, **kwargs): # real signature unknown
pass
def urls(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
Both = 0
Directories = 1
Files = -1
Mode = None # (!) real value is ''
OverLoadResolving = None # (!) real value is ''
RemoveBottom = 1
RemoveTop = 0
jvkersch/hsmmlearn | hsmmlearn/tests/test_hsmm_wrappers.py | Python | gpl-3.0 | 1,757 | 0 |
import unittest
import numpy as np
from ..emissions import GaussianEmissions, MultinomialEmissions
from ..hsmm import GaussianHSMM, MultinomialHSMM
class TestHSMMWrappers(unittest.TestCase):
def setUp(self):
# Exact values don't matter
self.tmat = np.eye(3)
self.durations = np.eye(3)
def test_gaussian_hsmm(self):
means = np.array([1.0, 2.0, 3.0])
scales = np.array([0.5, 0.4, 0.3])
hsmm = GaussianHSMM(means, scales, self.durations, self.tmat)
self.assertIsInstance(hsmm.emissions, GaussianEmissions)
np.testing.assert_array_equal(hsmm.emissions.means, means)
np.testing.assert_array_equal(hsmm.emissions.scales, scales)
def test_gaussian_hsmm_means_scales(self):
means = np.array([1.0, 2.0, 3.0])
scales = np.array([0.5, 0.4, 0.3])
hsmm = GaussianHSMM(means, scales, self.durations, self.tmat)
# Test property getters
np.testing.assert_array_equal(hsmm.means, means)
np.testing.assert_array_equal(hsmm.scales, scales)
# Now update properties and check that the value changed on the
# emissions.
new_means = np.array([5.0, 5.0, 5.0])
new_scales = np.array([1.0, 1.0, 1.0])
hsmm.means = new_means
hsmm.scales = new_scales
emissions = hsmm.emissions
np.testing.assert_array_equal(emissions.means, new_means)
np.testing.assert_array_equal(emissions.scales, new_scales)
def test_multinomial_hsmm(self):
ps = np.ones((3, 5))
hsmm = MultinomialHSMM(ps, self.durations, self.tmat)
self.assertIsInstance(hsmm.emissions, MultinomialEmissions)
np.testing.assert_array_equal(hsmm.emissions._probabilities, ps)
LLNL/spack | var/spack/repos/builtin/packages/r-prodlim/package.py | Python | lgpl-2.1 | 1,415 | 0.003534 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RProdlim(RPackage):
"""Product-Limit Estimation for Censored Event History Analysis
Product-Limit Estimation for Censored Event History Analysis. Fast and
user friendly implementation of nonparametric estimators for censored event
history (survival) analysis. Kaplan-Meier and Aalen-Johansen method."""
homepage = "https://cloud.r-project.org/package=prodlim"
url = "https://cloud.r-project.org/src/contrib/prodlim_1.5.9.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/prodlim"
version('2019.11.13', sha256='6809924f503a14681de84730489cdaf9240d7951c64f5b98ca37dc1ce7809b0f')
version('2018.04.18', sha256='4b22b54fdf712439309be0ff74f63cde9080464667b00e19823372ac0fc254ab')
version('1.6.1', sha256='3f2665257118a3db8682731a500b1ae4d669af344672dc2037f987bee3cca154')
version('1.5.9', sha256='853644886c57102e7f6dd26b6e03e54bf3f9e126f54c76f8d63a3324811f7b42')
depends_on('r@2.9.0:', type=('build', 'run'))
depends_on('r-rcpp@0.11.5:', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-kernsmooth', type=('build', 'run'))
depends_on('r-lava', type=('build', 'run'))
YuxuanLing/trunk | trunk/code/study/python/Fluent-Python-example-code/09-pythonic-obj/vector2d_v3_slots.py | Python | gpl-3.0 | 3,560 | 0.000281 |
"""
A 2-dimensional vector class
>>> v1 = Vector2d(3, 4)
>>> print(v1.x, v1.y)
3.0 4.0
>>> x, y = v1
>>> x, y
(3.0, 4.0)
>>> v1
Vector2d(3.0, 4.0)
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0)
>>> octets = bytes(v1)
>>> octets
b'd\\x00\\x00\\x00\\x00\\x00\\x00\\x08@\\x00\\x00\\x00\\x00\\x00\\x00\\x10@'
>>> abs(v1)
5.0
>>> bool(v1), bool(Vector2d(0, 0))
(True, False)
Test of ``.frombytes()`` class method:
>>> v1_clone = Vector2d.frombytes(bytes(v1))
>>> v1_clone
Vector2d(3.0, 4.0)
>>> v1 == v1_clone
True
Tests of ``format()`` with Cartesian coordinates:
>>> format(v1)
'(3.0, 4.0)'
>>> format(v1, '.2f')
'(3.00, 4.00)'
>>> format(v1, '.3e')
'(3.000e+00, 4.000e+00)'
Tests of the ``angle`` method::
>>> Vector2d(0, 0).angle()
0.0
>>> Vector2d(1, 0).angle()
0.0
>>> epsilon = 10**-8
>>> abs(Vector2d(0, 1).angle() - math.pi/2) < epsilon
True
>>> abs(Vector2d(1, 1).angle() - math.pi/4) < epsilon
True
Tests of ``format()`` with polar coordinates:
>>> format(Vector2d(1, 1), 'p') # doctest:+ELLIPSIS
'<1.414213..., 0.785398...>'
>>> format(Vector2d(1, 1), '.3ep')
'<1.414e+00, 7.854e-01>'
>>> format(Vector2d(1, 1), '0.5fp')
'<1.41421, 0.78540>'
Tests of `x` and `y` read-only properties:
>>> v1.x, v1.y
(3.0, 4.0)
>>> v1.x = 123
Traceback (most recent call last):
...
AttributeError: can't set attribute
Tests of hashing:
>>> v1 = Vector2d(3, 4)
>>> v2 = Vector2d(3.1, 4.2)
>>> hash(v1), hash(v2)
(7, 384307168202284039)
>>> len(set([v1, v2]))
2
# END VECTOR2D_V3_DEMO
"""
from array import array
import math
# BEGIN VECTOR2D_V3_SLOTS
class Vector2d:
__slots__ = ('__x', '__y')
typecode = 'd'
# methods follow (omitted in book listing)
# END VECTOR2D_V3_SLOTS
def __init__(self, x, y):
self.__x = float(x)
self.__y = float(y)
@property
def x(self):
return self.__x
@property
def y(self):
return self.__y
def __iter__(self):
return (i for i in (self.x, self.y))
def __repr__(self):
class_name = type(self).__name__
return '{}({!r}, {!r})'.format(class_name, *self)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(array(self.typecode, self)))
def __eq__(self, other):
return tuple(self) == tuple(other)
def __hash__(self):
return hash(self.x) ^ hash(self.y)
def __abs__(self):
return math.hypot(self.x, self.y)
def __bool__(self):
return bool(abs(self))
def angle(self):
return math.atan2(self.y, self.x)
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('p'):
fmt_spec = fmt_spec[:-1]
coords = (abs(self), self.angle())
outer_fmt = '<{}, {}>'
else:
coords = self
outer_fmt = '({}, {})'
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(*components)
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(*memv)
plugaai/pytrthree | tools/request_sender.py | Python | mit | 2,685 | 0.003724 |
#!/usr/bin/env python
import argparse
import datetime
import pandas as pd
import yaml
from pytrthree import TRTH
from pytrthree.utils import retry
def make_request(daterange, criteria):
request = api.factory.LargeRequestSpec(**template)
short_dates = sorted([x.replace('-', '') for x in daterange.values()])
search_result = api.search_rics(daterange, criteria['ric'], refData=False)
ric_list = [{'code': i['code']} for i in search_result]
request['friendlyName'] = '{}-{}_{}'.format(name, *short_dates)
request['instrumentList']['instrument'] = ric_list
request['dateRange'] = daterange
if 'fields' in criteria:
request['messageTypeList']['messageType'][0]['fieldList']['string'] = criteria['fields']
return request
def parse_daterange(s):
return dict(start=str(s.iloc[0].date()), end=str(s.iloc[-1].date()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tool to send a series of requests to TRTH.')
parser.add_argument('--config', action='store', type=argparse.FileType('r'), required=True,
help='TRTH API configuration (YAML file)')
parser.add_argument('--template', action='store', type=argparse.FileType('r'), required=True,
help='Base template for the requests (YAML file)')
parser.add_argument('--criteria', action='store', type=argparse.FileType('r'), required=True,
help='Criteria for searching RICs and modifying queried fields (YAML file)')
parser.add_argument('--start', action='store', type=str, required=True,
help='Start date (ISO-8601 datetime string)')
parser.add_argument('--end', action='store', type=str, default=str(datetime.datetime.now().date()),
help='End date (ISO-8601 datetime string). Default to today\'s date.')
parser.add_argument('--group', action='store', type=str, default='1A',
help='Pandas datetime frequency string for grouping requests. Defaults to "1A".')
args = parser.parse_args()
api = TRTH(config=args.config)
api.options['raise_exception'] = True
criteria = yaml.load(args.criteria)
template = yaml.load(args.template)
dates = pd.date_range(args.start, args.end).to_series()
dateranges = [parse_daterange(i) for _, i in dates.groupby(pd.TimeGrouper(args.group))]
for daterange in dateranges:
for name, crit in criteria.items():
request = make_request(daterange, crit)
rid = retry(api.submit_ftp_request, request, sleep=30, exp_base=2)
api.logger.info(rid['requestID'])
api.logger.info('All requests sent!')
mitsuhiko/zine | zine/docs/builder.py | Python | bsd-3-clause | 2,947 | 0.001018 |
# -*- coding: utf-8 -*-
"""
zine.docs.builder
~~~~~~~~~~~~~~~~~~~~~~
The documentation building system. This is only used by the
documentation building script.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import cPickle as pickle
from urlparse import urlparse
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.core import publish_parts
from docutils.writers import html4css1
_toc_re = re.compile(r'<!-- TOC -->(.*?)<!-- /TOC -->(?s)')
_toc_contents_re = re.compile(r'<ul[^>]*>(.*)</ul>(?s)')
def plugin_links_directive(name, arguments, options,
content, lineno,
content_offset, block_text, state, state_machine):
return [nodes.comment('', 'PLUGIN_LINKS')]
plugin_links_directive.arguments = (0, 0, 0)
plugin_links_directive.content = 1
directives.register_directive('plugin_links', plugin_links_directive)
def is_relative_uri(uri):
if uri.startswith('/'):
return False
# there is no uri parser, but the url parser works mostly
return not urlparse(uri)[0]
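# Illustrative behaviour (a sketch inferred from the code above, not from the
# original docs):
#   is_relative_uri('chapters/intro.rst')    -> True   (no scheme, not rooted)
#   is_relative_uri('/static/style.css')     -> False  (rooted path)
#   is_relative_uri('http://example.com/x')  -> False  (has a scheme)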
class Translator(html4css1.HTMLTranslator):
pass
class DocumentationWriter(html4css1.Writer):
def __init__(self):
html4css1.Writer.__init__(self)
self.translator_class = Translator
def generate_documentation(data):
toc = '\n\n..\n TOC\n\n.. contents::\n\n..\n /TOC'
parts = publish_parts(data + toc,
writer=DocumentationWriter(),
settings_overrides=dict(
initial_header_level=2,
field_name_limit=50
)
)
toc = None
body = parts['body']
match = _toc_re.search(body)
body = body[:match.start()] + body[match.end():]
match = _toc_contents_re.search(match.group(1))
if match is not None:
toc = match.group(1)
# just add the toc if there are at least two entries.
if toc.count('</li>') < 2:
toc = None
return {
'title': parts['title'],
'body': body,
'toc': toc
}
def walk(directory, callback=lambda filename: None):
"""Walk a directory and translate all the files in there."""
directory = os.path.normpath(directory)
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.endswith('.rst'):
relname = os.path.join(dirpath, filename)[len(directory) + 1:]
f = file(os.path.join(dirpath, filename))
try:
d = generate_documentation(f.read().decode('utf-8'))
finally:
f.close()
f = file(os.path.join(dirpath, filename[:-3] + 'page'), 'wb')
try:
pickle.dump(d, f, protocol=2)
finally:
f.close()
callback(relname)
leylabmpi/pyTecanFluent | tests/test_Utils.py | Python | mit | 1,127 | 0.022183 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import
## batteries
import os
import sys
import pytest
## 3rd party
import pandas as pd
## package
from pyTecanFluent import Utils
# data dir
test_dir = os.path.join(os.path.dirname(__file__))
data_dir = os.path.join(test_dir, 'data')
# tests
def test_make_range():
x = Utils.make_range('all')
assert x is None
x = Utils.make_range('0')
assert x == [0]
x = Utils.make_range('1,2,5')
assert x == [1,2,5]
x = Utils.make_range('1,2,5-6')
assert x == [1,2,5,6]
x = Utils.make_range('1-3,5-6')
assert x == [1,2,3,5,6]
def test_range_zeroindex():
x = Utils.make_range('all', set_zero_index=True)
assert x is None
with pytest.raises(ValueError):
Utils.make_range('0', set_zero_index=True)
x = Utils.make_range('1,2,5', set_zero_index=True)
assert x == [0,1,4]
x = Utils.make_range('1,2,5-6', set_zero_index=True)
assert x == [0,1,4,5]
def test_check_gwl():
gwl_file = os.path.join(data_dir, 'multi_dispense.gwl')
ret = Utils.check_gwl(gwl_file)
assert ret is None
mudbungie/carrieocoyle | gallery/migrations/0006_auto_20151027_1101.py | Python | mit | 404 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0005_piece_medium'),
]
operations = [
migrations.AlterField(
model_name='medium',
name='medium_name',
field=models.TextField(default='Medium'),
),
]
johannfaouzi/pyts | pyts/datasets/uea.py | Python | bsd-3-clause | 9,597 | 0 |
"""
Utility functions for the UEA multivariate time series classification
archive.
"""
# Author: Johann Faouzi <johann.faouzi@gmail.com>
# License: BSD-3-Clause
import numpy as np
import os
import pickle
from scipy.io.arff import loadarff
from sklearn.utils import Bunch
from urllib.request import urlretrieve
import zipfile
def _correct_uea_name_download(dataset):
if dataset == 'Ering':
return 'ERing'
else:
return dataset
def uea_dataset_list():
"""List of available UEA datasets.
Returns
-------
datasets : list
List of available datasets from the UEA Time Series
Classification Archive.
References
----------
.. [1] `List of datasets on the UEA & UCR archive
<http://www.timeseriesclassification.com/dataset.php>`_
Examples
--------
>>> from pyts.datasets import uea_dataset_list
>>> uea_dataset_list()[:3]
['ArticularyWordRecognition', 'AtrialFibrillation', 'BasicMotions']
"""
module_path = os.path.dirname(__file__)
finfo = os.path.join(module_path, 'info', 'uea.pickle')
dictionary = pickle.load(open(finfo, 'rb'))
datasets = sorted(dictionary.keys())
return datasets
def uea_dataset_info(dataset=None):
"""Information about the UEA datasets.
Parameters
----------
dataset : str, list of str or None (default = None)
The data sets for which the information will be returned.
If None, the information for all the datasets is returned.
Returns
-------
dictionary : dict
Dictionary with the information for each dataset.
References
----------
.. [1] `List of datasets on the UEA & UCR archive
<http://www.timeseriesclassification.com/dataset.php>`_
Examples
--------
>>> from pyts.datasets import uea_dataset_info
>>> uea_dataset_info('AtrialFibrillation')['n_timestamps']
640
"""
module_path = os.path.dirname(__file__)
finfo = os.path.join(module_path, 'info', 'uea.pickle')
dictionary = pickle.load(open(finfo, 'rb'))
datasets = list(dictionary.keys())
if dataset is None:
return dictionary
elif isinstance(dataset, str):
if dataset not in datasets:
raise ValueError(
"{0} is not a valid name. The list of available names "
"can be obtained by calling the "
"'pyts.datasets.uea_dataset_list' function."
.format(dataset)
)
else:
return dictionary[dataset]
elif isinstance(dataset, (list, tuple, np.ndarray)):
dataset = np.asarray(dataset)
invalid_datasets = np.setdiff1d(dataset, datasets)
if invalid_datasets.size > 0:
raise ValueError(
"The following names are not valid: {0}. The list of "
"available names can be obtained by calling the "
"'pyts.datasets.uea_dataset_list' function."
.format(invalid_datasets)
)
else:
info = {}
for data in dataset:
info[data] = dictionary[data]
return info
def fetch_uea_dataset(dataset, use_cache=True, data_home=None,
return_X_y=False): # noqa 207
"""Fetch dataset from UEA TSC Archive by name.
Fetched data sets are saved by default in the
``pyts/datasets/cached_datasets/UEA/`` folder. To avoid
downloading the same data set several times, it is
highly recommended not to change the default values
of ``use_cache`` and ``path``.
Parameters
----------
dataset : str
Name of the dataset.
use_cache : bool (default = True)
If True, look if the data set has already been fetched
and load the fetched version if it is the case. If False,
download the data set from the UCR Time Series Classification
Archive.
data_home : None or str (default = None)
The path of the folder containing the cached data set.
If None, the ``pyts.datasets.cached_datasets/UEA/`` folder is
used. If the data set is not found, it is downloaded and cached
in this path.
return_X_y : bool (default = False)
If True, returns ``(data_train, data_test, target_train, target_test)``
instead of a Bunch object. See below for more information about the
`data` and `target` object.
Returns
-------
data : Bunch
Dictionary-like object, with attributes:
data_train : array of floats
The time series in the training set.
data_test : array of floats
The time series in the test set.
target_train : array of integers
The classification labels in the training set.
target_test : array of integers
The classification labels in the test set.
DESCR : str
The full description of the dataset.
url : str
The url of the dataset.
(data_train, data_test, target_train, target_test) : tuple if \
``return_X_y`` is True
Notes
-----
Missing values are represented as NaN's.
References
----------
.. [1] A. Bagnall et al, "The UEA multivariate time series
classification archive, 2018". arXiv:1811.00075 [cs, stat],
2018.
.. [2] A. Bagnall et al, "The UEA & UCR Time Series Classification
Repository", www.timeseriesclassification.com.
"""
if dataset not in uea_dataset_list():
raise ValueError(
"{0} is not a valid name. The list of available names "
"can be obtained with ``pyts.datasets.uea_dataset_list()``"
.format(dataset)
)
if data_home is None:
import pyts
home = '/'.join(pyts.__file__.split('/')[:-2]) + '/'
relative_path = 'pyts/datasets/cached_datasets/UEA/'
path = home + relative_path
else:
path = data_home
if not os.path.exists(path):
os.makedirs(path)
correct_dataset = _correct_uea_name_download(dataset)
if use_cache and os.path.exists(path + correct_dataset):
bunch = _load_uea_dataset(correct_dataset, path)
else:
url = ("http://www.timeseriesclassification.com/Downloads/{0}.zip"
.format(correct_dataset))
filename = 'temp_{}'.format(correct_dataset)
_ = urlretrieve(url, path + filename)
zipfile.ZipFile(path + filename).extractall(path + correct_dataset)
os.remove(path + filename)
bunch = _load_uea_dataset(correct_dataset, path)
if return_X_y:
return (bunch.data_train, bunch.data_test,
bunch.target_train, bunch.target_test)
return bunch
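# A minimal usage sketch (assumed; downloads the archive on first use):
# 'BasicMotions' is one of the names returned by uea_dataset_list(), and
# return_X_y=True unpacks the Bunch into the train/test split described above.
#
#     X_train, X_test, y_train, y_test = fetch_uea_dataset(
#         'BasicMotions', return_X_y=True)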
def _load_uea_dataset(dataset, path):
"""Load a UEA data set from a local folder.
Parameters
----------
dataset : str
Name of the dataset.
path : str
The path of the folder containing the cached data set.
Returns
-------
data : Bunch
Dictionary-like object, with attributes:
data_train : array of floats
The time series in the training set.
data_test : array of floats
The time series in the test set.
target_train : array
The classification labels in the training set.
target_test : array
The classification labels in the test set.
DESCR : str
The full description of the dataset.
url : str
The url of the dataset.
Notes
-----
Missing values are represented as NaN's.
"""
new_path = path + dataset + '/'
try:
description_file = [
file for file in os.listdir(new_path)
if ('Description.txt' in file
or dataset + '.txt' in file)
][0]
except IndexError:
description_file = None
if description_file is not None:
try:
with(open(new_path + description_file, encoding='utf-8')) as f:
description = f.read()
except UnicodeDecodeError:
with(open(new_path + description_file,
encoding
frappe/frappe | frappe/patches/v10_0/refactor_social_login_keys.py | Python | mit | 4,935 | 0.024924 |
import frappe
from frappe.utils import cstr
def execute():
# Update Social Logins in User
run_patch()
# Create Social Login Key(s) from Social Login Keys
frappe.reload_doc("integrations", "doctype", "social_login_key", force=True)
if not frappe.db.exists('DocType', 'Social Login Keys'):
return
social_login_keys = frappe.get_doc("Social Login Keys", "Social Login Keys")
if social_login_keys.get("facebook_client_id") or social_login_keys.get("facebook_client_secret"):
facebook_login_key = frappe.new_doc("Social Login Key")
facebook_login_key.get_social_login_provider("Facebook", initialize=True)
facebook_login_key.social_login_provider = "Facebook"
facebook_login_key.client_id = social_login_keys.get("facebook_client_id")
facebook_login_key.client_secret = social_login_keys.get("facebook_client_secret")
if not (facebook_login_key.client_secret and facebook_login_key.client_id):
facebook_login_key.enable_social_login = 0
facebook_login_key.save()
if social_login_keys.get("frappe_server_url"):
frappe_login_key = frappe.new_doc("Social Login Key")
frappe_login_key.get_social_login_provider("Frappe", initialize=True)
frappe_login_key.social_login_provider = "Frappe"
frappe_login_key.base_url = social_login_keys.get("frappe_server_url")
frappe_login_key.client_id = social_login_keys.get("frappe_client_id")
frappe_login_key.client_secret = social_login_keys.get("frappe_client_secret")
if not (frappe_login_key.client_secret and frappe_login_key.client_id and frappe_login_key.base_url):
frappe_login_key.enable_social_login = 0
frappe_login_key.save()
if social_login_keys.get("github_client_id") or social_login_keys.get("github_client_secret"):
github_login_key = frappe.new_doc("Social Login Key")
github_login_key.get_social_login_provider("GitHub", initialize=True)
github_login_key.social_login_provider = "GitHub"
github_login_key.client_id = social_login_keys.get("github_client_id")
github_login_key.client_secret = social_login_keys.get("github_client_secret")
if not (github_login_key.client_secret and github_login_key.client_id):
github_login_key.enable_social_login = 0
github_login_key.save()
if social_login_keys.get("google_client_id") or social_login_keys.get("google_client_secret"):
google_login_key = frappe.new_doc("Social Login Key")
google_login_key.get_social_login_provider("Google", initialize=True)
google_login_key.social_login_provider = "Google"
google_login_key.client_id = social_login_keys.get("google_client_id")
google_login_key.client_secret = social_login_keys.get("google_client_secret")
if not (google_login_key.client_secret and google_login_key.client_id):
google_login_key.enable_social_login = 0
google_login_key.save()
frappe.delete_doc("DocType", "Social Login Keys")
def run_patch():
frappe.reload_doc("core", "doctype", "user", force=True)
frappe.reload_doc("core", "doctype", "user_social_login", force=True)
users = frappe.get_all("User", fields=["*"], filters={"name":("not in", ["Administrator", "Guest"])})
for user in users:
idx = 0
if user.frappe_userid:
insert_user_social_login(user.name, user.modified_by, 'frappe', idx, userid=user.frappe_userid)
idx += 1
if user.fb_userid or user.fb_username:
insert_user_social_login(user.name, user.modified_by, 'facebook', idx, userid=user.fb_userid, username=user.fb_username)
idx += 1
if user.github_userid or user.github_username:
insert_user_social_login(user.name, user.modified_by, 'github', idx, userid=user.github_userid, username=user.github_username)
idx += 1
if user.google_userid:
insert_user_social_login(user.name, user.modified_by, 'google', idx, userid=user.google_userid)
idx += 1
def insert_user_social_login(user, modified_by, provider, idx, userid=None, username=None):
source_cols = get_standard_cols()
creation_time = frappe.utils.get_datetime_str(frappe.utils.get_datetime())
values = [
frappe.generate_hash(length=10),
creation_time,
creation_time,
user,
modified_by,
user,
"User",
"social_logins",
cstr(idx),
provider
]
if userid:
source_cols.append("userid")
values.append(userid)
if username:
source_cols.append("username")
values.append(username)
query = """INSERT INTO `tabUser Social Login` (`{source_cols}`)
VALUES ({values})
""".format(
source_cols = "`, `".join(source_cols),
values= ", ".join([frappe.db.escape(d) for d in values])
)
frappe.db.sql(query)
def get_provider_field_map():
return frappe._dict({
"frappe": ["frappe_userid"],
"facebook": ["fb_userid", "fb_username"],
"github": ["github_userid", "github_username"],
"google": ["google_userid"],
})
def get_provider_fields(provider):
return get_provider_field_map().get(provider)
def get_standard_cols():
return ["name", "creation", "modified", "owner", "modified_by", "parent", "parenttype", "parentfield", "idx", "provider"]
CxAalto/gtfspy | gtfspy/spreading/heap.py | Python | mit | 2,529 | 0.001977 |
from heapq import heappush, heappop
import numpy as np
from gtfspy.route_types import WALK
from .event import Event
class EventHeap:
"""
EventHeap represents a container for the event
heap to run time-dependent Dijkstra for public transport routing objects.
"""
def __init__(self, pd_df=None):
"""
Parameters
----------
pd_df : Pandas.Dataframe
Initial list of
"""
self.heap = []
keys = ['arr_time_ut', 'dep_time_ut', 'from_stop_I', 'to_stop_I', 'trip_I']
# pd_df.iterrows() is slow as it creates new Series objects!
n = len(pd_df)
key_to_j = {}
for j, key in enumerate(pd_df.columns.values):
key_to_j[key] = j
pd_df_values = pd_df.values
for i in range(n):
vals = []
for key in keys:
j = key_to_j[key]
vals.append(pd_df_values[i, j])
e = Event(*vals)
self.add_event(e)
def add_event(self, event):
"""
Add an event to the heap/priority queue
Parameters
----------
event : Event
"""
assert event.dep_time_ut <= event.arr_time_ut
heappush(self.heap, event)
def pop_next_event(self):
return heappop(self.heap)
def size(self):
"""
Return the size of the heap
"""
return len(self.heap)
def add_walk_events_to_heap(self, transfer_distances, e, start_time_ut, walk_speed, uninfected_stops, max_duration_ut):
"""
Parameters
----------
transfer_distances:
e : Event
start_time_ut : int
walk_speed : float
uninfected_stops : list
max_duration_ut : int
"""
n = len(transfer_distances)
dists_values = transfer_distances.values
to_stop_I_index = np.nonzero(transfer_distances.columns == 'to_stop_I')[0][0]
d_index = np.nonzero(transfer_distances.columns == 'd')[0][0]
for i in range(n):
transfer_to_stop_I = dists_values[i, to_stop_I_index]
if transfer_to_stop_I in uninfected_stops:
d = dists_values[i, d_index]
transfer_arr_time = e.arr_time_ut + int(d/float(walk_speed))
if transfer_arr_time > start_time_ut+max_duration_ut:
continue
te = Event(transfer_arr_time, e.arr_time_ut, e.to_stop_I, transfer_to_stop_I, WALK)
self.add_event(te)
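# A minimal usage sketch (assumed; the stop and trip ids below are made up):
# EventHeap only needs a DataFrame providing the five columns read in __init__.
#
#     import pandas as pd
#     events = pd.DataFrame([
#         {'arr_time_ut': 120, 'dep_time_ut': 100,
#          'from_stop_I': 1, 'to_stop_I': 2, 'trip_I': 10},
#         {'arr_time_ut': 90, 'dep_time_ut': 60,
#          'from_stop_I': 2, 'to_stop_I': 3, 'trip_I': 11},
#     ])
#     heap = EventHeap(events)
#     while heap.size() > 0:
#         event = heap.pop_next_event()  # pops events in heap (time) order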
OpenNetworkingFoundation/PIF-Open-Intermediate-Representation | pif_ir/meta_ir/validate.py | Python | apache-2.0 | 4,103 | 0.006581 |
"""
@file
@brief Validator for MetaIR
Does semantic validation of the MetaIR instance
"""
from common import *
def meta_ir_validate_parser(instance):
"""
@brief Semantic validation of an MetaIR instance
@param instance The MetaIR instance map
@returns Boolean, True if instance is valid.
The instance is assumed to be a syntactically valid instance.
This routine checks:
The Parser:
Each edge connects two declared states
In so doing, the validator generates additional structures
and binds them to the IR. These inc
"""
pass
def meta_ir_validate_instance(instance):
"""
@brief Semantic validation of an MetaIR instance
@param instance The MetaIR instance map
@returns Boolean, True if instance is valid.
The instance is assumed to be a syntactically valid instance.
This routine calls the object specific validators:
parser
tables
The Parser:
Each edge connects two declared states
In so doing, the validator generates additional structures
and binds them to the IR. These inc
"""
pass
def meta_ir_check_object(meta_ir_instance, obj_type_name, name, type,
implementation_type=None):
"""
@brief Check basic MetaIR characteristics for an object reference
@param meta_ir_instance The top level mapping for the IR
@param obj_type_name The name of the object to report on error
@param name The name of the top level object
@param type The expected MetaIR type for the object
@param implementation_type If not None, check impl is present and has type
TODO Support a set for implementation type
"""
meta_ir_assert(name in meta_ir_instance.keys(),
"%s: %s is not in top level for type %s" %
(obj_type_name, name, type))
meta_ir_assert("type" in meta_ir_instance[name].keys(),
"%s: %s is not an MetaIR object" % (obj_type_name, name))
meta_ir_assert(meta_ir_instance[name]["type"] == type,
"%s: %s is not the expected type. Got %s, expected %s" %
(obj_type_name, name, meta_ir_instance[name]["type"], type))
if implementation_type is not None:
meta_ir_assert("format" in meta_ir_instance[name].keys(),
"%s: Expected format indication for %s" %
(obj_type_name, name))
meta_ir_assert(meta_ir_instance[name]["format"] == implementation_type,
"%s: implementation format for %s is %s, expected %s" %
(obj_type_name, name, meta_ir_instance[name]["format"],
implementation_type))
meta_ir_assert("implementation" in meta_ir_instance[name].keys(),
"%s: Expected implemenation for %s" %
(obj_type_name, name))
meta_ir_assert("implementation" in meta_ir_instance[name].keys(),
"%s: Expected implemenation for %s" %
(obj_type_name, name))
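# Illustrative call (assumed instance shape, not from the original tests):
# for an instance map such as {'my_parser': {'type': 'parser'}}, the call
#   meta_ir_check_object(instance, 'parser ref', 'my_parser', 'parser')
# passes the name, "type"-presence and type-equality asserts, while passing
# type='table' (or an unknown name) would trip meta_ir_assert instead.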
def meta_ir_check_header(meta_ir_instance, name):
"""
@brief Validate a reference to an MetaIR header
@param meta_ir_instance The top level MetaIR instance map
@param name The name of the header
@returns Boolean, True if a valid reference
"""
if name not in meta_ir_instance.keys():
return False
if "type" not in meta_ir_instance[name].keys():
return False
if meta_ir_instance[name]["type"] != "header":
return False
return True
def meta_ir_validate_data_ref(meta_ir_instance, name):
"""
@brief Validate a reference to an MetaIR field
@param meta_ir_instance The top level MetaIR instance map
@param name The reference being checked
@returns Boolean, True if a valid reference
Currently only supports header and header.fld
"""
parts = name.split(".")
if len(parts) == 1:
return meta_ir_check_header(meta_ir_instance, parts[0])
elif len(parts) == 2:
return meta_ir_find_field(meta_ir_instance, parts[0], parts[1]) is not None
return False
alvason/probability-insighter | code/mutation-drift-selection.py | Python | gpl-2.0 | 11,460 | 0.006457 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Wright-Fisher model of mutation, selection and random genetic drift
# <markdowncell>
# A Wright-Fisher model has a fixed population size *N* and discrete non-overlapping generations. Each generation, each individual has a random number of offspring whose mean is proportional to the individual's fitness. Each generation, mutation may occur. Mutations may increase or decrease individual's fitness, which affects the chances of that individual's offspring in subsequent generations.
# <markdowncell>
# Here, I'm using a fitness model where some proportion of the time a mutation will have a fixed fitness effect, increasing or decreasing fitness by a fixed amount.
# <headingcell level=2>
# Setup
# <codecell>
import numpy as np
import itertools
# <headingcell level=2>
# Make population dynamic model
# <headingcell level=3>
# Basic parameters
# <codecell>
pop_size = 100
# <codecell>
seq_length = 10
# <codecell>
alphabet = ['A', 'T']
# <codecell>
base_haplotype = "AAAAAAAAAA"
# <codecell>
fitness_effect = 1.1 # fitness effect if a functional mutation occurs
# <codecell>
fitness_chance = 0.1 # chance that a mutation has a fitness effect
# <headingcell level=3>
# Population of haplotypes maps to counts and fitnesses
# <markdowncell>
# Store this as a lightweight Dictionary that maps a string to a count. All the sequences together will have count *N*.
# <codecell>
pop = {}
# <codecell>
pop["AAAAAAAAAA"] = 40
# <codecell>
pop["AAATAAAAAA"] = 30
# <codecell>
pop["AATTTAAAAA"] = 30
# <markdowncell>
# *Map haplotype string to fitness float.*
# <codecell>
fitness = {}
# <codecell>
fitness["AAAAAAAAAA"] = 1.0
# <codecell>
fitness["AAATAAAAAA"] = 1.05
# <codecell>
fitness["AATTTAAAAA"] = 1.10
# <codecell>
pop["AAATAAAAAA"]
# <codecell>
fitness["AAATAAAAAA"]
# <headingcell level=3>
# Add mutation
# <codecell>
mutation_rate = 0.005 # per gen per individual per site
# <codecell>
def get_mutation_count():
mean = mutation_rate * pop_size * seq_length
return np.random.poisson(mean)
# <codecell>
def get_random_haplotype():
haplotypes = pop.keys()
frequencies = [x/float(pop_size) for x in pop.values()]
total = sum(frequencies)
frequencies = [x / total for x in frequencies]
return np.random.choice(haplotypes, p=frequencies)
# <codecell>
def get_mutant(haplotype):
site = np.random.randint(seq_length)
possible_mutations = list(alphabet)
possible_mutations.remove(haplotype[site])
mutation = np.random.choice(possible_mutations)
new_haplotype = haplotype[:site] + mutation + haplotype[site+1:]
return new_haplotype
# <markdowncell>
# *Mutations have fitness effects*
# <codecell>
def get_fitness(haplotype):
old_fitness = fitness[haplotype]
if (np.random.random() < fitness_chance):
return old_fitness * fitness_effect
else:
return old_fitness
# <codecell>
get_fitness("AAAAAAAAAA")
# <markdowncell>
# *If a mutation event creates a new haplotype, assign it a random fitness.*
# <codecell>
def mutation_event():
haplotype = get_random_haplotype()
if pop[haplotype] > 1:
pop[haplotype] -= 1
new_haplotype = get_mutant(haplotype)
if new_haplotype in pop:
pop[new_haplotype] += 1
else:
pop[new_haplotype] = 1
if new_haplotype not in fitness:
fitness[new_haplotype] = get_fitness(haplotype)
# <codecell>
mutation_event()
# <codecell>
pop
# <codecell>
fitness
# <codecell>
def mutation_step():
mutation_count = get_mutation_count()
for i in range(mutation_count):
mutation_event()
# <headingcell level=3>
# Genetic drift and fitness affect which haplotypes make it to the next generation
# <markdowncell>
# *Fitness weights the multinomial draw.*
# <codecell>
def get_offspring_counts():
haplotypes = pop.keys()
frequencies = [pop[haplotype]/float(pop_size) for haplotype in haplotypes]
fitnesses = [fitness[haplotype] for haplotype in haplotypes]
weights = [x * y for x,y in zip(frequencies, fitnesses)]
total = sum(weights)
weights = [x / total for x in weights]
return list(np.random.multinomial(pop_size, weights))
# <codecell>
get_offspring_counts()
# <codecell>
def offspring_step():
counts = get_offspring_counts()
for (haplotype, count) in zip(pop.keys(), counts):
if (count > 0):
pop[haplotype] = count
else:
del pop[haplotype]
# <headingcell level=3>
# Combine and iterate
# <codecell>
def time_step():
mutation_step()
offspring_step()
# <codecell>
generations = 5
# <codecell>
def simulate():
for i in range(generations):
time_step()
# <headingcell level=3>
# Record
# <markdowncell>
# We want to keep a record of past population frequencies to understand dynamics through time. At each step in the simulation, we append to a history object.
# <codecell>
history = []
# <codecell>
def simulate():
clone_pop = dict(pop)
history.append(clone_pop)
for i in range(generations):
time_step()
clone_pop = dict(pop)
history.append(clone_pop)
# <codecell>
simulate()
# <headingcell level=2>
# Analyze trajectories
# <headingcell level=3>
# Calculate diversity
# <codecell>
def get_distance(seq_a, seq_b):
diffs = 0
length = len(seq_a)
assert len(seq_a) == len(seq_b)
for chr_a, chr_b in zip(seq_a, seq_b):
if chr_a != chr_b:
diffs += 1
return diffs / float(length)
# <codecell>
def get_diversity(population):
haplotypes = population.keys()
haplotype_count = len(haplotypes)
diversity = 0
for i in range(haplotype_count):
for j in range(haplotype_count):
haplotype_a = haplotypes[i]
haplotype_b = haplotypes[j]
frequency_a = population[haplotype_a] / float(pop_size)
frequency_b = population[haplotype_b] / float(pop_size)
frequency_pair = frequency_a * frequency_b
diversity += frequency_pair * get_distance(haplotype_a, haplotype_b)
return diversity
# <codecell>
def get_diversity_trajectory():
trajectory = [get_diversity(generation) for generation in history]
return trajectory
# <headingcell level=3>
# Plot diversity
# <codecell>
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
# <codecell>
def diversity_plot():
mpl.rcParams['font.size']=14
trajectory = get_diversity_trajectory()
plt.plot(trajectory, "#447CCD")
plt.ylabel("diversity")
plt.xlabel("generation")
# <headingcell level=3>
# Analyze and plot divergence
# <codecell>
def get_divergence(population):
haplotypes = population.keys()
divergence = 0
for haplotype in haplotypes:
frequency = population[haplotype] / float(pop_size)
divergence += frequency * get_distance(base_haplotype, haplotype)
return divergence
# <codecell>
def get_divergence_trajectory():
trajectory = [get_divergence(generation) for generation in history]
return trajectory
# <codecell>
def divergence_plot():
mpl.rcParams['font.size']=14
trajectory = get_divergence_trajectory()
plt.plot(trajectory, "#447CCD")
plt.ylabel("divergence")
plt.xlabel("generation")
# <headingcell level=3>
# Plot haplotype trajectories
# <codecell>
def get_frequency(haplotype, generation):
pop_at_generation = history[generation]
if haplotype in pop_at_generation:
return pop_at_generation[haplotype]/float(pop_size)
else:
return 0
# <codecell>
def get_trajectory(haplotype):
trajectory = [get_frequency(haplotype, gen) for gen in range(generations)]
return trajectory
# <codecell>
def get_all_haplotypes():
haplotypes = set()
for generation in history:
for haplotype in generation:
haplotypes.add(haplotype)
return haplotypes
# <codecell>
colors = ["#781C86", "#571EA2", "#462EB9", "#3F47C9", "#3F63CF", "#447CCD", "#4C90C0", "#56A0AE", "#63AC9A", "#72B485", "#83BA70", "#96BD60", "#AABD52",
|
colobas/gerador-horarios
|
bs4/builder/_html5lib.py
|
Python
|
mit
| 6,730
| 0.027637
|
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
return markup, None, None, False
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
"set attr", name, value
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# Concatenate new text onto old text node
# XXX This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + node.element)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
self.soup.object_was_parsed(node.element, parent=self.element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
text = TextNode(self.soup.new_string(data), self.soup)
if insertBefore:
self.insertBefore(text, insertBefore)
else:
self.appendChild(text)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, newParent):
while self.element.contents:
child = self.element.contents[0]
child.extract()
if isinstance(child, Tag):
newParent.appendChild(
Element(child, self.soup
|
, namespaces["html"]))
else:
newParent.appendChild(
TextNode(child, self.soup))
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespa
|
ce)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
|
erickeller/edi
|
tests/lib/test_playbookrunner.py
|
Python
|
lgpl-3.0
| 2,684
| 0.000373
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
from edi.lib.configurationparser import ConfigurationParser
from edi.lib.playbookrunner import PlaybookRunner
from tests.libtesting.fixtures.configfiles import config_files
from tests.libtesting.helpers import get_command_parameter
from edi.lib import mockablerun
import shutil
import subprocess
from codecs import open
import yaml
def verify_inventory(file):
with open(file, encoding='utf-8') as f:
assert 'fake-container' in f.read()
def verify_extra_vars(file):
print(file)
with open(file, encoding='utf-8') as f:
extra_vars = yaml.load(f)
assert extra_vars['edi_config_management_user_name'] == 'edicfgmgmt'
mountpoints = extra_vars['edi_shared_folder_mountpoints']
assert len(mountpoints) == 2
assert mountpoints[0] == '/foo/bar/target_mountpoint'
def test_lxd_connection(config_files, monkeypatch):
def fake_ansible_playbook_run(*popenargs, **kwargs):
command = popenargs[0]
if command[0] == 'ansible-playbook':
assert 'lxd' == get_command_parameter(command, '--connection')
verify_inventory(get_command_parameter(command, '--inventory'))
verify_extra_vars(get_command_parameter(command, '--extra-vars').lstrip('@'))
# TODO: verify --user for ssh connection
return subprocess.CompletedProcess("fakerun", 0, '')
else:
return subprocess.run(*popenargs, **kwargs)
monkeypatch.setattr(mockablerun, 'run_mockable', fake_ansible_playbook_run)
def fakechown(*_):
pass
monkeypatch.setattr(shutil, 'chown', fakechown)
with open(config_files, "r") as ma
|
in_file:
parser = ConfigurationParser(main_file)
runner = PlaybookRunner(parser, "fake-container", "lxd")
playbooks = runner.run_all()
expected_playbooks = ['10_base_system', '20_networking', '30_foo']
assert playbooks == expected_playbook
|
s
|
RemuTeam/Remu
|
project/tests/GUI/PopUps/test_remove_presentations_pop_up.py
|
Python
|
mit
| 1,094
| 0.002742
|
import unittest
from GUI.PopUps.RemovePresentationsPopUp import RemovePresentationsPopUp
class TestBindPresentat
|
ionToSlavePopup(unittest.TestCase):
def setUp(self):
self.presentations = ["Slave1", "Slave2"]
self.remove_popup = RemovePresentationsPopUp(self.presentations, None)
def test_init_setups_properly(self):
self.assertEquals(len(self.remove_popup.selected_presentations), 0)
self.assertIsNone(self.remove_popup.listener)
def test_init_populates_presentation_list_properly(self):
s
|
elf.assertEqual(len(self.remove_popup.ids.presentation_list.children), 2)
def test_selected_presentations_is_empty_at_first(self):
self.assertEqual(len(self.remove_popup.selected_presentations), 0)
#
# def test_checkbox_selects(self):
# self.bind_popup.on_checkbox_active(self.bind_popup.checkboxes[0].ids.checker, True)
# self.assertEquals(self.bind_popup.selected_slave, self.slave_name)
#
# def test_confirm(self):
# self.bind_popup.confirm()
# self.assertEquals(self.button.text, '')
|
undefinedv/Jingubang
|
sqlmap/lib/utils/crawler.py
|
Python
|
gpl-3.0
| 8,802
| 0.002272
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import httplib
import os
import re
import urlparse
import tempfile
import time
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import findPageForms
from lib.core.common import getSafeExString
from lib.core.common import openFile
from lib.core.common import readInput
from lib.core.common import safeCSValue
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import MKSTEMP_PREFIX
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapSyntaxException
from lib.core.settings import CRAWL_EXCLUDE_EXTENSIONS
from lib.core.threads import getCurrentThreadData
from lib.core.threads import runThreads
from lib.parse.sitemap import parseSitemap
from lib.request.connect import Connect as Request
from thirdparty.beautifulsoup.beautifulsoup import BeautifulSoup
from thirdparty.oset.pyoset import oset
def crawl(target):
try:
visited = set()
threadData = getCurrentThreadData()
threadData.shared.value = oset()
def crawlThread():
threadData = getCurrentThreadData()
while kb.threadContinue:
with kb.locks.limit:
if threadData.shared.unprocessed:
current = threadData.shared.unprocessed.pop()
if current in visited:
continue
elif conf.crawlExclude and re.search(conf.crawlExclude, current):
dbgMsg = "skipping '%s'" % current
logger.debug(dbgMsg)
continue
else:
visited.add(current)
else:
break
content = None
try:
if current:
content = Request.getPage(url=current, crawling=True, raise404=False)[0]
except SqlmapConnectionException, ex:
errMsg = "connection exception detected (%s). skipping " % ex
errMsg += "URL '%s'" % current
logger.critical(errMsg)
except SqlmapSyntaxException:
errMsg = "invalid URL detected. skipping '%s'" % current
logger.critical(errMsg)
except httplib.InvalidURL, ex:
errMsg = "invalid URL detected (%s). skipping " % ex
errMsg += "URL '%s'" % current
logger.critical(errMsg)
if not kb.threadContinue:
break
if isinstance(content, unicode):
try:
match = re.search(r"(?si)<html[^>]*>(.+)</html>", content)
if match:
content = "<html>%s</html>" % match.group(1)
soup = BeautifulSoup(content)
tags = soup('a')
if not tags:
tags = re.finditer(r'(?si)<a[^>]+href="(?P<href>[^>"]+)"', content)
for tag in tags:
href = tag.get("href") if hasattr(tag, "get") else tag.group("href")
if href:
if threadData.lastRedirectURL and threadData.lastRedirectURL[0] == threadData.lastRequestUID:
current = threadData.lastRedirectURL[1]
url = urlparse.urljoin(current, href)
# flag to know if we are dealing with the same target host
|
_ = red
|
uce(lambda x, y: x == y, map(lambda x: urlparse.urlparse(x).netloc.split(':')[0], (url, target)))
if conf.scope:
if not re.search(conf.scope, url, re.I):
continue
elif not _:
continue
if url.split('.')[-1].lower() not in CRAWL_EXCLUDE_EXTENSIONS:
with kb.locks.value:
threadData.shared.deeper.add(url)
if re.search(r"(.*?)\?(.+)", url):
threadData.shared.value.add(url)
except UnicodeEncodeError: # for non-HTML files
pass
finally:
if conf.forms:
findPageForms(content, current, False, True)
if conf.verbose in (1, 2):
threadData.shared.count += 1
status = '%d/%d links visited (%d%%)' % (threadData.shared.count, threadData.shared.length, round(100.0 * threadData.shared.count / threadData.shared.length))
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
threadData.shared.deeper = set()
threadData.shared.unprocessed = set([target])
if not conf.sitemapUrl:
message = "do you want to check for the existence of "
message += "site's sitemap(.xml) [y/N] "
test = readInput(message, default="n")
if test[0] in ("y", "Y"):
found = True
items = None
url = urlparse.urljoin(target, "/sitemap.xml")
try:
items = parseSitemap(url)
except SqlmapConnectionException, ex:
if "page not found" in getSafeExString(ex):
found = False
logger.warn("'sitemap.xml' not found")
except:
pass
finally:
if found:
if items:
for item in items:
if re.search(r"(.*?)\?(.+)", item):
threadData.shared.value.add(item)
if conf.crawlDepth > 1:
threadData.shared.unprocessed.update(items)
logger.info("%s links found" % ("no" if not items else len(items)))
infoMsg = "starting crawler"
if conf.bulkFile:
infoMsg += " for target URL '%s'" % target
logger.info(infoMsg)
for i in xrange(conf.crawlDepth):
threadData.shared.count = 0
threadData.shared.length = len(threadData.shared.unprocessed)
numThreads = min(conf.threads, len(threadData.shared.unprocessed))
if not conf.bulkFile:
logger.info("searching for links with depth %d" % (i + 1))
runThreads(numThreads, crawlThread, threadChoice=(i>0))
clearConsoleLine(True)
if threadData.shared.deeper:
threadData.shared.unprocessed = set(threadData.shared.deeper)
else:
break
except KeyboardInterrupt:
warnMsg = "user aborted during crawling. sqlmap "
warnMsg += "will use partial list"
logger.warn(warnMsg)
finally:
clearConsoleLine(True)
if not threadData.shared.value:
warnMsg = "no usable links found (with GET parameters)"
logger.warn(warnMsg)
else:
for url in threadData.shared.value:
kb.targets.add((url, None, None, None, None))
storeResultsToFile(kb.targets)
def storeResultsToFile(results):
if not results:
return
if kb.storeCrawlingChoice is None:
message = "do you want to store crawling results to a temporary file "
message += "for eventual further processing with other tools [y/N] "
test = readInput(message, default="N")
kb.storeCrawlingChoice = test[0] in ("y", "Y")
|
anhaidgroup/py_entitymatching
|
py_entitymatching/dask/dask_black_box_blocker.py
|
Python
|
bsd-3-clause
| 22,385
| 0.00344
|
from collections import OrderedDict
import logging
import time
import sys
import pandas as pd
import numpy as np
import pyprind
import dask
from dask import delayed
from dask.diagnostics import ProgressBar
import cloudpickle as cp
import pickle
from py_entitymatching.blocker.blocker import Blocker
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, \
add_key_column
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
class DaskBlackBoxBlocker(Blocker):
"""
    WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Blocks based on a black box function specified by the user.
"""
def __init__(self, *args, **kwargs):
logg
|
er.warning(
"WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT
|
YOUR OWN "
"RISK.")
super(Blocker, self).__init__(*args, **kwargs)
self.black_box_function = None
def set_black_box_function(self, function):
"""Sets black box function to be used for blocking.
Args:
function (function): the black box function to be used for blocking .
"""
self.black_box_function = function
def block_tables(self, ltable, rtable,
l_output_attrs=None, r_output_attrs=None,
l_output_prefix='ltable_', r_output_prefix='rtable_',
verbose=False, show_progress=True, n_ltable_chunks=1,
n_rtable_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Blocks two tables based on a black box blocking function specified
by the user.
Finds tuple pairs from left and right tables that survive the black
box function. A tuple pair survives the black box blocking function if
the function returns False for that pair, otherwise the tuple pair is
dropped.
Args:
ltable (DataFrame): The left input table.
rtable (DataFrame): The right input table.
l_output_attrs (list): A list of attribute names from the left
table to be included in the
output candidate set (defaults to None).
r_output_attrs (list): A list of attribute names from the right
table to be included in the
output candidate set (defaults to None).
l_output_prefix (string): The prefix to be used for the attribute names
coming from the left table in the output
candidate set (defaults to 'ltable\_').
r_output_prefix (string): The prefix to be used for the attribute names
coming from the right table in the output
candidate set (defaults to 'rtable\_').
verbose (boolean): A flag to indicate whether the debug
information should be logged (defaults to False).
show_progress (boolean): A flag to indicate whether progress should
be displayed to the user (defaults to True).
n_ltable_chunks (int): The number of partitions to split the left table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
n_rtable_chunks (int): The number of partitions to split the right table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
Returns:
A candidate set of tuple pairs that survived blocking (DataFrame).
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_output_attrs` is not of type of
list.
AssertionError: If `r_output_attrs` is not of type of
list.
AssertionError: If values in `l_output_attrs` is not of type
string.
AssertionError: If values in `r_output_attrs` is not of type
string.
AssertionError: If `l_output_prefix` is not of type
string.
AssertionError: If `r_output_prefix` is not of type
string.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `show_progress` is not of type boolean.
AssertionError: If `n_ltable_chunks` is not of type
int.
AssertionError: If `n_rtable_chunks` is not of type
int.
AssertionError: If `l_out_attrs` are not in the ltable.
AssertionError: If `r_out_attrs` are not in the rtable.
Examples:
>>> def match_last_name(ltuple, rtuple):
# assume that there is a 'name' attribute in the input tables
# and each value in it has two words
l_last_name = ltuple['name'].split()[1]
r_last_name = rtuple['name'].split()[1]
if l_last_name != r_last_name:
return True
else:
return False
>>> import py_entitymatching as em
            >>> from py_entitymatching.dask.dask_black_box_blocker import DaskBlackBoxBlocker
>>> bb = DaskBlackBoxBlocker()
>>> bb.set_black_box_function(match_last_name)
>>> C = bb.block_tables(A, B, l_output_attrs=['name'], r_output_attrs=['name'] )
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")
# validate data types of standard input parameters
self.validate_types_params_tables(ltable, rtable,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
verbose, 1)
# validate data type of show_progress
self.validate_show_progress(show_progress)
# validate black box function
assert self.black_box_function != None, 'Black box function is not set'
# validate output attributes
self.validate_output_attrs(ltable, rtable, l_output_attrs, r_output_attrs)
# get and validate metadata
log_info(logger, 'Required metadata: ltable key, rtable key', verbose)
# # get metadata
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger, verbose)
# # validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger, verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger, verbose)
# validate number of ltable and rtable chunks
validate_object_type(n_ltable_chunks, int, 'Parameter n_ltable_chunks')
validate_object_type(n_rtable_chunks, int, 'Parameter n_rtable_chunks')
validate_chunks(n_ltable_chunks)
validate_chunks(n_rtable_chunks)
# # determine the number of chunks
n_ltable_chunks = get_num_partitions(n_ltable_chunks, len(ltable))
n_rtable_chunks = get_num_partitions(n_rtable_chunks, len(rtable))
# do blocking
# # set index for convenience
l_df = ltable.set_index(l_key, drop=False)
r_df = rtable.set_index(r_key, drop=False)
|
ppizarror/korektor
|
bin/easyprocess/examples/log.py
|
Python
|
gpl-2.0
| 447
| 0
|
from easyprocess import EasyPro
|
cess # @UnresolvedImport
import logging
# turn on logging
logging.basicConfig(level=logging.DEBUG)
EasyProcess('python --version').call()
EasyProcess('ping localhost').start().sleep(1).stop()
EasyProcess('python --version').check()
try:
EasyProcess('bad_command').check()
except Exception, detail:
print detai
|
l
try:
EasyProcess('sh -c bad_command').check()
except Exception, detail:
print detail
|
SnowOnion/ero
|
acfunh.py
|
Python
|
mit
| 353
| 0.002882
|
#!/usr/bin/python2
#coding=utf8
import httplib
import urllib
im
|
port urllib2
import json
req = urllib2.Request('http://h.acfun.tv/综合版1.json')
res = urllib2.urlopen(req)
json_str = res.read()
json_dic = json.loads(json_str)
print json.dumps(json_dic, indent=4, encoding='utf-8')
# print json.dumps(json_dic['data']['replys']
|
.items()[0], indent=4)
|
klavinslab/coral
|
tests/tests/test_design/test_gibson.py
|
Python
|
mit
| 2,083
| 0
|
'''Test gibson design module.'''
from nose.tools import assert_equal, assert_raises
from coral import design, DNA, Primer
def test_gibson_primers():
'''Test gibson_primers function.'''
# Fuse tdh3 promoter sequence to yfp (trimmed for readability)
tdh3_3prime = DNA('aaccagttccctgaaattattcccctacttgactaataagtat' +
'ataaagacggtaggtattgattgtaattctgtaaatctatttc' +
'ttaaacttc')
yfp_nterm = DNA('atggtgagcaagggcgaggagctgttcaccggggtggtgcccatc' +
'ctggtcgagctggacggcgacgtaaacggccacaagttcagcgtg' +
'tccggcgagggcgagggcgatgccacctacggcaagctgaccctg' +
'aag')
# Expected annealing sequences and their Tms
fwd_anneal = DNA('atggtgagcaagggcg')
fwd_tm = 64.64172107821065
rev_anneal = DNA('gaagtttaagaaatagatttacagaattacaatcaatac')
rev_tm = 64.24536287254085
# Expected overlaps
all_right = DNA('TCGCCCTTGCTCACCAT')
all_left = DNA('GGTATTGATTGTAATTCTGTAAATCTATTTCTTAAACTTC')
mixed_fwd = DNA('TTCTTAAACTTC')
mixed_
|
rev = DNA('CCTTGCTCACCAT')
# Design primers - with homology all on left side, right side, or mixed
# All on the 'right' - i.e. fwd primer
right = design.gibson_primers(tdh3_3prime, yfp_nterm, 'right')
right_rev = Primer(rev_anneal, tm=rev_tm, overhang=all_right)
right_fwd = Primer(fwd_anneal, tm=fwd_tm)
ass
|
ert_equal(right, (right_rev, right_fwd))
# All on the 'left' - i.e. rev primer
left = design.gibson_primers(tdh3_3prime, yfp_nterm, 'left')
left_rev = Primer(rev_anneal, tm=rev_tm)
left_fwd = Primer(fwd_anneal, tm=fwd_tm, overhang=all_left)
assert_equal(left, (left_rev, left_fwd))
# On both primers
mixed = design.gibson_primers(tdh3_3prime, yfp_nterm, 'mixed')
mixed_primer1 = Primer(rev_anneal, tm=rev_tm, overhang=mixed_rev)
mixed_primer2 = Primer(fwd_anneal, tm=fwd_tm, overhang=mixed_fwd)
assert_equal(mixed, (mixed_primer1, mixed_primer2))
assert_raises(ValueError, design.gibson_primers, tdh3_3prime,
yfp_nterm, 'duck')
|
aewallin/allantools
|
tests/functional_tests/test_noise.py
|
Python
|
lgpl-3.0
| 556
| 0.01259
|
#!/usr/bin/python
import sys
sys.path.append("..")
from allantools import noise
import numpy
import pytest
def test_noise():
N = 500
#rate = 1.0
w = noise.white(N)
b = noise.brown(N)
v = noise.violet(N)
p = noise.pink(N)
# check output length
assert len(w) == N
assert len(b) == N
assert len(v) == N
assert len(p)
|
== N
# check output type
for x in [w, b, v, p]:
assert type(x)
|
== numpy.ndarray, "%s is not numpy.ndarray" % (type(x))
if __name__ == "__main__":
test_noise()
|
Hastu/educ_plateform
|
educ/authentification/admin.py
|
Python
|
mit
| 152
| 0.006579
|
from django.contrib import admin
from forum.models import *
admin.site.reg
|
ister(Student)
admi
|
n.site.register(Professor)
admin.site.register(Classroom)
|
Tackitt/flask-apispec
|
examples/petstore.py
|
Python
|
mit
| 2,262
| 0.007515
|
# -*- coding: utf-8 -*-
import six
import marshmallow as ma
from flask_apispec import ResourceMeta, Ref, doc, marshal_with, use_kwargs
class Pet:
def __init__(self, name, type):
self.name = name
self.type = type
class PetSchema(ma.Schema):
name = ma.fields.Str()
type = ma.fields.Str()
class PetResource(six.with_metaclass(ResourceMeta)):
@use_kwargs({
'category': ma.fields.Str(),
'name': ma.fields.Str(),
})
@marshal_with(PetSchema(), code=200)
def get(self):
return Pet('calici', 'cat')
class CatResource(PetResource):
@use_kwargs({'category': ma.fields.Int()})
@marshal_with(PetSchema(), code=201)
def get(self):
return Pet('calici', 'cat'), 200
###
class CrudResource(six.w
|
ith_metaclass(ResourceMeta)):
schema = None
@marshal_with(Ref('schema'), code=200)
def get(self, id):
pass
|
@marshal_with(Ref('schema'), code=200)
@marshal_with(Ref('schema'), code=201)
def post(self):
pass
@marshal_with(Ref('schema'), code=200)
def put(self, id):
pass
@marshal_with(None, code=204)
def delete(self, id):
pass
class PetResource(CrudResource):
schema = PetSchema
###
import flask
import flask.views
from flask_apispec import FlaskApiSpec
app = flask.Flask(__name__)
docs = FlaskApiSpec(app)
@app.route('/pets/<pet_id>')
@doc(params={'pet_id': {'description': 'pet id'}})
@marshal_with(PetSchema)
@use_kwargs({'breed': ma.fields.Str()})
def get_pet(pet_id):
return Pet('calici', 'cat')
docs.register(get_pet)
class MethodResourceMeta(ResourceMeta, flask.views.MethodViewType):
pass
class MethodResource(six.with_metaclass(MethodResourceMeta, flask.views.MethodView)):
methods = None
@doc(
tags=['pets'],
params={'pet_id': {'description': 'the pet name'}},
)
class CatResource(MethodResource):
@marshal_with(PetSchema)
def get(self, pet_id):
return Pet('calici', 'cat')
@marshal_with(PetSchema)
def put(self, pet_id):
return Pet('calici', 'cat')
app.add_url_rule('/cat/<pet_id>', view_func=CatResource.as_view('CatResource'))
docs.register(CatResource, endpoint='CatResource')
if __name__ == '__main__':
app.run(debug=True)
|
yebrahim/pydatalab
|
datalab/utils/_gcp_job.py
|
Python
|
apache-2.0
| 1,521
| 0.006575
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements GCP Job functionality."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datalab.context
from . import _job
class GCPJob(_job.Job):
"""Represents a BigQuery Job.
"""
def __init__(self, job_id, context):
"""Initializes an instance of a Job.
Args:
job_id: the BigQuery job ID corresponding to this job.
context: a Context object providing project_id and credentials.
"""
super(GCPJob, self).__init__(job_id)
if context is None:
|
context = datalab.context.Context.default()
self._context = context
self._api = self._create_api(context)
def _create_api(self, context):
raise Exception('_create_api must be defined in a derived class')
def __repr__(self):
"""Returns a representation for the job for showing in the notebook.
"""
return 'Job %s/%s %s' %
|
(self._context.project_id, self._job_id, self.state)
|
jfrancis71/TensorFlowApps
|
CZFaceDetection.py
|
Python
|
mit
| 9,947
| 0.036594
|
#
# The weights for this model come from training in a neural network library called CognitoNet which is now retired.
#
# That training session used images from the Face Scrub data set:
# http: http://vintage.winklerbros.net/facescrub.html
# H.-W. Ng, S. Winkler.
# A data-driven approach to cleaning large face datasets.
# Proc. IEEE International Conference on Image Processing (ICIP), Paris, France, Oct. 27-30, 2014.
#
# Example use:
# output = CZFaceDetection.CZHighlightImage( img, cnor.CZDetectFaces( img ) )
#
# You need to download the following two files and install them somewhere on your search path.
# FaceNet2Convolve.json from https://drive.google.com/file/d/0Bzhe0pgVZtNUMFhfcGJwRE9sRWc/view?usp=sharing
# GenderNet.json from https://drive.google.com/file/d/0Bzhe0pgVZtNUaDY5ZzFiN2ZfTFU/view?usp=sharing
#
#Public Interfaces
#Timing: Approx 0.85s for 240x320 on MacOSX CPU
#Works like FindFaces, ie returns { {{x1,y1},{x2,y2}},... }
# On the Caltech 1999 face dataset, we achieve a recognition rate of around 92% with
# an average of 14% of false positives/image.
# The Caltech dataset has 450 images where most faces are quite close to camera,
# where images are of size 896x592. Most of these images are of good quality, but some
# are challenging, eg. cartoon, significant obscuring of face or poor lighting conditions.
# Reference comparison, FindFaces achieves 99.6% recognition, but 56% average false positive rate/image
def CZDetectFaces( pilImage, threshold=0.997 ):
return CZDeleteOverlappingWindows( CZMultiScaleDetectObjects( pilImage, CZFaceNet, threshold ) )
def CZHighlightImage( pilImage, rectangles ):
img = pilImage.copy()
draw = ImageDraw.Draw( img )
    # draw.rectangle  (no-op attribute access; has no effect)
for obj in rectangles:
draw.rectangle( [ obj[0][0], obj[0][1], obj[1][0], obj[1][1] ], outline = 'green' )
draw.rectangle( [ obj[0][0]-1, obj[0][1]-1, obj[1][0]+1, obj[1][1]+1 ], outline = 'green' )
draw.rectangle( [ obj[0][0]-2, obj[0][1]-2, obj[1][0]+2, obj[1][1]+2 ], outline = 'green' )
return img
#returns a gender score ranging from 0 (most likely female) to 1 (most likely male)
def CZGender( pilImage ):
norm = pilImage.convert( 'L' ).resize( ( 32, 32 ) )
tfImage = [ np.array( [ np.array( norm ) ] ).transpose( (1,2,0) ) / 255. ]
return CZFaceDetectSession.run( CZGenderNet[1], feed_dict = { CZGenderNet[0] : tfImage } )[0][0]
#Draws bounding boxes around detected faces and attempts to determine likely gender
def CZHighlightFaces( pilImage, threshold = .997 ):
objs = CZDetectFaces( pilImage, threshold )
img = pilImage.copy()
draw = ImageDraw.Draw( img )
    # draw.rectangle  (no-op attribute access; has no effect)
for obj in objs:
crp = pilImage.crop( ( obj[0][0], obj[0][1], obj[1][0], obj[1][1] ) )
gender = CZGender( crp )
c1 = np.array( [ 255., 105., 180. ] )
c2 = np.array( [ 0., 0., 255. ] )
bld = blend( gender, c1, c2 )
color = "#%02x%02x%02x"%( (int)(bld[0]), (int)(bld[1]), (int)(bld[2]) )
draw.rectangle( [ obj[0][0], obj[0][1], obj[1][0], obj[1][1] ], outline = color )
draw.rectangle( [ obj[0][0]-1, obj[0][1]-1, obj[1][0]+1, obj[1][1]+1 ], outline = color )
draw.rectangle( [ obj[0][0]-2, obj[0][1]-2, obj[1][0]+2, obj[1][1]+2 ], outline = color )
return img
#Private Implementation Code
import tensorflow as tf
import numpy as np
from PIL import ImageDraw
import json
import math
import os
def CZReadNN( fileName ):
with open( fileName, 'r' ) as infile:
j = json.load( infile )
return j
CZFaceNetParameters = CZReadNN( os.path.join(os.path.expanduser('~'), 'FaceNet.json' ) )
#Takes a TensorFlow image array and builds a TensorFlow graph to process
#that image using the model parameters specified in modelFilename.
def buildObjectRecognitionGraph():
x_image = tf.placeholder( tf.float32, shape=[ 1, None, None, 1 ] )
h_conv1 = tf.nn.tanh( conv2d( x_image, CZFaceNetParameters[0] ) )
h_pool1 = max_pool_2x2(h_conv1)
h_conv2 = tf.nn.tanh( conv2d( h_pool1, CZFaceNetParameters[1] ) )
h_pool2 = max_pool_2x2(h_conv2)
h_conv3 = tf.nn.tanh( conv2d( h_pool2, CZFaceNetParameters[2] ) )
h_conv4 = tf.nn.sigmoid( conv2d( h_conv3, CZFaceNetParameters[3] ) )
return ( x_image, h_conv4 )
# Note the first part of w is the biases, the second is the weights
def conv2d(x, w):
return w[0] + tf.nn.conv2d(x, w[1], strides=[1, 1, 1, 1], padding='VALID')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME')
CZFaceNet = buildObjectRecognitionGraph()
CZFaceDetectSession = tf.Session()
# Conceptually it is a sliding window (32x32) object detector running at a single scale.
# In practice it is implemented convolutionally ( for performance reasons ) so the net
# should be fully convolutional, ie no fully connected layers.
# The net output should be a 2D array of numbers indicating a metric for likelihood of object being present.
# The net filter should accept an array of real numbers (ie this works on greyscale images). You can supply a
# colour image as input to the function, but this is just converted to greyscale before being fed to the neural net
# Note the geometric factor 4 in mapping from the output array to the input array, this is because we have
# downsampled twice in the neural network, so there is a coupling from this algorithm to the architecture
# of the neural net supplied.
# The rationale for using a greyscale neural net is that I am particularly fascinated by shape (much more
# than colour), so I wanted to look at performance driven by that factor alone. A commercial use might take
# a different view.
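# A sketch of the coordinate mapping implied above (illustrative only; the helper name below is invented
# and not part of this file): an activation at output position (row, col) corresponds to a 32x32 window
# in the input image whose top-left corner sits at (4*col, 4*row), because of the two 2x max-pool stages.
# def output_cell_to_input_window(row, col, window=32, stride=4):
#     x, y = col * stride, row * stride
#     return (x, y, x + window, y + window)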
def CZSingleScaleDetectObjects( pilImage, tfGraph, threshold=0.997 ):
npImage = np.array( pilImage.convert( 'L' ) ) / 255.0
image = [ np.array( [ npImage ] ).transpose( 1,2,0 ) ]
outputMap = CZFaceDetectSession.run( tfGraph[1], feed_dict = { tfGraph[0] : image } )
extractPositions = np.transpose( np.nonzero( outputMap[0][:,:,0] > threshold ) )
objs = list( map( lambda x: (outputMap[0][:,:,0][x[0],x[1]],x[1]*4,x[0]*4), extractPositions ) )
return objs
# Implements a sliding window object detector at multiple scales.
# The function resamples the image at scales ranging from a minimum width of 32 up to 800 at 20% scale increments.
# The maximum width of 800 was chosen for 2 reasons: to limit inference run time and to limit the number of likely
# false positives / image, implying the detector's limit is to recognise faces larger than 32/800 (4%) of the image width.
# Note that if for example you had high resolution images with faces in the far distance and wanted to detect those and were
# willing to accept false positives within the image, you might reconsider that tradeoff.
# However, the main use case was possibly high resolution images where faces are not too distant with objective of limiting
# false positives across the image as a whole.
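# Worked example of the scale schedule below (illustrative only): for a 320-pixel-wide image,
# -1 + int((log(32) - log(320)) / log(0.8)) = -1 + int(10.3) = 9 scales, giving widths
# 320, 256, 204.8, ..., 53.7 -- i.e. the image shrinks by 20% per step and the loop stops
# well before the width would fall under the 32-pixel detection window.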
def CZMultiScaleDetectObjects( pilImage, tfGraph, threshold=0.997 ):
objRet = []
for s in range( -1 + int( ( math.log( 32 ) - math.log( pilImage.width ) ) / math.log (.8 ) ) ):
height = pilImage.height * .8**s
width = pilImage.width * .8**s
print( "idx = ", s, " width = ", width
|
)
image = pilImage.resize( ( int(width), int(height) ) )
objs = CZSingleScaleDetectObjects( image, tfGraph, threshold )
scale = pilImage.width / image.width
for obj in objs:
objRet.append( ( obj[0], ( scale*(16 + obj[1]
|
-16), scale*(16 + obj[2]-16) ), ( scale*(16 + obj[1]+16), scale*(16 + obj[2]+16) ) ) )
return objRet
def CZIntersection( a, b ):
xa=max(a[0][0],b[0][0])
ya=max(a[0][1],b[0][1])
xb=min(a[1][0],b[1][0])
    yb=min(a[1][1],b[1][1])
if ( xa>xb or ya>yb ):
ans = 0
else:
ans = (xb-xa+1)*(yb-ya+1)
return ans
def CZArea( a ):
return ( a[0][0]-a[1][0] ) * ( a[0][1]-a[1][1] )
def CZUnion( a, b ):
return CZArea( a ) + CZArea( b ) - CZIntersection( a, b )
def CZIntersectionOverUnion
|
michielkauwatjoe/Meta
|
meta/rgb.py
|
Python
|
mit
| 524
| 0.009542
|
#!/usr/bin/env python3
'''
bm = sd.Bitmap(Width,Height)
for x in xrange(Height*Width):
j= x // Width
i= x % Width
col = Color[x]
bm.SetPixel(i,j,col)
bm.Save(PathWrite,sd.Imaging.ImageFormat.Bmp)
'''
from PIL import Image
w = h = 255
img = Image.new( 'RGB', (w, h), "black") # Create a new black image
pixels = img.
|
load() # Create the pixel map
for i in range(img.size[0]): # For every pixel:
|
for j in range(img.size[1]):
pixels[i,j] = (i, j, 100) # Set the colour accordingly
img.show()
|
rx2130/Leetcode
|
python/7 Reverse Integer.py
|
Python
|
apache-2.0
| 710
| 0.002817
|
class Solution(object):
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
# Op1:
isNegative = False
if x < 0:
isNegative = True
x *= -1
x = int(str(x)[::-1])
if isNegative:
x *= -1
return x if abs(x) < 2**31-1 else 0
def reverse2(self, x):
sign = 1
if x < 0:
|
sign = -1
x *= -1
num = 0
while x:
if abs(num) > 214748364:
return 0
num = num * 10 + x % 10
|
x //= 10
return num * sign
test = Solution()
print(test.reverse(1000000003))
print(test.reverse2(-1000000003))
|
mathemage/h2o-3
|
h2o-py/h2o/expr.py
|
Python
|
apache-2.0
| 15,701
| 0.004204
|
# -*- encoding: utf-8 -*-
"""
Rapids expressions. These are helper classes for H2OFrame.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import collections
import copy
import gc
import math
import sys
import time
import tabulate
import h2o
from h2o.backend.connection import H2OConnectionError
from h2o.utils.compatibility import * # NOQA
from h2o.utils.compatibility import repr2, viewitems, viewvalues
from h2o.utils.shared_utils import _is_fr, _py_tmp_key
from h2o.model.model_base import ModelBase
from h2o.expr_optimizer import optimize
class ExprNode(object):
"""
Composable Expressions: This module contains code for the lazy expression DAG.
Execution Overview
------------------
The job of ExprNode is to provide a layer of indirection to H2OFrame instances that
are built of arbitrarily large, lazy expression DAGs. In order to do this job well,
    ExprNode must also track top-level entry points to such DAGs, maintain a sane
amount of state to know which H2OFrame instances are temporary (or not), and maintain
a cache of H2OFrame properties (nrows, ncols, types, names, few rows of data).
Top-Level Entry Points
----------------------
An expression is declared top-level if it
A) Computes and returns an H2OFrame to some on-demand call from somewhere
B) An H2OFrame instance has more referrers than the 4 needed for the usual flow
of python execution (see MAGIC_REF_COUNT below for more details).
Sane Amount of State
--------------------
Instances of H2OFrame live and die by the state contained in the _ex field. The three
pieces of state -- _op, _children, _cache -- are the fewest pieces of state (and no
fewer) needed to unambiguously track temporary H2OFrame instances and prune
them according to the usual scoping laws of python.
If _cache._id is None, then this DAG has never been sent over to H2O, and there's
nothing more to do when the object goes out of scope.
If _cache._id is not None, then there has been some work done by H2O to compute the
big data object sitting in H2O to which _id points. At the time that __del__ is
called on this object, a determination to throw out the corresponding data in H2O or
to keep that data is made by the None'ness of _children.
tl;dr:
If _cache._id is not None and _children is None, then do not delete in H2O cluster
If _cache._id is not None and _children is not None, then do delete in H2O cluster
H2OCache
--------
To prevent several unnecessary REST calls and unnecessary execution, a few of the
oft-needed attributes of the H2OFrame are cached for convenience. The primary
consumers of these cached values are __getitem__, __setitem__, and a few other
H2OFrame ops that do argument validation or exchange (e.g. colnames for indices).
There are more details available under the H2OCache class declaration.
"""
# Magical count-of-5: (get 2 more when looking at it in debug mode)
# 2 for _get_ast_str frame, 2 for _get_ast_str local dictionary list, 1 for parent
MAGIC_REF_COUNT = 5 if sys.gettrace() is None else 7 # M = debug ? 7 : 5
# Flag to control application of local expression tree optimizations
__ENABLE_EXPR_OPTIMIZATIONS__ = True
def __init__(self, op="", *args):
# assert isinstance(op, str), op
self._op = op # Base opcode string
self._children = tuple(
a._ex if _is_fr(a) else a for a in args) # ast children; if not None and _cache._id is not None then tmp
self._cache = H2OCache() # ncols, nrows, names, types
# try to fuse/simplify expression
if self.__ENABLE_EXPR_OPTIMIZATIONS__:
self._optimize()
def _eager_frame(self):
if not self._cache.is_empty(): return
if self._cache._id is not None: return # Data already computed under ID, but not cached locally
self._eval_driver(True)
def _eager_scalar(self): # returns a scalar (or a list of scalars)
if not self._cache.is_empty():
assert self._cache.is_scalar()
return self
assert self._cache._id is None
self._eval_driver(False)
assert self._cache._id is None
assert self._cache.is_scalar()
return self._cache._data
def _eval_driver(self, top):
exec_str = self._get_ast_str(top)
res = ExprNode.rapids(exec_str)
if 'scalar' in res:
if isinstance(res['scalar'], list):
self._cache._data = [float(x) for x in res['scalar']]
else:
self._cache._data = None if res['scalar'] is None else float(res['scalar'])
if 'string' in res: self._cache._data = res['string']
if 'funstr' in res: raise NotImplementedError
if 'key' in res:
self._cache.nrows = res['num_rows']
self._cache.ncols = res['num_cols']
return self
def _optimize(self):
while True:
opt = optimize(self)
if opt is not None:
opt(ctx=None)
else:
break
# Recursively build a rapids execution string. Any object with more than
# MAGIC_REF_COUNT referrers will be cached as a temp until the next client GC
# cycle - consuming memory. Do Not Call This except when you need to do some
# other cluster opera
|
tion on the evaluated object. Examples might be: lazy
# dataset time parse vs changing the global timezone. Global timezone change
    # is eager, so the time parse has to occur in the correct order relative to
# the timezone change, so cannot be lazy.
#
def _get_ast_str(self, top):
        if not self._cache.is_empty():  # Data already computed and cached; could be a "false-like" cached value
|
return str(self._cache._data) if self._cache.is_scalar() else self._cache._id
if self._cache._id is not None:
return self._cache._id # Data already computed under ID, but not cached
# assert isinstance(self._children,tuple)
exec_str = "({} {})".format(self._op, " ".join([ExprNode._arg_to_expr(ast) for ast in self._children]))
gc_ref_cnt = len(gc.get_referrers(self))
if top or gc_ref_cnt >= ExprNode.MAGIC_REF_COUNT:
self._cache._id = _py_tmp_key(append=h2o.connection().session_id)
exec_str = "(tmp= {} {})".format(self._cache._id, exec_str)
return exec_str
@staticmethod
def _arg_to_expr(arg):
if arg is None:
return "[]" # empty list
if isinstance(arg, ExprNode):
return arg._get_ast_str(False)
if isinstance(arg, ASTId):
return str(arg)
if isinstance(arg, (list, tuple, range)):
return "[%s]" % " ".join(repr2(x) for x in arg)
if isinstance(arg, slice):
start = 0 if arg.start is None else arg.start
stop = float("nan") if arg.stop is None else arg.stop
step = 1 if arg.step is None else arg.step
assert start >= 0 and step >= 1 and (math.isnan(stop) or stop >= start + step)
if step == 1:
return "[%d:%s]" % (start, str(stop - start))
else:
return "[%d:%s:%d]" % (start, str((stop - start + step - 1) // step), step)
if isinstance(arg, ModelBase):
return arg.model_id
return repr2(arg)
def __del__(self):
try:
if self._cache._id is not None and self._children is not None:
ExprNode.rapids("(rm {})".format(self._cache._id))
except (AttributeError, H2OConnectionError):
pass
def arg(self, idx):
return self._children[idx]
def args(self):
return self._children
def narg(self):
return len(self._children)
@staticmethod
def _collapse_sb(sb):
return ' '.join(""
|
itoijala/pyfeyner
|
tests/test-bend90a.py
|
Python
|
gpl-2.0
| 1,085
| 0.000922
|
#
# pyfeyner - a simple Python interface for making Fey
|
nman diagrams.
# Copyright (C) 2005-2010 Andy Buckley, Georg von Hippel
# Copyright (C) 2013 Ismo Toijala
#
# pyfeyner is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pyfeyner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
#
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with pyfeyner; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from pyfeyner import *
from pyfeyner.user import *
fd = FeynDiagram()
pt_in = Vertex(0, 0)
pt_out = Vertex(0, 6)
#pt_out = Vertex(6, 0)
vtx = Vertex(3, 3)
f = Fermion(pt_in, pt_out).arcThru(vtx)
fd.draw("test-bend90a.pdf")
|
coolcooldool/tencent-weibo-exporter
|
loginver4/tencent_util.py
|
Python
|
apache-2.0
| 10,110
| 0.006012
|
# -*- coding: utf-8 -*-
'''
Created on 2017/09/14
@author: yuyang
'''
import os
import urllib
import uuid
import re
import docx_ext
from docx.shared import Pt
from docx.shared import RGBColor
from docx.shared import Inches
JPEG_EXTENSION = '.jpg'
PNG_EXTENSION = '.png'
GIF_EXTENSION = '.gif'
SPLIT_STRING = '///'
TOPIC_STRING = 'TTOOPPIICC'
EMOJI_STRING = 'EEMMOOJJII'
FRIEND_STRING = 'FFRRIIEENNDD'
URL_STRING = 'UURRLL'
QQEMO_STRING = 'QQEEMMOO'
OTHEREMO_STRING = 'OOTTHHEERR'
def add_author(document, author):
para = document.add_paragraph()
run = para.add_run(author)
font = run.font
#font.name = 'Microsoft YaHei'
font.size = Pt(12)
font.color.rgb = RGBColor(0x43, 0x6E, 0xEE)
def add_content(document, content, para = None, font_size = 16):
print content
if content.__contains__('k.t.qq.com'):
pattern = re.compile(r'(<a href="http://k.t.qq.com.*?</a>)', re.S)
topics = re.findall(pattern, content)
for topic in topics:
topic_word = topic.split('#')[1]
content = content.replace(topic, SPLIT_STRING + TOPIC_STRING + '#' + topic_word + '#' + SPLIT_STRING)
if content.__contains__('www/mb/images/emoji'):
pattern_emoji = re.compile(r'(<img.*?>)', re.S)
pattern_emoji_img = re.compile(r'crs="(.*?)"', re.S)
emojis = re.findall(pattern_emoji, content)
for emoji in emojis:
emoji_url = re.findall(pattern_emoji_img, emoji)[0]
filename = download_pic(emoji_url, PNG_EXTENSION)
content = content.replace(emoji, SPLIT_STRING + EMOJI_STRING + filename + SPLIT_STRING)
if content.__contains__('em rel="@'):
pattern_friend = re.compile(r'(<em rel=.*?</em>)', re.S)
pattern_friend_name = re.compile(r'<em.*?title="(.*?)"', re.S)
friends = re.findall(pattern_friend, content)
for friend in friends:
friend_name = re.findall(pattern_friend_name, friend)[0]
content = content.replace(friend, SPLIT_STRING + FRIEND_STRING + friend_name + SPLIT_STRING)
if content.__contains__('http://url.cn'):
pattern_url = re.compile(r'(<a href=.*?</a>)', re.S)
pattern_url_str = re.compile(r'<a href="(.*?)"', re.S)
urls = re.findall(pattern_url, content)
for url in urls:
url_str = re.findall(pattern_url_str, url)[0]
content = content.replace(url, SPLIT_STRING + URL_STRING + url_str + SPLIT_STRING)
if content.__contains__('www/mb/images/face'):
pattern_qqemo = re.compile(r'(<img.*?>)', re.S)
pattern_qqemo_img = re.compile(r'crs="(.*?)"', re.S)
qqemos = re.findall(pattern_qqemo, content)
for qqemo in qqemos:
qqemo_url = re.findall(pattern_qqemo_img, qqemo)[0]
filename = download_pic(qqemo_url, GIF_EXTENSION)
content = content.replace(qqemo, SPLIT_STRING + QQEMO_STRING + filename + SPLIT_STRING)
if content.__contains__('<img class='):
pattern_other_emo = re.compile(r'(<img.*?>)', re.S)
pattern_other_emo_img = re.compile(r'<img.*?crs=(.*?) title=', re.S)
pattern_other_emo_img_only = re.compile(r'<img.*?crs=(.*?)>', re.S)
pattern_other_emos = re.findall(pattern_other_emo, content)
for other_emo in pattern_other_emos:
other_emo_match = re.findall(pattern_other_emo_img, other_emo)
if not other_emo_match:# some emoji have special pattern
other_emo_match = re.findall(pattern_other_emo_img_only, other_emo)
other_emo_url = other_emo_match[0]
other_emo_url = other_emo_url[1:-1]# delete start and end mark ' "
filename = download_pic(other_emo_url, other_emo_url[-4:])
content = content.replace(other_emo, SPLIT_STRING + OTHEREMO_STRING + filename + SPLIT_STRING)
content_parts = content.split(SPLIT_STRING)
if not para:
para = document.add_paragraph()
for content_part in content_parts:
# delete first <div> mark
if content_part.startswith('<div>'):
content_part = content_part[5:]
if content_part.endswith('</div>'):
content_part = content_part[:-6]
if content_part.startswith(TOPIC_STRING):
run = para.add_run(content_part.replace(TOPIC_STRING, ''))
font = run.font
font.italic = True
font.bold = False
font.size = Pt(font_size)
font.color.rgb = RGBColor(0x00, 0x00, 0xCD)
elif content_part.startswith(EMOJI_STRING):
run = para.add_run()
filename = content_part.replace(EMOJI_STRING, '')
run.add_picture(filename)
elif content_part.startswith(FRIEND_STRING):
run = para.add_run(content_part.replace(FRIEND_STRING, ''))
font = run.font
font.italic = True
font.bold = False
font.size = Pt(font_size - 2)
font.color.rgb = RGBColor(0xFF, 0x45, 0x00)
elif content_part.startswith(URL_STRING):
docx_ext.add_hyperlink(para, content_part.replace(URL_STRING, ''),
content_part.replace(URL_STRING, ''), '1E90FF', True)
elif content_part.startswith(QQEMO_STRING):
run = para.add_run()
filename = content_part.replace(QQEMO_STRING, '')
run.add_picture(filename)
elif content_part.startswith(OTHEREMO_STRING):
run = para.add_run()
filename = content_part.replace(OTHEREMO_STRING, '')
run.add_picture(filename)
else:
content_part = content_part.replace('&', '&')
content_part = content_part.replace('>', '>')
content_part = content_part.replace('"', '"')
content_part = content_part.replace('<', '<')
run = para.add_run(content_part)
font = run.font
font.bold = False
font.size = Pt(font_size)
font.color.
|
rgb = RGBColor(0x08, 0x08, 0x08)
def add_picture(document, story):
filenames = analyze_pic(story)
for filename
|
in filenames:
try:
document.add_picture(filename, width=Inches(5))
except:
            print 'Error inserting picture: ' + filename
def add_time(document, time):
para = document.add_paragraph()
run = para.add_run(time)
font = run.font
font.italic = True
#font.name = 'Microsoft YaHei'
font.size = Pt(10)
font.color.rgb = RGBColor(0x7A, 0x7A, 0x7A)
def add_location(document, story):
location_items = analyze_loc(story)
if len(location_items) <= 0:
return
link_name = location_items[2]
google_map_url = 'https://maps.google.com/maps?q=' + location_items[0] + ',' + location_items[1]
para = document.add_paragraph()
    run = para.add_run(u'Location: ')
font = run.font
font.size = Pt(10)
font.color.rgb = RGBColor(0x7A, 0x7A, 0x7A)
docx_ext.add_hyperlink(para, google_map_url, link_name, '4169E1', False)
def add_video(document, story):
video_items = analyze_video(story)
if not video_items:
return
para = document.add_paragraph()
run = para.add_run()
font = run.font
font.size = Pt(10)
font.color.rgb = RGBColor(0x7A, 0x7A, 0x7A)
docx_ext.add_hyperlink(para, video_items[0], video_items[1], '4169E1', False)
try:
document.add_picture(video_items[3], width=Inches(3))
except:
        print 'Error inserting video cover: ' + video_items[3]
def download_pic(url, extension):
try:
if not os.path.exists('.//pics'):
os.mkdir('.//pics')
filename = '.\\pics\\' + str(uuid.uuid4()) + extension
urllib.urlretrieve(url, filename)
except Exception:
        print 'Error downloading picture: ' + url
return filename
def analyze_pic(story):
filenames = []
picBox = None
imgGroup = None
try:
picBox = story.find_element_by_class_name('picBox')
except:
None
try:
imgGroup = story.find_element_by_class_name('tl_imgGroup')
except:
None
if picBox:# one pictu
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/GLES2/NV/read_depth_stencil.py
|
Python
|
lgpl-3.0
| 785
| 0.008917
|
'''OpenGL extension NV.read_depth_stencil
This module customises the behaviour of the
OpenGL.raw.GLES2.NV.read_depth_stencil to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/read_depth_stencil.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.NV.r
|
ead_depth_stencil import *
from OpenGL.raw.GLES2.NV.read_depth_stencil import _EXTENSION_NAME
def glInitReadDepthStencilNV():
'''R
|
eturn boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
TimeSynth/TimeSynth
|
timesynth/signals/car.py
|
Python
|
mit
| 1,472
| 0.001359
|
import numpy as np
from .base_signal import BaseSignal
__all__ = ['CAR']
class CAR(BaseSignal):
"""Signal generatpr for continuously autoregressive (CAR) signals.
Parameters
----------
ar_param : number (default 1.0)
Parameter of the AR(1) process
|
sigma
|
: number (default 0.5)
Standard deviation of the signal
    start_value : number (default 0.01)
Starting value of the AR process
"""
def __init__(self, ar_param=1.0, sigma=0.5, start_value=0.01):
self.vectorizable = False
self.ar_param = ar_param
self.sigma = sigma
self.start_value = start_value
self.previous_value = None
self.previous_time = None
def sample_next(self, time, samples, errors):
"""Sample a single time point
Parameters
----------
time : number
Time at which a sample was required
Returns
-------
float
sampled signal for time t
"""
if self.previous_value is None:
output = self.start_value
else:
time_diff = time - self.previous_time
noise = np.random.normal(loc=0.0, scale=1.0, size=1)
output = (np.power(self.ar_param, time_diff))*self.previous_value+\
self.sigma*np.sqrt(1-np.power(self.ar_param, time_diff))*noise
self.previous_time = time
self.previous_value = output
return output
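# A minimal usage sketch (assumed calling pattern based only on the class above; not part of the original file):
# car = CAR(ar_param=0.9, sigma=0.5)
# values = [car.sample_next(t, None, None) for t in range(5)]   # the samples/errors arguments are unused here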
|
jhgg/discord.py
|
discord/colour.py
|
Python
|
mit
| 6,401
| 0.003749
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
class Colour:
"""Represents a Discord role colour. This class is similar
    to a (red, green, blue) tuple.
There is an alias for this called Color.
Supported operations:
+-----------+----------------------------------------+
| Operation | Description |
+===========+========================================+
| x == y | Checks if two colours are equal. |
+-----------+----------------------------------------+
| x != y | Checks if two colours are not equal. |
+-----------+----------------------------------------+
| hash(x) | Return the colour's hash. |
+-----------+----------------------------------------+
| str(x) | Returns the hex format for the colour. |
+-----------+----------------------------------------+
Attributes
------------
value : int
The raw integer colour value.
"""
__slots__ = [ 'value' ]
def __init__(self, value):
self.value = value
def _get_byte(self, byte):
return (self.value >> (8 * byte)) & 0xff
def __eq__(self, other):
return isinstance(other, Colour) and self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return '#' + format(self.value, 'x')
def __hash__(self):
return hash(self.value)
@property
def r(self):
"""Returns the red component of the colour."""
return self._get_byte(2)
@property
def g(self):
"""Returns the green component of the colour."""
return self._get_byte(1)
@property
def b(self):
"""Returns the blue component of the colour."""
return self._get_byte(0)
def to_tuple(self):
"""Returns an (r, g, b) tuple representing the colour."""
return (self.r, self.g, self.b)
@classmethod
def default(cls):
"""A factory method that returns a :class:`Colour` with a value of 0."""
return cls(0)
@classmethod
def teal(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x1abc9c``."""
return cls(0x1abc9c)
@classmethod
def dark_teal(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x11806a``."""
return cls(0x11806a)
@classmethod
def green(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x2ecc71``."""
return cls(0x2ecc71)
@classmethod
def dark_green(cls):
"""A fa
|
ctory method that returns a :class:`Colour` with a value of ``0x1f8b4c``."""
ret
|
urn cls(0x1f8b4c)
@classmethod
def blue(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x3498db``."""
return cls(0x3498db)
@classmethod
def dark_blue(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x206694``."""
return cls(0x206694)
@classmethod
def purple(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x9b59b6``."""
return cls(0x9b59b6)
@classmethod
def dark_purple(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x71368a``."""
return cls(0x71368a)
@classmethod
def magenta(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0xe91e63``."""
return cls(0xe91e63)
@classmethod
def dark_magenta(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0xad1457``."""
return cls(0xad1457)
@classmethod
def gold(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0xf1c40f``."""
return cls(0xf1c40f)
@classmethod
def dark_gold(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0xc27c0e``."""
return cls(0xc27c0e)
@classmethod
def orange(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0xe67e22``."""
return cls(0xe67e22)
@classmethod
def dark_orange(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0xa84300``."""
return cls(0xa84300)
@classmethod
def red(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0xe74c3c``."""
return cls(0xe74c3c)
@classmethod
def dark_red(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x992d22``."""
return cls(0x992d22)
@classmethod
def lighter_grey(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x95a5a6``."""
return cls(0x95a5a6)
@classmethod
def dark_grey(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x607d8b``."""
return cls(0x607d8b)
@classmethod
def light_grey(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x979c9f``."""
return cls(0x979c9f)
@classmethod
def darker_grey(cls):
"""A factory method that returns a :class:`Colour` with a value of ``0x546e7a``."""
return cls(0x546e7a)
Color = Colour
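# Editor's note: a short usage sketch, not part of the original module, showing
# the factory methods and per-channel properties defined above.
if __name__ == "__main__":
    c = Colour.teal()
    print(str(c))                    # '#1abc9c'
    print(c.r, c.g, c.b)             # 26 188 156
    print(c.to_tuple())              # (26, 188, 156)
    print(Color.teal() == Colour(0x1abc9c))  # True -- Color is just an alias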
|
enigmampc/catalyst
|
catalyst/finance/performance/position.py
|
Python
|
apache-2.0
| 7,850
| 0.000127
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Position Tracking
=================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| asset | the asset held in this position |
+-----------------+----------------------------------------------------+
| amount | whole number of shares in the position |
+-----------------+----------------------------------------------------+
| last_sale_price | price at last sale of the asset on the exchange |
+-----------------+----------------------------------------------------+
| cost_basis | the volume weighted average price paid per share |
+-----------------+----------------------------------------------------+
"""
from __future__ import division
from math import copysign
from collections import OrderedDict
import numpy as np
import logbook
from catalyst.assets import Future, Asset
from catalyst.utils.input_validation import expect_types
from catalyst.constants import LOG_LEVEL
log = logbook.Logger('Performance', level=LOG_LEVEL)
class Position(object):
@expect_types(asset=Asset)
def __init__(self, asset, amount=0, cost_basis=0.0,
last_sale_price=0.0, last_sale_date=None):
self.asset = asset
self.amount = amount
self.cost_basis = cost_basis # per share
self.last_sale_price = last_sale_price
self.last_sale_date = last_sale_date
def earn_dividend(self, dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {
'amount': self.amount * dividend.amount
}
def earn_stock_dividend(self, stock_dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {
'payment_asset': stock_dividend.payment_asset,
            'share_count': np.floor(
self.amount * float(stock_dividend.ratio)
)
}
@expect_types(asset=Asset)
def handle_split(self, asset, ratio):
"""
        Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
"""
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
# adjust the # of shares by the ratio
# (if we had 100 shares, and the ratio is 3,
# we now have 33 shares)
# (old_share_count / ratio = new_share_count)
# (old_price * ratio = new_price)
# e.g., 33.333
raw_share_count = self.amount / float(ratio)
# e.g., 33
full_share_count = np.floor(raw_share_count)
# e.g., 0.333
fractional_share_count = raw_share_count - full_share_count
# adjust the cost basis to the nearest cent, e.g., 60.0
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
# return the leftover cash, which will be converted into cash
# (rounded to the nearest cent)
return return_cash
def update(self, txn):
if self.asset != txn.asset:
raise Exception('updating position with txn for a '
'different asset')
total_shares = self.amount + txn.amount
if total_shares == 0:
self.cost_basis = 0.0
else:
prev_direction = copysign(1, self.amount)
txn_direction = copysign(1, txn.amount)
if prev_direction != txn_direction:
# we're covering a short or closing a position
if abs(txn.amount) > abs(self.amount):
# we've closed the position and gone short
# or covered the short position and gone long
self.cost_basis = txn.price
else:
prev_cost = self.cost_basis * self.amount
txn_cost = txn.amount * txn.price
total_cost = prev_cost + txn_cost
self.cost_basis = total_cost / total_shares
# Update the last sale price if txn is
# best data we have so far
if self.last_sale_date is None or txn.dt > self.last_sale_date:
self.last_sale_price = txn.price
self.last_sale_date = txn.dt
# on live mode, if the fee currency exists, reduce the commission
# from the position if necessary.
# Notice! the fee_currency is compared to the base_currency- once it
# is allowed to have more than one quote currency, the comparison is
# needed to be changed
if txn.commission is not None and \
txn.fee_currency == self.asset.base_currency:
total_shares -= txn.commission
self.amount = total_shares
@expect_types(asset=Asset)
def adjust_commission_cost_basis(self, asset, cost):
"""
A note about cost-basis in catalyst: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how catalyst handles positions, catalyst will
currently spread an externally-delivered commission charge across
all shares in a position.
"""
if asset != self.asset:
raise Exception('Updating a commission for a different asset?')
if cost == 0.0:
return
# If we no longer hold this position, there is no cost basis to
# adjust.
if self.amount == 0:
return
prev_cost = self.cost_basis * self.amount
if isinstance(asset, Future):
cost_to_use = cost / asset.multiplier
else:
cost_to_use = cost
new_cost = prev_cost + cost_to_use
self.cost_basis = new_cost / self.amount
def __repr__(self):
template = "asset: {asset}, amount: {amount}, cost_basis: {cost_basis}, \
last_sale_price: {last_sale_price}"
return template.format(
asset=self.asset,
amount=self.amount,
cost_basis=self.cost_basis,
last_sale_price=self.last_sale_price
)
def to_dict(self):
"""
Creates a dictionary representing the state of this position.
Returns a dict object of the form:
"""
return {
'sid': self.asset,
'amount': self.amount,
'cost_basis': self.cost_basis,
'last_sale_price': self.last_sale_price
}
class positiondict(OrderedDict):
def __missing__(self, key):
return None
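# Editor's note: a standalone worked example of the split arithmetic in
# Position.handle_split above, not part of the original module. The numbers
# follow the inline comments there: 100 shares, ratio 3, $20.00 cost basis.
if __name__ == "__main__":
    amount, cost_basis, ratio = 100, 20.0, 3
    raw_share_count = amount / float(ratio)                       # 33.333...
    full_share_count = np.floor(raw_share_count)                  # 33.0 shares kept
    fractional_share_count = raw_share_count - full_share_count   # 0.333... shares cashed out
    new_cost_basis = round(cost_basis * ratio, 2)                 # 60.0 per share
    return_cash = round(float(fractional_share_count * new_cost_basis), 2)
    print(full_share_count, new_cost_basis, return_cash)          # 33.0 60.0 20.0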
|
dalimatt/Instastalk
|
dependencies/workflow/background.py
|
Python
|
mit
| 7,361
| 0.001359
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2014 deanishe@deanishe.net
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-04-06
#
"""
Run background tasks
"""
from __future__ import print_function, unicode_literals
import sys
import os
import subprocess
import pickle
from workflow import Workflow
__all__ = ['is_running', 'run_in_background']
_wf = None
def wf():
global _wf
if _wf is None:
_wf = Workflow()
return _wf
def _arg_cache(name):
"""Return path to pickle cache file for arguments
:param name: name of task
:type name: ``unicode``
:returns: Path to cache file
:rtype: ``unicode`` filepath
"""
return wf().cachefile('{0}.argcache'.format(name))
def _pid_file(name):
"""Return path to PID file for ``name``
:param name: name of task
:type name: ``unicode``
:returns: Path to PID file for task
:rtype: ``unicode`` filepath
"""
return wf().cachefile('{0}.pid'.format(name))
def _process_exists(pid):
"""Check if a process with PID ``pid`` exists
:param pid: PID to check
:type pid: ``int``
:returns: ``True`` if process exists, else ``False``
:rtype: ``Boolean``
"""
try:
os.kill(pid, 0)
except OSError: # not running
return False
return True
def is_running(name):
"""
Test whether task is running under ``name``
:param name: name of task
:type name: ``unicode``
:returns: ``True`` if task with name ``name`` is running, else ``False``
:rtype: ``Boolean``
"""
pidfile = _pid_file(name)
if not os.path.exists(pidfile):
return False
with open(pidfile, 'rb') as file_obj:
pid = int(file_obj.read().strip())
if _process_exists(pid):
return True
elif os.path.exists(pidfile):
os.unlink(pidfile)
return False
def _background(stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'): # pragma: no cover
"""Fork the current process into a background daemon.
:param stdin: where to read input
:type stdin: filepath
:param stdout: where to write stdout output
:type stdout: filepath
:param stderr: where to write stderr output
:type stderr: filepath
"""
# Do first fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit first parent.
except OSError as e:
wf().logger.critical("fork #1 failed: ({0:d}) {1}".format(
e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment.
os.chdir(wf().workflowdir)
os.umask(0)
os.setsid()
# Do second fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit second parent.
except OSError as e:
wf().logger.critical("fork #2 failed: ({0:d}) {1}".format(
e.errno, e.strerror))
sys.exit(1)
# Now I am a daemon!
# Redirect standard file descriptors.
si = file(stdin, 'r', 0)
so = file(stdout, 'a+', 0)
se = file(stderr, 'a+', 0)
if hasattr(sys.stdin, 'fileno'):
os.dup2(si.fileno(), sys.stdin.fileno())
if hasattr(sys.stdout, 'fileno'):
os.dup2(so.fileno(), sys.stdout.fileno())
if hasattr(sys.stderr, 'fileno'):
os.dup2(se.fileno(), sys.stderr.fileno())
def run_in_background(name, args, timeout=None, **kwargs):
"""Pickle arguments to cache file, then call this script again via
:func:`subprocess.call`.
:param name: name of task
:type name: ``unicode``
:param args: arguments passed as first argument to :func:`subprocess.call`
:param \**kwargs: keyword arguments to :func:`subprocess.call`
:returns: exit code of sub-process
:rtype: ``int``
When you call this function, it caches its arguments and then calls
``background.py`` in a subprocess. The Python subprocess will load the
cached arguments, fork into the background, and then run the command you
specified.
This function will return as soon as the ``background.py`` subprocess has
forked, returning the exit code of *that* process (i.e. not of the command
you're trying to run).
If that process fails, an error will be written to the log file.
If a process is already running under the same name, this function will
    return immediately and will not run the specified command.
"""
if is_running(name):
wf().logger.info('Task `{0}` is already running'.format(name))
        return
argcache = _arg_cache(name)
# Cache arguments
with open(argcache, 'wb') as file_obj:
pickle.dump({'args': args, 'kwargs': kwargs}, file_obj)
wf().logger.debug('Command arguments cached to `{0}`'.format(argcache))
# Call this script
cmd = ['/usr/bin/python', __file__, name, str(timeout)]
wf().logger.debug('Calling {0!r} ...'.format(cmd))
retcode = subprocess.call(cmd)
if retcode: # pragma: no cover
wf().logger.error('Failed to call task in background')
else:
wf().logger.debug('Executing task `{0}` in background...'.format(name))
return retcode
def main(wf): # pragma: no cover
"""
Load cached arguments, fork into background, then call
:meth:`subprocess.call` with cached arguments
"""
name = wf.args[0]
timeout_str = wf.args[1]
argcache = _arg_cache(name)
if not os.path.exists(argcache):
wf.logger.critical('No arg cache found : {0!r}'.format(argcache))
return 1
# Load cached arguments
with open(argcache, 'rb') as file_obj:
data = pickle.load(file_obj)
# Cached arguments
args = data['args']
kwargs = data['kwargs']
# Delete argument cache file
os.unlink(argcache)
pidfile = _pid_file(name)
# Fork to background
_background()
# Write PID to file
with open(pidfile, 'wb') as file_obj:
file_obj.write('{0}'.format(os.getpid()))
# Run the command
try:
wf.logger.debug('Task `{0}` running'.format(name))
wf.logger.debug('cmd : {0!r}'.format(args))
# wf.logger.debug('About to create sub process')
process = subprocess.Popen(args, **kwargs)
retcode = process.returncode
# wf.logger.debug('Created process: pid:{0}'.format(process.pid))
# Write PID to file
with open(pidfile, 'wb') as file_obj:
file_obj.write('{0}'.format(process.pid))
# Create watchdog process if there is a timeout
if timeout_str != 'None':
watchdog = wf.workflowfile('watchdog.py')
cmd = ['/usr/bin/python', watchdog,
str(process.pid), timeout_str]
wf.logger.debug('cmd : {0!r}'.format(cmd))
retcode2 = subprocess.call(cmd)
if retcode2:
wf.logger.error('Command failed with [{0}] : {1!r}'.format(
retcode2, cmd))
if retcode:
wf.logger.error('Command failed with [{0}] : {1!r}'.format(
retcode, args))
finally:
# if os.path.exists(pidfile):
# os.unlink(pidfile)
wf.logger.debug('Task `{0}` finished'.format(name))
if __name__ == '__main__': # pragma: no cover
wf().run(main)
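# Editor's note: a hedged usage sketch, not part of the original module. From
# workflow code one would call run_in_background() with a task name and an
# argv-style command, then poll is_running(); the 'update.py' script below is
# purely illustrative.
#
#   from workflow.background import run_in_background, is_running
#   if not is_running('update'):
#       run_in_background('update', ['/usr/bin/python', 'update.py'])
#   if is_running('update'):
#       print('update task is running in the background')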
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/python/Lib/power/__init__.py
|
Python
|
agpl-3.0
| 1,441
| 0.002776
|
# coding=utf-8
"""
Provides crossplatform checking of current power source, battery warning level and battery time remaining estimate.
Allows you to add observer for power notifications if platform supports it.
Usage:
from power import PowerManagement, PowerManagementObserver # Automatically imports platform-specific implementation
class Observer(PowerManagementObserver):
def on_power_sources_change(self, power_management):
print "Power sources did change."
def on_time_remaining_change(self, power_management):
print "Time remaining did change."
# class Observer(object):
# ...
# PowerManagementObserver.register(Observer)
"""
__author__ = 'kulakov.ilya@gmail.com'
__version__ = '1.2'
from sys import platform
from power.common import *
try:
if platform.startswith('darwin'):
from power.darwin import PowerManagement
elif platform.startswith('win32'):
from power.win32 import PowerManagement
elif platform.startswith('linux'):
from power.linux import PowerManagement
else:
raise RuntimeError("{platform} is not supported.".format(platform=platform))
except RuntimeError as e:
import warnings
warnings.warn("Unable to load PowerManagement for {platform}. No-op PowerManagement class is used: {error}".format(
|
error=str(e), platform=platform))
from power.common import PowerManagemen
|
tNoop as PowerManagement
|
nirvaris/nirvaris-dictionary
|
dictionary/migrations/0012_auto_20160521_1234.py
|
Python
|
mit
| 887
| 0.002255
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-21 12:34
from __future__ import unicode_literals
import dictionary.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('dictionary', '0011_auto_20160503_1535'),
]
operations = [
migrations.RenameField(
model_name='picture',
old_name='image',
new_name='full',
),
migrations.AddField(
model_name='picture',
name='small',
field=models.ImageField(max_length=1024, null=True, upload_to=dictionary.models.user_directory_path),
),
migrations.AddField(
model_name='picture',
name='tinny',
field=models.ImageField(max_length=1024, null=True, upload_to=dictionary.models.user_directory_path),
),
]
|
savi-dev/horizon
|
horizon/views/auth_forms.py
|
Python
|
apache-2.0
| 8,511
| 0.000587
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Forms used for Horizon's auth mechanisms.
"""
import logging
from django import shortcuts
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.utils.translation import ugettext as _
from django.views.decorators.debug import sensitive_variables
from keystoneclient import exceptions as keystone_exceptions
from horizon import api
from horizon import base
from horizon import exceptions
from horizon import forms
from horizon import users
LOG = logging.getLogger(__name__)
def _set_session_data(request, token):
request.session['serviceCatalog'] = token.serviceCatalog
request.session['tenant'] = token.tenant['name']
request.session['tenant_id'] = token.tenant['id']
request.session['token'] = token.id
request.session['user_name'] = token.user['name']
request.session['user_id'] = token.user['id']
request.session['roles'] = token.user['roles']
class Login(forms.SelfHandlingForm):
""" Form used for logging in a user.
Handles authentication with Keystone, choosing a tenant, and fetching
a scoped token token for that tenant. Redirects to the URL returned
by :meth:`horizon.get_user_home` if successful.
Subclass of :class:`~horizon.forms.SelfHandlingForm`.
"""
region = forms.ChoiceField(label=_("Region"), required=False)
username = forms.CharField(label=_("User Name"))
password = forms.CharField(label=_("Password"),
widget=forms.PasswordInput(render_value=False))
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
# FIXME(gabriel): When we switch to region-only settings, we can
# remove this default region business.
default_region = (settings.OPENSTACK_KEYSTONE_URL, "Default Region")
regions = getattr(settings, 'AVAILABLE_REGIONS', [default_region])
self.fields['region'].choices = regions
if len(regions) == 1:
self.fields['region'].initial = default_region[0]
self.fields['region'].widget = forms.widgets.HiddenInput()
@sensitive_variables("data")
def handle(self, request, data):
""" Process the user's login via Keystone.
Note: We don't use the messages framework here (including messages
        created by ``exceptions.handle`` because they will not be displayed
on the login page (intentionally). Instead we add all error messages
to the form's ``non_field_errors``, causing them to appear as
errors on the form itself.
"""
if 'user_name' in request.session:
if request.session['user_name'] != data['username']:
                # To avoid reusing another user's session, create a
                # new, empty session if the existing session
# corresponds to a different authenticated user.
request.session.flush()
# Always cycle the session key when viewing the login form to
# prevent session fixation
request.session.cycle_key()
# For now we'll allow fallback to OPENSTACK_KEYSTONE_URL if the
# form post doesn't include a region.
endpoint = data.get('region', None) or settings.OPENSTACK_KEYSTONE_URL
if endpoint != request.session.get('region_endpoint', None):
region_name = dict(self.fields['region'].choices)[endpoint]
request.session['region_endpoint'] = endpoint
request.session['region_name'] = region_name
request.user.service_catalog = None
redirect_to = request.REQUEST.get(REDIRECT_FIELD_NAME, "")
if data.get('tenant', None):
try:
token = api.token_create(request,
data.get('tenant'),
data['username'],
data['password'])
tenants = api.tenant_list_for_token(request, token.id)
except:
msg = _('Unable to authenticate for that project.')
exceptions.handle(request, ignore=True)
return self.api_error(msg)
_set_session_data(request, token)
user = users.get_user_from_request(request)
redirect = redirect_to or base.Horizon.get_user_home(user)
return shortcuts.redirect(redirect)
elif data.get('username', None):
try:
unscoped_token = api.token_create(request,
'',
data['username'],
data['password'])
except keystone_exceptions.Unauthorized:
msg = _('Invalid user name or password.')
exceptions.handle(request, ignore=True)
return self.api_error(msg)
except:
# If we get here we don't want to show a stack trace to the
# user. However, if we fail here, there may be bad session
# data that's been cached already.
request.user_logout()
msg = _("An error occurred authenticating. "
"Please try again later.")
exceptions.handle(request, ignore=True)
return self.api_error(msg)
# Unscoped token
request.session['unscoped_token'] = unscoped_token.id
request.user.username = data['username']
# Get the tenant list, and log in using first tenant
# FIXME (anthony): add tenant chooser here?
try:
tenants = api.tenant_list_for_token(request, unscoped_token.id)
except:
exceptions.handle(request, ignore=True)
tenants = []
# Abort if there are no valid tenants for this user
if not tenants:
msg = _('You are not authorized for any projects.')
return self.api_error(msg)
# Create a token.
# NOTE(gabriel): Keystone can return tenants that you're
# authorized to administer but not to log into as a user, so in
# the case of an Unauthorized error we should iterate through
# the tenants until one succeeds or we've failed them all.
while tenants:
tenant = tenants.pop()
try:
token = api.token_create_scoped(request,
tenant.id,
unscoped_token.id)
break
except:
# This will continue for recognized Unauthorized
# exceptions from keystoneclient.
exceptions.handle(request, ignore=True)
token = None
if token is None:
msg = _("You are not authorized for any available projects.")
return self.api_error(msg)
_set_session_data(request, token)
user = users.get_user_from_request(request)
redirect = redirect_to or base.Horizon.get_user_home(user)
return shortcuts.redirect(redirect)
class LoginWithTenant(Login):
"""
Exactly like :class:`.Login` but includes
|
maxive/erp
|
addons/lunch/models/lunch.py
|
Python
|
agpl-3.0
| 14,593
| 0.003358
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import OrderedDict
import json
import datetime
from odoo import api, fields, models, _
from odoo.exceptions import AccessError, ValidationError
from odoo.addons import decimal_precision as dp
class LunchOrder(models.Model):
"""
    A lunch order contains one or more lunch order line(s). It is associated with a user for a given
date. When creating a lunch order, applicable lunch alerts are displayed.
"""
_name = 'lunch.order'
_description = 'Lunch Order'
_order = 'date desc'
def _default_previous_order_ids(self):
prev_order = self.env['lunch.order.line'].search([('user_id', '=', self.env.uid), ('product_id.active', '!=', False)], limit=20, order='id desc')
        # If we return prev_order.ids, we will have duplicates (identical orders).
# Therefore, this following part removes duplicates based on product_id and note.
return list({
(order.product_id, order.note): order.id
for order in prev_order
}.values())
user_id = fields.Many2one('res.users', 'User', readonly=True,
states={'new': [('readonly', False)]},
default=lambda self: self.env.uid)
date = fields.Date('Date', required=True, readonly=True,
states={'new': [('readonly', False)]},
default=fields.Date.context_today)
order_line_ids = fields.One2many('lunch.order.line', 'order_id', 'Products',
readonly=True, copy=True,
states={'new': [('readonly', False)], False: [('readonly', False)]})
total = fields.Float(compute='_compute_total', string="Total", store=True)
state = fields.Selection([('new', 'New'),
('confirmed', 'Received'),
('cancelled', 'Cancelled')],
'Status', readonly=True, index=True, copy=False,
compute='_compute_order_state', store=True)
alerts = fields.Text(compute='_compute_alerts_get', string="Alerts")
company_id = fields.Many2one('res.company', related='user_id.company_id', store=True)
currency_id = fields.Many2one('res.currency', related='company_id.currency_id', readonly=True, store=True)
cash_move_balance = fields.Monetary(compute='_compute_cash_move_balance', multi='cash_move_balance')
balance_visible = fields.Boolean(compute='_compute_cash_move_balance', multi='cash_move_balance')
previous_order_ids = fields.Many2many('lunch.order.line', compute='_compute_previous_order')
previous_order_widget = fields.Text(compute='_compute_previous_order')
@api.one
@api.depends('order_line_ids')
def _compute_total(self):
"""
get and sum the order lines' price
"""
self.total = sum(
orderline.price for orderline in self.order_line_ids)
@api.multi
def name_get(self):
return [(order.id, '%s %s' % (_('Lunch Order'), '#%d' % order.id)) for order in self]
@api.depends('state')
def _compute_alerts_get(self):
"""
get the alerts to display on the order form
"""
alert_msg = [alert.message
for alert in self.env['lunch.alert'].search([])
if alert.display]
if self.state == 'new':
self.alerts = alert_msg and '\n'.join(alert_msg) or False
@api.multi
@api.depends('user_id', 'state')
def _compute_previous_order(self):
self.ensure_one()
self.previous_order_widget = json.dumps(False)
prev_order = self.env['lunch.order.line'].search([('user_id', '=', self.env.uid), ('product_id.active', '!=', False)], limit=20, order='date desc, id desc')
# If we use prev_order.ids, we will have duplicates (identical orders).
# Therefore, this following part removes duplicates based on product_id and note.
self.previous_order_ids = list({
(order.product_id, order.note): order.id
for order in prev_order
}.values())
if self.previous_order_ids:
lunch_data = {}
for line in self.previous_order_ids:
lunch_data[line.id] = {
'line_id': line.id,
'product_id': line.product_id.id,
'product_name': line.product_id.name,
'supplier': line.supplier.name,
'note': line.note,
'price': line.price,
                    'date': line.date,
                    'currency_id': line.currency_id.id,
                }
# sort the old lunch orders by (date, id)
lunch_data = OrderedDict(sorted(lunch_data.items(), key=lambda t: (t[1]['date'], t[0]), reverse=True))
self.previous_order_widget = json.dumps(lunch_data)
@api.one
@api.depends('user_id')
def _compute_cash_move_balance(self):
domain = [('user_id', '=', self.user_id.id)]
lunch_cash = self.env['lunch.cashmove'].read_group(domain, ['amount', 'user_id'], ['user_id'])
if len(lunch_cash):
self.cash_move_balance = lunch_cash[0]['amount']
self.balance_visible = (self.user_id == self.env.user) or self.user_has_groups('lunch.group_lunch_manager')
@api.one
@api.constrains('date')
def _check_date(self):
"""
        Prevents the user from creating an order in the past
"""
date_order = datetime.datetime.strptime(self.date, '%Y-%m-%d')
date_today = datetime.datetime.strptime(fields.Date.context_today(self), '%Y-%m-%d')
if (date_order < date_today):
raise ValidationError(_('The date of your order is in the past.'))
@api.one
@api.depends('order_line_ids.state')
def _compute_order_state(self):
"""
Update the state of lunch.order based on its orderlines. Here is the logic:
- if at least one order line is cancelled, the order is set as cancelled
- if no line is cancelled but at least one line is not confirmed, the order is set as new
- if all lines are confirmed, the order is set as confirmed
"""
if not self.order_line_ids:
self.state = 'new'
else:
isConfirmed = True
for orderline in self.order_line_ids:
if orderline.state == 'cancelled':
self.state = 'cancelled'
return
elif orderline.state == 'confirmed':
continue
else:
isConfirmed = False
if isConfirmed:
self.state = 'confirmed'
else:
self.state = 'new'
return
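# Editor's note: a dependency-free sketch of the state rules documented in
# _compute_order_state above (not part of the original addon):
#
#   def derive_order_state(line_states):
#       if not line_states:
#           return 'new'
#       if 'cancelled' in line_states:
#           return 'cancelled'
#       return 'confirmed' if all(s == 'confirmed' for s in line_states) else 'new'
#
#   assert derive_order_state(['confirmed', 'new']) == 'new'
#   assert derive_order_state(['confirmed', 'cancelled']) == 'cancelled'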
class LunchOrderLine(models.Model):
_name = 'lunch.order.line'
_description = 'lunch order line'
_order = 'date desc, id desc'
name = fields.Char(related='product_id.name', string="Product Name", readonly=True)
order_id = fields.Many2one('lunch.order', 'Order', ondelete='cascade', required=True)
product_id = fields.Many2one('lunch.product', 'Product', required=True)
category_id = fields.Many2one('lunch.product.category', string='Product Category',
related='product_id.category_id', readonly=True, store=True)
date = fields.Date(string='Date', related='order_id.date', readonly=True, store=True)
supplier = fields.Many2one('res.partner', string='Vendor', related='product_id.supplier',
readonly=True, store=True)
user_id = fields.Many2one('res.users', string='User', related='order_id.user_id',
readonly=True, store=True)
note = fields.Text('Note')
price = fields.Float(related='product_id.price', readonly=True, store=True,
digits=dp.get_precision('Account'))
state = fields.Selection([('new', 'New'),
('confirmed', 'Received'),
('ordered', 'Ord
|
abelectronicsuk/ABElectronics_Python_Libraries
|
IOPi/tests/get_bus_pullups.py
|
Python
|
gpl-2.0
| 1,397
| 0
|
#!/usr/bin/env python
"""
================================================
ABElectronics IO Pi Tests | test get_bus_pullups function
Requires python smbus to be installed
For Python 2 install with: sudo apt-get install python-smbus
For Python 3 install with: sudo apt-get install python3-smbus
run with: python3 get_bus_pullups.py
================================================
This test validates the get_bus_pullups function in the IOPi class.
=== Expected Result ============================
> Console Output:
Test Passed
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
try:
import sys
sys.path.append("..")
from IOPi import IOPi
except ImportError:
raise ImportError("Failed to import IOPi library")
def main():
"""
Main program function
"""
passed = True
iopi = IOPi(0x20, False) # new iopi object without initialisation
for a in range(1, 65536):
iopi.set_bus_pullups(a)
x = iopi.get_bus_pullups()
        if x != a:
            passed = False
            break
        iopi.set_bus_pullups(a)
        x = iopi.get_bus_pullups()
        if x != a:
passed = False
break
if passed is False:
print("Test Failed")
else:
print("Test Passed")
if __name__ == "__main__":
main()
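# Editor's note: an illustrative aside, not part of the original test. The
# value passed to set_bus_pullups() appears to be a 16-bit mask covering both
# 8-bit ports (an assumption based on the 1..65535 sweep above), e.g.:
#
#   iopi.set_bus_pullups(0xFFFF)        # pull-ups enabled on every pin
#   iopi.set_bus_pullups(0x0001)        # pull-up enabled on pin 1 only
#   print(bin(iopi.get_bus_pullups()))  # read the mask back for verification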
|
quattor/aquilon
|
lib/aquilon/worker/commands/show_realm_all.py
|
Python
|
apache-2.0
| 1,108
| 0
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2014,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show realm --all`."""
from aquilon.aqdb.model import Realm
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.formats.list import StringAttributeList
class CommandShowRealmAll(BrokerCommand):
required_parameters = []
def render(self, session, **_):
q = session.query(Realm).order_by(Realm.name)
return StringAttributeList(q.all(), "name")
|
edx-solutions/edx-platform
|
lms/djangoapps/courseware/tests/test_lti_integration.py
|
Python
|
agpl-3.0
| 9,264
| 0.002807
|
"""LTI integration tests"""
import json
from collections import OrderedDict
import mock
import oauthlib
import six
from django.conf import settings
from django.urls import reverse
from six import text_type
from lms.djangoapps.courseware.tests.helpers import BaseTestXmodule
from lms.djangoapps.courseware.views.views import get_course_lti_endpoints
from openedx.core.lib.url_utils import quote_slashes
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.x_module import STUDENT_VIEW
class TestLTI(BaseTestXmodule):
"""
Integration test for lti xmodule.
It checks overall code, by assuring that context that goes to template is correct.
As part of that, checks oauth signature generation by mocking signing function
of `oauthlib` library.
"""
CATEGORY = "lti"
def setUp(self):
"""
Mock oauth1 signing of requests library for testing.
"""
super(TestLTI, self).setUp()
mocked_nonce = u'135685044251684026041377608307'
mocked_timestamp = u'1234567890'
mocked_signature_after_sign = u'my_signature%3D'
mocked_decoded_signature = u'my_signature='
# Note: this course_id is actually a course_key
context_id = text_type(self.item_descriptor.course_id)
user_id = text_type(self.item_descriptor.xmodule_runtime.anonymous_student_id)
hostname = self.item_descriptor.xmodule_runtime.hostname
resource_link_id = text_type(six.moves.urllib.parse.quote('{}-{}'.format(hostname,
self.item_descriptor.location.html_id()
)))
sourcedId = "{context}:{resource_link}:{user_id}".format(
context=six.moves.urllib.parse.quote(context_id),
resource_link=resource_link_id,
user_id=user_id
)
self.correct_headers = {
u'user_id': user_id,
u'oauth_callback': u'about:blank',
u'launch_presentation_return_url': '',
u'lti_message_type': u'basic-lti-launch-request',
u'lti_version': 'LTI-1p0',
u'roles': u'Student',
u'context_id': context_id,
u'resource_link_id': resource_link_id,
u'lis_result_sourcedid': sourcedId,
u'oauth_nonce': mocked_nonce,
u'oauth_timestamp': mocked_timestamp,
u'oauth_consumer_key': u'',
u'oauth_signature_method': u'HMAC-SHA1',
u'oauth_version': u'1.0',
u'oauth_signature': mocked_decoded_signature
}
saved_sign = oauthlib.oauth1.Client.sign
self.expected_context = {
'display_name': self.item_descriptor.display_name,
'input_fields': self.correct_headers,
'element_class': self.item_descriptor.category,
            'element_id': self.item_descriptor.location.html_id(),
            'launch_url': u'http://www.example.com',  # default value
            'open_in_a_new_page': True,
'form_url': self.item_descriptor.xmodule_runtime.handler_url(self.item_descriptor,
'preview_handler').rstrip('/?'),
'hide_launch': False,
'has_score': False,
'module_score': None,
'comment': u'',
'weight': 1.0,
'ask_to_send_username': self.item_descriptor.ask_to_send_username,
'ask_to_send_email': self.item_descriptor.ask_to_send_email,
'description': self.item_descriptor.description,
'button_text': self.item_descriptor.button_text,
'accept_grades_past_due': self.item_descriptor.accept_grades_past_due,
}
def mocked_sign(self, *args, **kwargs):
"""
Mocked oauth1 sign function.
"""
# self is <oauthlib.oauth1.rfc5849.Client object> here:
__, headers, __ = saved_sign(self, *args, **kwargs)
# we should replace nonce, timestamp and signed_signature in headers:
old = headers[u'Authorization']
old_parsed = OrderedDict([param.strip().replace('"', '').split('=') for param in old.split(',')])
old_parsed[u'OAuth oauth_nonce'] = mocked_nonce
old_parsed[u'oauth_timestamp'] = mocked_timestamp
old_parsed[u'oauth_signature'] = mocked_signature_after_sign
headers[u'Authorization'] = ', '.join([k + '="' + v + '"' for k, v in old_parsed.items()])
return None, headers, None
patcher = mock.patch.object(oauthlib.oauth1.Client, "sign", mocked_sign)
patcher.start()
self.addCleanup(patcher.stop)
def test_lti_constructor(self):
generated_content = self.item_descriptor.render(STUDENT_VIEW).content
expected_content = self.runtime.render_template('lti.html', self.expected_context)
self.assertEqual(generated_content, expected_content)
def test_lti_preview_handler(self):
generated_content = self.item_descriptor.preview_handler(None, None).body
expected_content = self.runtime.render_template('lti_form.html', self.expected_context)
self.assertEqual(generated_content.decode('utf-8'), expected_content)
class TestLTIModuleListing(SharedModuleStoreTestCase):
"""
a test for the rest endpoint that lists LTI modules in a course
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
@classmethod
def setUpClass(cls):
super(TestLTIModuleListing, cls).setUpClass()
cls.course = CourseFactory.create(display_name=cls.COURSE_NAME, number=cls.COURSE_SLUG)
cls.chapter1 = ItemFactory.create(
parent_location=cls.course.location,
display_name="chapter1",
category='chapter')
cls.section1 = ItemFactory.create(
parent_location=cls.chapter1.location,
display_name="section1",
category='sequential')
cls.chapter2 = ItemFactory.create(
parent_location=cls.course.location,
display_name="chapter2",
category='chapter')
cls.section2 = ItemFactory.create(
parent_location=cls.chapter2.location,
display_name="section2",
category='sequential')
# creates one draft and one published lti module, in different sections
cls.lti_published = ItemFactory.create(
parent_location=cls.section1.location,
display_name="lti published",
category="lti",
location=cls.course.id.make_usage_key('lti', 'lti_published'),
)
cls.lti_draft = ItemFactory.create(
parent_location=cls.section2.location,
display_name="lti draft",
category="lti",
location=cls.course.id.make_usage_key('lti', 'lti_draft'),
publish_item=False,
)
def expected_handler_url(self, handler):
"""convenience method to get the reversed handler urls"""
return "https://{}{}".format(settings.SITE_NAME, reverse(
'xblock_handler_noauth',
args=[
text_type(self.course.id),
quote_slashes(text_type(self.lti_published.scope_ids.usage_id)),
handler
]
))
def test_lti_rest_bad_course(self):
"""Tests what happens when the lti listing rest endpoint gets a bad course_id"""
bad_ids = [u"sf", u"dne/dne/dne", u"fo/ey/\\u5305"]
for bad_course_id in bad_ids:
lti_rest_endpoints_url = 'courses/{}/lti_rest_endpoints/'.format(bad_course_id)
response = self.client.get(lti_rest_endpoints_url)
self.assertEqual(404, response.status_code)
def test_lti_rest_listing(self):
"""tests that the draft lti module is part of the endpoint response"""
request = mock.Mock()
request.method = 'GET'
response
|
narrowmark/engelbart
|
note_entry.py
|
Python
|
mit
| 4,157
| 0.006495
|
import time
import wx
import xapian
from threading import Thread
from stop_words import stop_words
class NoteEntryFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, None, title="Note Entry", size=(300, 300))
self.db_path = "db"
self.InitUI()
def InitUI(self):
panel = wx.Panel(self)
# General GUI layout
        hbox = wx.BoxSizer(wx.HORIZONTAL)
sizer = wx.FlexGridSizer(3, 2, 9, 25)
subject = wx.StaticText(panel, label='Subject')
note = wx.StaticText(panel, label='Note')
self.subject_text = wx.TextCtrl(panel)
self.note_text = wx.TextCtrl(panel, style=wx.TE_MULTILINE)
sizer.AddMany([(subject), (self.subject_text, 1, wx.EXPAND),
            (note), (self.note_text, 1, wx.EXPAND)])
sizer.AddGrowableRow(1, 1)
sizer.AddGrowableCol(1, 1)
hbox.Add(sizer, proportion=1, flag=wx.ALL|wx.EXPAND, border=15)
panel.SetSizer(hbox)
# Accelerator features
save = wx.NewId()
open = wx.NewId()
self.Bind(wx.EVT_MENU, self.onCtrlS, id=save)
self.Bind(wx.EVT_MENU, self.onCtrlO, id=open)
self.accel = wx.AcceleratorTable(
[(wx.ACCEL_CTRL, ord('S'), save),
(wx.ACCEL_CTRL, ord('O'), open)])
self.SetAcceleratorTable(self.accel)
self.Show()
self.ueg()
def onCtrlS(self, e):
self.index(self.db_path)
def onCtrlO(self, e):
pass
def index(self, db_path="db"):
subject = self.subject_text.GetValue()
note = self.note_text.GetValue()
now = time.ctime()
db = xapian.WritableDatabase(db_path, xapian.DB_CREATE_OR_OPEN)
indexer = xapian.TermGenerator()
stemmer = xapian.Stem("english")
indexer.set_stemmer(stemmer)
doc = xapian.Document()
doc.set_data(note)
indexer.set_document(doc)
indexer.index_text(subject)
indexer.index_text(note)
indexer.index_text(now)
db.add_document(doc)
self.note_text.Clear()
def ueg(self):
p_search = PassiveSearchThread(self)
class PassiveSearchFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, title="Passive Search", size=(300,300))
self.parent = parent
self.InitUI()
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.compare, self.timer)
self.timer.Start(1500)
def InitUI(self):
panel = wx.Panel(self)
# General GUI layout
hbox = wx.BoxSizer(wx.HORIZONTAL)
sizer = wx.FlexGridSizer(2, 3, 9, 25)
result = wx.StaticText(panel, label='Result')
self.result_text = wx.TextCtrl(panel, style=wx.TE_MULTILINE)
sizer.Add(result, 1, wx.EXPAND)
sizer.Add(self.result_text, 1, wx.EXPAND)
sizer.AddGrowableRow(0, 2)
sizer.AddGrowableCol(1, 2)
hbox.Add(sizer, proportion=1, flag=wx.ALL|wx.EXPAND, border=10)
panel.SetSizer(hbox)
self.init_string = ''
self.Show()
def pre_process(self, query_string):
processed = ''
query_string = query_string.split(' ')
for word in query_string:
if word not in stop_words:
processed += word + ' '
return processed
def compare(self, e):
query_string = self.parent.note_text.GetValue()
if self.init_string != query_string:
self.search(e)
self.init_string = query_string
def search(self, e):
database = xapian.Database(self.parent.db_path)
enquire = xapian.Enquire(database)
query_string = self.parent.note_text.GetValue()
query_string = self.pre_process(query_string)
qp = xapian.QueryParser()
stemmer = xapian.Stem("english")
qp.set_stemmer(stemmer)
qp.set_database(database)
qp.set_stemming_strategy(xapian.QueryParser.STEM_SOME)
query = qp.parse_query(query_string)
enquire.set_query(query)
matches = enquire.get_mset(0, 10)
final = ''
for m in matches:
final = final + m.document.get_data() + "\n"
self.result_text.SetValue(final)
class PassiveSearchThread(wx.Frame, Thread):
def __init__(self, parent):
Thread.__init__(self)
self.parent = parent
self.run()
def run(self):
p_search = PassiveSearchFrame(self.parent)
if __name__ == '__main__':
app = wx.App()
NoteEntryFrame(None)
app.MainLoop()
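# Editor's note: a tiny standalone illustration of the stop-word filtering done
# in PassiveSearchFrame.pre_process above (not part of the original script);
# the word set below is a stand-in for the imported stop_words list.
#
#   stop = {'the', 'of', 'a'}
#   query = 'the history of a mouse'
#   print(' '.join(w for w in query.split(' ') if w not in stop))  # 'history mouse'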
|
rew4332/tensorflow
|
tensorflow/contrib/slim/python/slim/evaluation.py
|
Python
|
apache-2.0
| 12,824
| 0.002963
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics.
The evaluation.py module contains helper functions for evaluating TensorFlow
modules using a variety of metrics and summarizing the results.
**********************
* Evaluating Metrics *
**********************
In the simplest use case, we use a model to create the predictions, then specify
the metrics and finally call the `evaluation` method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
"accuracy": slim.metrics.accuracy(predictions, labels),
"mse": slim.metrics.mean_squared_error(predictions, labels),
})
init_op = tf.group(
tf.initialize_all_variables(),
tf.initialize_local_variables())
with tf.Session() as sess:
metric_values = slim.evaluation(
sess,
num_evals=1,
init_op=init_op,
eval_op=names_to_updates.values(),
final_op=name_to_values.values())
for metric, value in zip(names_to_values.keys(), metric_values):
logging.info('Metric %s has value: %f', metric, value)
************************************************
* Evaluating a Checkpointed Model with Metrics *
************************************************
Often, one wants to evaluate a model checkpoint saved on disk. This can be
performed once or repeatedly on a set schedule.
To evaluate a particular model, users define zero or more metrics and zero or
more summaries and call the evaluation_loop method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
  # Choose the metrics to compute:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
"accuracy": slim.metrics.accuracy(predictions, labels),
"mse": slim.metrics.mean_squared_error(predictions, labels),
})
# Define the summaries to write:
for metric_name, metric_value in metrics_to_values.iteritems():
tf.scalar_summary(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
# Evaluate every 10 minutes:
slim.evaluation_loop(
master='',
checkpoint_dir,
logdir,
num_evals=num_evals,
eval_op=names_to_updates.values(),
summary_op=tf.merge_summary(summary_ops),
eval_interval_secs=600)
**************************************************
* Evaluating a Checkpointed Model with Summaries *
**************************************************
At times, an evaluation can be performed without metrics at all but rather
with only summaries. The user need only leave out the 'eval_op' argument:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the summaries to write:
tf.scalar_summary(...)
tf.histogram_summary(...)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# Evaluate once every 10 minutes.
slim.evaluation_loop(
master='',
checkpoint_dir,
logdir,
num_evals=1,
summary_op=tf.merge_summary(summary_ops),
eval_interval_secs=600)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import summary_io
from tensorflow.python.training import supervisor
from tensorflow.python.training import training_util
__all__ = ['evaluation', 'evaluation_loop', 'wait_for_new_checkpoint']
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum amount of time to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
return checkpoint_path
def evaluation(sess,
num_evals=1,
init_op=None,
init_op_feed_dict=None,
eval_op=None,
eval_op_feed_dict=None,
final_op=None,
final_op_feed_dict=None,
summary_op=None,
summary_op_feed_dict=None,
summary_writer=None,
global_step=None):
"""Performs a single evaluation run.
A single evaluation consists of several steps run in the following order:
(1) an initialization op, (2) an evaluation op which is executed `num_evals`
times (3) a finalization op and (4) the execution of a summary op which is
written out using a summary writer.
Args:
sess: The current TensorFlow `Session`.
num_evals: The number of times to execute `eval_op`.
init_op: An operation run at the beginning of evaluation.
init_op_feed_dict: A feed dictionary to use when executing `init_op`.
eval_op: A operation run `num_evals` times.
eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
final_op: An operation to execute after all of the `eval_op` executions. The
value of `final_op` is returned.
final_op_feed_dict: A feed dictionary to use when executing `final_op`.
summary_op: A summary op executed after `eval_op` and `finalize_op`.
summary_op_feed_dict: An optional feed dictionary to use when executing the
`summary_op`.
    summary_writer: The summary writer used if `summary_op` is provided.
global_step: the global step variable. If left as `None`, then
slim.variables.global_step() is used.
Returns:
The value of `final_op` or `None` if `final_op` is `None`.
Raises:
ValueError: if `summary_op` is provided but `global_step` is `None`.
"""
if init_op is not None:
logging.info('Executing init op')
sess.run(init_op, init_op_feed_dict)
if eval_op is not None:
logging.info('Executing eval ops')
for i in range(int(num_evals)):
logging.info('Executing eval_op %d/%d', i + 1, num_evals)
sess.run(eval_op, eval_op_feed_dict)
if final_op is not None:
logging.info('Executing final op')
final_op_value = sess.run(final_op, final_op_feed_dict)
else:
final_op_value = None
if summary_op is not None:
logging.info('Executing summary op')
if global_step is None:
gl
|
cjaymes/pyscap
|
src/scap/model/oval_5/defs/linux/SystemDUnitDependencyStateElement.py
|
Python
|
gpl-3.0
| 1,172
| 0.00256
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.linux.StateType import StateType
logger = logging.getLogger(__name__)
class SystemDUnitDependencyStateElement(StateType):
MODEL_MAP = {
'tag_name': 'systemdunitdependency_state',
        'elements': [
{'tag_name': 'unit', 'class': 'scap.model.oval_5.defs.EntityStateType', 'min': 0, 'max': 1},
{'tag_name': 'dependency', 'class': 'scap.model.oval_5.defs.EntityStateType', 'min': 0, 'max': 1},
],
}
|
OxPython/Python_float_round
|
src/digit_precision_float.py
|
Python
|
epl-1.0
| 432
| 0.006977
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Jul 16, 2014
@author: anroco
How to define the number of decimal digits of a float in Python?
'''
# create a float number
f = 13.9497389867
print(f)
# round() returns the value rounded to the given number of digits after the decimal point.
print(round(f, 2))
#define 4 digits of precision
print(round(f, 4))
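# Editor's note (not part of the original snippet): round() operates on the
# binary float value, so a few results differ from decimal intuition; the
# decimal module gives exact decimal rounding when that matters.
print(round(2.675, 2)) # 2.67, because 2.675 is actually stored as 2.67499999...
from decimal import Decimal
print(Decimal('2.675').quantize(Decimal('0.01'))) # 2.68 (ROUND_HALF_EVEN)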
|
IllinoisRoboticsInSpace/Arduino_Control
|
RemoteControl/Move.py
|
Python
|
mit
| 675
| 0.056296
|
from ArduinoSerial import sendData, beginTransmission
import serial
import time
import atexit
ser = serial.Serial("/dev/ttyACM0",115200)
ser.flushInput()
#The next five lines allow the motors to stop once the ctrl+c command is given to abort the program.
def exit_handler():
sendData(ser,1,0)
sendData(ser,2,0)
atexit.register(exit_handler)
if ser.is_open:
beginTransmission(ser)
else:
print("Serial Closed")
square = [(100,100),(100,-100),(100,100),(100,-100),(100,100),(100,-100),(100,100),(100,-100),(0,0)] #This should create a square
for cmd in square:
sendData(ser, 1, cmd[0]) #This sends the commands to arduino, for each motor
sendData(ser, 2, cmd[1])
|
zouzhberk/ambaridemo
|
demo-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/scripts/application_timeline_server.py
|
Python
|
apache-2.0
| 1,573
| 0.006357
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management import *
from yarn import yarn
from service import service
class ApplicationTimelineServer(Script):
def install(self, env):
self.install_packages(env)
#self.configure(env)
  def configure(self, env):
import params
env.set_params(params)
yarn()
def start(self, env):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
service('historyserver', action='start')
def stop(self, env):
import params
env.set_params(params)
service('historyserver', action='stop')
def status(self, env):
import status_params
env.set_params(status_params)
    check_process_status(status_params.yarn_historyserver_pid_file)
if __name__ == "__main__":
ApplicationTimelineServer().execute()
|
yourlabs/django-cities-light
|
src/cities_light/tests/test_update.py
|
Python
|
mit
| 5,691
| 0
|
"""Tests for update records."""
import unittest
from dbdiff.fixture import Fixture
from .base import TestImportBase, FixtureDir
class TestUpdate(TestImportBase):
"""Tests update procedure."""
def test_update_fields(self):
"""Test all fields are updated."""
fixture_dir = FixtureDir('update')
self.import_data(
fixture_dir,
'initial_country',
'initial_region',
'initial_subregion',
'initial_city',
'initial_translations'
)
self.import_data(
fixture_dir,
'update_country',
'update_region',
'update_subregion',
'update_city',
'update_translations',
)
Fixture(
fixture_dir.get_file_path('update_fields.json')
).assertNoDiff()
def test_update_fields_wrong_timezone(self):
"""Test all fields are updated, but timezone field is wrong."""
fixture_dir = FixtureDir('update')
self.import_data(
fixture_dir,
'initial_country',
'initial_region',
'initial_subregion',
'initial_city',
'initial_translations'
)
self.import_data(
fixture_dir,
'update_country',
'update_region',
'update_subregion',
'update_city_wtz',
'update_translations',
)
Fixture(
fixture_dir.get_file_path('update_fields_wtz.json')
).assertNoDiff()
def test_change_country(self):
"""Test change country for region/city."""
fixture_dir = FixtureDir('update')
self.import_data(
fixture_dir,
'initial_country',
'initial_region',
'initial_subregion',
'initial_city',
'initial_translations'
)
self.import_data(
fixture_dir,
'change_country',
'update_region',
'update_subregion',
'update_city',
'update_translations',
)
Fixture(
fixture_dir.get_file_path('change_country.json')
).assertNoDiff()
def test_change_region_and_country(self):
"""Test change region and country."""
fixture_dir = FixtureDir('update')
self.import_data(
fixture_dir,
'initial_country',
'initial_region',
'initial_subregion',
'initial_city',
'initial_translations'
)
self.import_data(
fixture_dir,
'change_country',
'change_region',
'update_subregion',
'update_city',
'update_translations',
)
Fixture(
fixture_dir.get_file_path('change_region_and_country.json')
).assertNoDiff()
def test_keep_slugs(self):
"""Test --keep-slugs option."""
fixture_dir = FixtureDir('update')
self.import_data(
fixture_dir,
'initial_country',
'initial_region',
'initial_subregion',
'initial_city',
'initial_translations'
)
self.import_data(
fixture_dir,
'update_country',
'update_region',
'update_subregion',
'update_city',
'update_translations',
keep_slugs=True
)
Fixture(
fixture_dir.get_file_path('keep_slugs.json'),
).assertNoDiff()
def test_add_records(self):
"""Test that new records are added."""
fixture_dir = FixtureDir('update')
self.import_data(
fixture_dir,
'initial_country',
'initial_region',
'initial_subregion',
'initial_city',
'initial_translations'
)
self.import_data(
fixture_dir,
'add_country',
'add_region',
'add_subregion',
'add_city',
'add_translations'
)
Fixture(
fixture_dir.get_file_path('add_records.json')
).assertNoDiff()
def test_noinsert(self):
"""Test --noinsert option."""
fixture_dir = FixtureDir('update')
self.import_data(
fixture_dir,
'initial_country',
'initial_region',
'initial_subregion',
'initial_city',
'initial_translations'
)
self.import_data(
fixture_dir,
'add_country',
'add_region',
'add_subregion',
'add_city',
'add_translations',
noinsert=True
)
Fixture(
fixture_dir.get_file_path('noinsert.json'),
).assertNoDiff()
# TODO: make the test pass
@unittest.skip("Obsolete records are not removed yet.")
def test_remove_records(self):
"""Test that obsolete records are removed."""
fixture_dir = FixtureDir('update')
self.import_data(
fixture_dir,
'remove_initial_country',
'remove_initial_region',
'remove_initial_subregion',
'remove_initial_city',
'remove_initial_translations'
)
self.import_data(
fixture_dir,
'remove_country',
'remove_region',
'remove_subregion',
'remove_city',
'remove_translations'
)
Fixture(
fixture_dir.get_file_path('remove_records.json')
).assertNoDiff()
|
brain461/ar_too
|
ar_too/__init__.py
|
Python
|
apache-2.0
| 197
| 0.005076
|
# -*- coding: utf-8 -*-
from .api import get_artifactory_config_from_url, update_ldapSettings_from_dict, update_artifactory_config, cr_repository, update_password, get_repo_configs, get_repo_list
|
kevin-coder/tensorflow-fork
|
tensorflow/python/compiler/tensorrt/test/batch_matmul_test.py
|
Python
|
apache-2.0
| 3,185
| 0.002826
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BatchMatMulTest(trt_test.TfTrtIntegrationTestBase):
def GetParams(self):
"""Testing conversion of BatchMatMul in TF-TRT conversion."""
dtype = dtypes.float32
input_name = "input"
input_dims = [12, 5, 8, 12]
output_name = "output"
w1_name = "matmul_w1"
w1_dims = [12, 5, 12, 7]
w2_name = "matmul_w2"
w2_dims = [12, 12, 7]
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(
dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
w1 = array_ops.placeholder(dtype=dtype, shape=w1_dims, name=w1_name)
w2 = array_ops.placeholder(dtype=dtype, shape=w2_dims, name=w2_name)
with g.device("/GPU:0"):
b = constant_op.constant(np.random.randn(12, 5, 12, 7), dtype=dtype)
x1 = math_ops.matmul(inp, b)
c = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)
x1 = x1 + c
x2 = math_ops.matmul(inp, w1)
d = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)
x2 = x2 * d
e = self.trt_incompatible_op(inp)
e = gen_array_ops.reshape(e, [12, 40, 12])
x3 = math_ops.matmul(e, w2)
f = constant_op.constant(np.random.randn(40, 1), dtype=dtype)
x3 = x3 + f
x3 = gen_array_ops.reshape(x3, [12, 5, 8, 7])
x3 = self.trt_incompatible_op(x3)
out = x1 + x2 + x3
array_ops.squeeze(out, name=output_name)
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(add_shapes=True),
input_names=[input_name, w1_name, w2_name],
input_dims=[[input_dims, w1_dims, w2_dims]],
output_names=[output_name],
expected_output_dims=[[[12, 5, 8, 7]]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_0", "TRTEngineOp_1"]
if __name__ == "__main__":
test.main()
|
pr2git/e2openplugin-OpenWebif
|
plugin/__init__.py
|
Python
|
gpl-3.0
| 509
| 0.011788
|
# -*- coding: utf-8 -*-
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
import gettext
PluginLanguageDomain = "OpenWebif"
PluginLanguagePath = "Extensions/OpenWebif/locale"
def localeInit():
gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))
def _(txt):
t = gettext.dgettext(PluginLanguageDomain, txt)
if t == txt:
t = gettext.gettext(txt)
return t
localeInit()
language.addCallback(localeInit)
|
rapirent/toc_project
|
kuoteng_bot/kuoteng_bot/urls.py
|
Python
|
mit
| 972
| 0.002058
|
"""kuoteng_bot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
#from telegram_bot.views import _set_webhook
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^bot/', include('telegram_bot.urls')),
url(r'^', include('django_telegrambot.urls')),
]
#_set_webhook()
|
Peter-Collins/NormalForm
|
src/py/CoordinateChange.py
|
Python
|
gpl-2.0
| 3,820
| 0.003141
|
"""
AUTHOR: Dr. Andrew David Burbanks, 2005
This software is Copyright (C) 2004-2008 Bristol University
and is released under the GNU General Public License version 2.
MODULE: CoordinateChange
PURPOSE:
Compute the coordinate changes relating complex diagonal to complex
normal form coordinates.
NOTES:
For parallel computation, one can specify for which variable index we
want to compute the coordinate change.
"""
# parallel; we specify the index that we wish to compute.
from Polynomial import Polynomial
from IsogradeInnerTaylorCoeffs import IsogradeInnerTaylorCoeffs
from Utility import binomial, factorial
from LieTriangle import _make_db_key
class CoordinateChange:
"""
This class is responsible for taking scalar functions written in
one set of variables (either the diagonal complex or normal form
complex) and expressing them in terms of the other set of
variables, making use of the generating function that was computed
during the normalization procedure. In particular, it can be used
to express the coordinates themselves (via the scalar function
projecting onto each coordinate value) in the other coordinate
system, thereby computing the coordinate-change maps as vectors of
polynomials.
"""
def __init__(self, lie_alg, w_i_list):
self._alg = lie_alg
self._w_i = w_i_list
self._iso = IsogradeInnerTaylorCoeffs(self._alg, offset=1) #1
def get_isograde_list_handler(self):
return self._iso
def triangle_diag_in_norm(self, i, j):
"""
Express scalar function of diagonal coordinates in terms of
normal form coordinates.
"""
db_key = _make_db_key(i, j)
bracket = self._alg.bracket
if self._x_ij.has_key(db_key):
pass
else:
if (j == 0):
self._x_ij[db_key] = self._iso.poly_to_inner_taylor(self._f, i)
else:
temp = self.triangle_diag_in_norm(i+1, j-1).copy()
for k in xrange(0, i+1):
if (i+1-k) >= len(self._w_i):
assert 0, 'out of range, (k+1)=%d'%(k+1)
else:
w_term = self._w_i[i+1-k]
temp += binomial(i, k) * bracket(self.triangle_diag_in_norm(k, j-1), w_term)
self._x_ij[db_key] = temp
return self._x_ij[db_key]
def express_diag_in_norm(self, f, x_i_list, x_ij_dict, n):
self._f = f
self._x_i = x_i_list
self._x_ij = x_ij_dict
for i in xrange(0, n + 1):
self._x_i.append(self.triangle_diag_in_norm(0, i))
def triangle_norm_in_diag(self, i, j):
"""
Express normal form coordinate expression in terms of diagonal
coordinates.
"""
db_key = _make_db_key(i, j)
bracket = self._alg.bracket
if self._x_ij.has_key(db_key):
pass
else:
if (i == 0):
self._x_ij[db_key] = self._iso.poly_to_inner_taylor(self._f, j)
else:
temp = self.triangle_norm_in_diag(i-1, j+1).copy()
for k in xrange(0, i):
if (k+1) >= len(self._w_i):
assert 0, 'out of range, (k+1)=%d'%(k+1)
else:
w_term = self._w_i[k+1]
temp -= binomial(i-1, k) * bracket(self.triangle_norm_in_diag(i-k-1, j), w_term) #NOTE: MUST BE MINUS!!!
self._x_ij[db_key] = temp
return self._x_ij[db_key]
def express_norm_in_diag(self, f, x_i_list, x_ij_dict, n):
self._f = f
self._x_i = x_i_list
self._x_ij = x_ij_dict
for i in xrange(0, n + 1):
self._x_i.append(self.triangle_norm_in_diag(i, 0))
|
google-research/episodic-curiosity
|
third_party/gym/ant_wrapper_test.py
|
Python
|
apache-2.0
| 2,294
| 0.001308
|
# coding=utf-8
# The MIT License
#
# Copyright (c) 2016 OpenAI (https://openai.com)
# Copyright (c) 2018 The TF-Agents Authors.
# Copyright (c) 2018 Google LLC (http://google.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Tests for google3.third_party.py.third_party.gym.ant_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from third_party.gym import ant_wrapper
from google3.pyglib import resources
from google3.testing.pybase import googletest
ASSETS_DIR = 'google3/third_party/py/third_party.gym/assets'
def get_resource(filename):
return resources.GetResourceFilenameInDirectoryTree(
os.path.join(ASSETS_DIR, filename))
class AntWrapperTest(googletest.TestCase):
def test_ant_wrapper(self):
env = ant_wrapper.AntWrapper(
get_resource('mujoco_ant_custom_texture_camerav2.xml'),
texture_mode='fixed',
texture_file_pattern=get_resource('texture.png'))
env.reset()
obs, unused_reward, unused_done, info = env.step(env.action_space.sample())
self.assertEqual(obs.shape, (27,))
self.assertIn('frame', info)
self.assertEqual(info['frame'].shape,
(120, 160, 3))
if __name__ == '__main__':
googletest.main()
|
ActivisionGameScience/assertpy
|
tests/test_same_as.py
|
Python
|
bsd-3-clause
| 2,899
| 0.008624
|
# Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
#    may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from assertpy import assert_that, fail
def test_is_same_as():
for obj in [object(), 1, 'foo', True, None, 123.456]:
assert_that(obj).is_same_as(obj)
def test_is_same_as_failure():
try:
obj = object()
other = object()
assert_that(obj).is_same_as(other)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches('Expected <.+> to be identical to <.+>, but was not.')
def test_is_not_same_as():
obj = object()
other = object()
assert_that(obj).is_not_same_as(other)
assert_that(obj).is_not_same_as(1)
assert_that(obj).is_not_same_as(True)
assert_that(1).is_not_same_as(2)
assert_that({'a':1}).is_not_same_as({'a':1})
assert_that([1,2,3]).is_not_same_as([1,2,3])
if sys.version_info[0] == 3 and sys.version_info[1] >= 7:
assert_that((1,2,3)).is_same_as((1,2,3)) # tuples are identical in py 3.7
else:
assert_that((1,2,3)).is_not_same_as((1,2,3))
def test_is_not_same_as_failure():
for obj in [object(), 1, 'foo', True, None, 123.456]:
try:
assert_that(obj).is_not_same_as(obj)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches('Expected <.+> to be not identical to <.+>, but was.')
|
sebrandon1/neutron
|
neutron/extensions/providernet.py
|
Python
|
apache-2.0
| 3,608
| 0
|
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api import validators
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from neutron._i18n import _
from neutron.api import extensions
NETWORK_TYPE = 'provider:network_type'
PHYSICAL_NETWORK = 'provider:physical_network'
SEGMENTATION_ID = 'provider:segmentation_id'
ATTRIBUTES = (NETWORK_TYPE, PHYSICAL_NETWORK, SEGMENTATION_ID)
# Common definitions for maximum string field length
NETWORK_TYPE_MAX_LEN = 32
PHYSICAL_NETWORK_MAX_LEN = 64
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {
NETWORK_TYPE: {'allow_post': True, 'allow_put': True,
'validate': {'type:string': NETWORK_TYPE_MAX_LEN},
'default': constants.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
PHYSICAL_NETWORK: {'allow_post': True, 'allow_put': True,
'validate': {'type:string':
PHYSICAL_NETWORK_MAX_LEN},
'default': constants.ATTR_NOT_SPECIFIED,
'enforce_policy': True,
'is_visible': True},
SEGMENTATION_ID: {'allow_post': True, 'allow_put': True,
'convert_to': converters.convert_to_int,
'enforce_policy': True,
'default': constants.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
def _raise_if_updates_provider_attributes(attrs):
"""Raise exception if provider attributes are present.
This method is used for plugins that do not support
updating provider networks.
"""
if any(validators.is_attr_set(attrs.get(a)) for a in ATTRIBUTES):
msg = _("Plugin does not support updating provider attributes")
raise n_exc.InvalidInput(error_message=msg)
class Providernet(extensions.ExtensionDescriptor):
"""Extension class supporting provider networks.
This class is used by neutron's extension framework to make
metadata about the provider network extension available to
clients. No new resources are defined by this extension. Instead,
the existing network resource's request and response messages are
extended with attributes in the provider namespace.
    With admin rights, network dictionaries returned will also include
provider attributes.
"""
@classmethod
def get_name(cls):
return "Provider Network"
@classmethod
def get_alias(cls):
return "provider"
@classmethod
def get_description(cls):
return "Expose mapping of virtual networks to physical networks"
@classmethod
def get_updated(cls):
return "2012-09-07T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
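# Illustrative sketch (an editorial addition, not part of this extension): how a
# plugin that cannot update provider networks might use the helper above. Any
# attrs dict whose provider:* values differ from ATTR_NOT_SPECIFIED makes
# _raise_if_updates_provider_attributes raise InvalidInput.
def _sketch_reject_provider_update():
    attrs = {NETWORK_TYPE: 'vlan', SEGMENTATION_ID: 100}
    try:
        _raise_if_updates_provider_attributes(attrs)
    except n_exc.InvalidInput:
        return True  # update rejected, as expected for set provider attributes
    return False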
|
thdb-theo/Zombie-Survival
|
src/pickup.py
|
Python
|
mit
| 3,543
| 0.001411
|
from functools import partial
from random import random, randint, choice
import pygame
import init as _
from baseclass import BaseClass
from options import Options
try:
from cython_ import collide
except ImportError:
from python_ import collide
from miscellaneous import further_than, scale
from tile import Tile
class PickUp(BaseClass):
"""Creates a PickUp
Params:
x: x coordinate of the PickUp
y: y coordinate of the PickUp
spawn_tile: The index of the tile on which the PickUp is
type_: A float between 0 and 1. If it is over 2/3 the PickUp is ammo else health
Example:
>>> tile = Tile.instances[PickUp.spawn_tiles[0]]
>>> a = PickUp(*tile.pos, PickUp.spawn_tiles[0], type_=0.9)
>>> a.type
"health"
TODO: Add more pick ups"""
with open(Options.mappath) as file:
spawn_tiles = [
i for i, x in enumerate(file.read().replace("\n", "")) if x == "P"
]
init_round, left_round = 4, 4
zombie_init_round = None
images = {"ammo": scale(pygame.image.load("assets/Images/PickUps/ammo.png")),
"health": scale(pygame.image.load("assets/Images/PickUps/health.png"))}
sounds = {"ammo": pygame.mixer.Sound("assets/Audio/PickUp/ammo_short.ogg"),
"health": pygame.mixer.Sound("assets/Audio/PickUp/health.ogg")}
sounds["ammo"].set_volume(Options.volume)
sounds["health"].set_volume(Options.volume)
instances = set()
def __init__(self, x, y, spawn_tile, type_):
super().__init__(x, y)
PickUp.instances.add(self)
self.incr = randint(20, 35)
self.spawn_tile = spawn_tile
self.type = "ammo" if type_ < 2 / 3 else "health"
        PickUp.spawn_tiles.remove(spawn_tile)
@classmethod
def spawn(cls, survivor):
_further_than = partial(further_than, survivor=survivor, min_dist=150)
pos_spawn_tiles = list(filter(_further_than, cls.spawn_tiles))
        if not pos_spawn_tiles: # If no pick-up spawn is far enough away
            if not cls.spawn_tiles: # If all pick-up spawns are occupied, don't spawn
return
pos_spawn_tiles.extend(cls.spawn_tiles)
cls.left_round -= 1
type_ = random()
spawn_tile = choice(pos_spawn_tiles)
spawn_node = Tile.instances[spawn_tile]
cls(*spawn_node.pos, spawn_tile, type_)
@classmethod
def update(cls, screen, survivor, total_frames):
if cls.left_round:
try:
if total_frames % ((Options.fps * cls.zombie_init_round * 2) //
cls.init_round) == 0:
cls.spawn(survivor)
except ZeroDivisionError:
if total_frames % Options.fps * 10 == 0:
cls.spawn(survivor)
del_pick_up = set()
for pick_up in cls.instances:
screen.blit(cls.images[pick_up.type], pick_up.pos.as_ints())
if collide(*pick_up.pos, *pick_up._size, *survivor.pos, *survivor._size):
setattr(survivor, pick_up.type,
getattr(survivor, pick_up.type) + pick_up.incr)
cls.sounds[pick_up.type].play()
cls.spawn_tiles.append(pick_up.spawn_tile)
del_pick_up.add(pick_up)
del pick_up
cls.instances -= del_pick_up
if __name__ == "__main__":
Tile.create()
import doctest
doctest.testmod()
|
alexwlchan/python-taskpaper
|
test/test_item.py
|
Python
|
mit
| 3,541
| 0
|
# -*- encoding: utf-8 -*-
from hypothesis import given
from hypothesis.strategies import integers, lists
import pytest
from taskpaper import TaskPaperItem, TaskPaperError
from utils import taskpaper_item_strategy
@given(integers())
def test_setting_tab_size(tab_size):
"""We can set the tab size on TaskPaperItem."""
item = TaskPaperItem('hello world', tab_size=tab_size)
assert item.tab_size == tab_size
class TestParentChildRelationship(object):
"""
Tests of the parent-child relationship between items.
"""
def test_default_parent_is_none(self):
"""By default, a task does not have a parent."""
item = TaskPaperItem('hello world')
assert item.parent is None
def test_default_task_has_no_children(self):
"""By default, a task has no children."""
item = TaskPaperItem('hello world')
assert item.children == []
def test_setting_a_parent(self):
"""Test we can initialize an item with a parent."""
item_p = TaskPaperItem('parent')
item_c = TaskPaperItem('child', parent=item_p)
assert item_c.parent == item_p
assert item_p.children == [item_c]
def test_updating_a_parent(self):
"""Test we can create an item with a parent, then change the parent."""
item_p1 = TaskPaperItem('parent1')
item_p2 = TaskPaperItem('parent2')
item_c = TaskPaperItem('child', parent=item_p1)
item_c.parent = item_p2
assert item_c.parent == item_p2
assert item_p2.children == [item_c]
assert item_p1.children == []
def test_updating_to_same_parent(self):
"""
Create an item with a parent, change the parent to existing parent,
check nothing happens.
"""
item_p = TaskPaperItem('parent')
item_c = TaskPaperItem('child', parent=item_p)
        item_c.parent = item_p
assert item_c.parent == item_p
assert item_p.children == [item_c]
def test_removing_a_parent(self):
"""
Create an item with a parent, then set the parent to None. Check the
        child is removed from the list of its previous parent's children.
"""
item_p = TaskPaperItem('parent')
        item_c = TaskPaperItem('child', parent=item_p)
item_c.parent = None
assert item_c.parent is None
assert item_p.children == []
def test_detect_item_cannot_be_its_parents_parent(self):
"""
An item cannot be the parent of its own parent.
"""
item_p = TaskPaperItem('parent')
item_c = TaskPaperItem('child', parent=item_p)
with pytest.raises(TaskPaperError):
item_p.parent = item_c
@given(lists(taskpaper_item_strategy(), min_size=2))
def test_detecting_circular_chain(self, items):
"""
We detect an arbitrarily long circular parent chain.
"""
# Create a chain of parent-child relationships
# items[0] -> items[1] -> ... -> items[n]
for idx, alt_item in enumerate(items[1:], start=1):
items[idx-1].parent = alt_item
# Now make the first item the parent of the last, and check we
# get an exception.
with pytest.raises(TaskPaperError):
items[-1].parent = items[0]
def test_an_item_cannot_be_its_own_parent(self):
"""
An item cannot be its own parent.
"""
item = TaskPaperItem('hello world')
with pytest.raises(TaskPaperError):
item.parent = item
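# Illustrative sketch (an editorial addition, not taskpaper's actual
# implementation): one way the circular-chain rules exercised above can be
# enforced -- walk the proposed parent's ancestor chain and refuse the
# assignment if it ever reaches the item itself.
def _would_create_cycle(item, candidate_parent):
    node = candidate_parent
    while node is not None:
        if node is item:
            return True
        node = node.parent
    return False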
|
mkmeral/TevitolApplication
|
application/backend/application/views.py
|
Python
|
gpl-3.0
| 2,676
| 0.00299
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.contrib.auth.models import User
from application.serializers import UserSerializer, ApplicationSerializer, ApplicationListSerializer
from rest_framework import viewsets, status
from application.models import Application, validate_tc
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.http import Http404
from rest_framework import permissions
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.parsers import FileUploadParser
class ApplicationList(APIView):
def get(self, request, format=None):
applications = Application.objects.all()
serializer = ApplicationListSerializer(applications, many=True)
return Response(serializer.data)
class ApplicationDetail(APIView):
def get_object(self, pk):
try:
return Application.objects.get(pk=pk)
except Application.DoesNotExist:
raise Http404
    def get(self, request, pk, format=None):
applications = self.get_object(pk)
serializer = ApplicationSerializer(applications)
return Response(serializer.data)
def put(self, request, pk, format=None):
application = self.get_object(pk)
serializer = ApplicationSerializer(application, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
application = self.get_object(pk)
application.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class ApplicationCreate(APIView):
def post(self, request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
Application.objects.create(user=User.objects.get(pk=serializer.data['id']))
return Response(serializer.data)
class FileUpload(APIView):
parser_classes = (FileUploadParser,)
def post(self, request, format=None):
file_obj = request.FILES['file']
# do some stuff with uploaded file
return Response(status=204)
@api_view(['GET'])
def tc_validator(request, tc):
if len(User.objects.filter(username=tc)) != 0:
return Response({'tc': tc}, status=status.HTTP_302_FOUND)
try:
validate_tc(tc)
return Response({'tc': tc}, status=status.HTTP_200_OK)
except:
return Response({'tc': tc}, status=status.HTTP_400_BAD_REQUEST)
|
Xinglab/rmats2sashimiplot
|
src/MISO/misopy/test_miso.py
|
Python
|
gpl-2.0
| 7,024
| 0.002847
|
#!/usr/bin/env python
import os
import sys
import unittest
import pysam
import sam_utils
class TestMISO(unittest.TestCase):
"""
Test MISO functionality.
"""
def setUp(self):
# Find out the current directory
self.miso_path = \
os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
self.tests_data_dir = \
os.path.join(self.miso_path, "test-data")
self.events_analysis_cmd = "miso"
self.tests_output_dir = \
os.path.join(self.miso_path, "test-output")
self.test_sam_filename = \
os.path.join(self.tests_data_dir,
"sam-data",
"c2c12.Atp2b1.sam")
self.gff_events_dir = \
os.path.join(self.miso_path, "gff-events")
self.sam_to_bam_script = "sam_to_bam"
self.index_gff_script = "index_gff"
def test_a_sam_to_bam(self):
"""
Test conversion of SAM to BAM.
The 'a' ensures this runs first.
"""
print "Testing conversion of SAM to BAM..."
output_dir = \
os.path.join(self.tests_output_dir, "sam-output")
sam_to_bam_cmd = \
"%s --convert %s %s" %(self.sam_to_bam_script,
self.test_sam_filename,
output_dir)
print "Executing: %s" %(sam_to_bam_cmd)
os.system(sam_to_bam_cmd)
# Make sure conversion worked; sorted, indexed BAM file is outputted
assert(os.path.exists(os.path.join(output_dir,
"c2c12.Atp2b1.sorted.bam")))
def test_a2_strandedness(self):
"""
Test that strandedness is read correctly.
"""
# Read 1 is forward, on plus strand
# Has flag 129, i.e. '0b10000001'
f_read = pysam.AlignedRead()
f_read.qname = "f_read"
f_read.flag = 129
f_read.rname = 9
f_read.pos = 4991443
# Read 2 is reverse, on minus strand
# Has flag 81, i.e. '0b1010001'
r_read = pysam.AlignedRead()
r_read.qname = "r_read"
r_read.flag = 81
r_read.rname = 9
r_read.pos = 4991578
# Test that we can read the BAM strand flag correctly
assert(sam_utils.flag_to_strand(f_read.flag) == "+"), \
"Error in determining plus strand of read."
assert(sam_utils.flag_to_strand(r_read.flag) == "-"), \
"Error in determining minus strand of read."
##
## Test stranded-ness rules
##
# fr-unstranded,
# fr-firststrand,
plus_target_strand = "+"
minus_target_strand = "-"
# fr-unstranded: both strand reads should match
# either target strand
print "Testing fr-unstranded..."
for curr_read in [f_read, r_read]:
for target in [plus_target_strand, minus_target_strand]:
print "Checking read ", curr_read.qname, " against ", target
assert(sam_utils.read_matches_strand(curr_read,
target,
"fr-unstranded") == True), \
"Error checking strand of fr-unstranded."
# fr-firststrand: forward read must match target strand,
# i.e. +read matches +target, and -read matches -target
# test +read
print "Testing fr-firststrand..."
assert(sam_utils.read_matches_strand(f_read,
plus_target_strand,
"fr-firststrand") == True), \
            "+read must match +target under fr-firststrand."
assert(sam_utils.read_matches_strand(f_read,
minus_target_strand,
"fr-firststrand") == False), \
"+read must match +target under fr-firststrand."
# test -read
assert(sam_utils.read_matches_strand(r_read,
plus_target_strand,
"fr-firststrand") == False), \
"-read must match -target under fr-firststrand."
assert(sam_utils.read_matches_strand(r_read,
minus_target_strand,
"fr-firststrand") == True), \
"-read must match -target under fr-firststrand."
# Test fr-firststrand read pair
pe = (300, 10)
assert(sam_utils.read_matches_strand((f_read, r_read),
plus_target_strand,
"fr-firststrand",
paired_end=pe) == True), \
"(+, -) must match +target under fr-firststrand."
# If target strand is -, second read must match minus strand
assert(sam_utils.read_matches_strand((f_read, r_read),
minus_target_strand,
"fr-firststrand",
paired_end=pe) == True), \
"(+, -) must match -target under fr-firststrand."
def test_z_gene_psi(self):
"""
Test gene-level Psi inferences using SAM/BAM reads.
The 'z' ensures this runs last.
"""
print "Testing gene-level Psi..."
sam_dir = os.path.join(self.tests_output_dir, "sam-output")
bam_filename = os.path.join(sam_dir, "c2c12.Atp2b1.sorted.bam")
read_len = 36
insert_mean = 250
insert_sd = 30
# First index the GFF of interest
gff_filename = os.path.join(self.gff_events_dir,
"mm9",
"genes",
"Atp2b1.mm9.gff")
gff_index_dir = os.path.join(self.gff_events_dir,
"mm9",
"genes",
"Atp2b1",
"indexed")
print "Testing GFF indexing of: %s" %(gff_filename)
index_cmd = "%s --index %s %s" %(self.index_gff_script,
gff_filename,
gff_index_dir)
print "Executing: %s" %(index_cmd)
os.system(index_cmd)
output_dir = os.path.join(self.tests_output_dir,
"gene-psi-output")
miso_cmd = "%s --run %s %s --output-dir %s --read-len %d " \
%(self.events_analysis_cmd,
gff_index_dir,
bam_filename,
output_dir,
read_len)
print "Executing: %s" %(miso_cmd)
os.system(miso_cmd)
def main():
unittest.main()
if __name__ == '__main__':
main()
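# Illustrative sketch (an editorial addition, not part of MISO): the strand
# checks above rely on the standard SAM flag bit 0x10 marking a reverse-strand
# read, so flag 129 decodes to '+' and flag 81 decodes to '-'.
def _flag_to_strand_sketch(flag):
    return '-' if flag & 0x10 else '+'
assert _flag_to_strand_sketch(129) == '+'
assert _flag_to_strand_sketch(81) == '-'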
|
vamst/COSMOS2
|
cosmos/web/admin.py
|
Python
|
gpl-3.0
| 358
| 0.002793
|
# from .. import Workflow, Stage, Task, TaskFile
#
# from flask.ext import admin
# from flask.ext.admin.contrib import sqla
#
#
# def add_cosmos_admin(flask_app, session):
# adm = admin.Admin(flask_app, 'Flask Admin', base_template="admin_layout.html")
#     for m in [Workflow, Stage, Task, TaskFile]:
# adm.add_view(sqla.ModelView(m, session))
|
jamiefolsom/edx-platform
|
lms/djangoapps/instructor_task/tests/test_tasks_helper.py
|
Python
|
agpl-3.0
| 61,734
| 0.003193
|
# -*- coding: utf-8 -*-
"""
Unit tests for LMS instructor-initiated background tasks helper functions.
Tests that CSV grade report generation works with unicode emails.
"""
import ddt
from mock import Mock, patch
import tempfile
from openedx.core.djangoapps.course_groups import cohorts
import unicodecsv
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from certificates.models import CertificateStatuses
from certificates.tests.factories import GeneratedCertificateFactory, CertificateWhitelistFactory
from course_modes.models import CourseMode
from courseware.tests.factories import InstructorFactory
from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin, InstructorTaskModuleTestCase
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from shoppingcart.models import Order, PaidCourseRegistration, CourseRegistrationCode, Invoice, \
CourseRegistrationCodeInvoiceItem, InvoiceTransaction, Coupon
from student.tests.factories import UserFactory, CourseModeFactory
from student.models import CourseEnrollment, CourseEnrollmentAllowed, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED
from verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from instructor_task.models import ReportStore
from instructor_task.tasks_helper import (
cohort_students_and_upload,
upload_problem_responses_csv,
upload_grades_csv,
upload_problem_grade_report,
upload_students_csv,
upload_may_enroll_csv,
upload_enrollment_report,
upload_exec_summary_report,
generate_students_certificates,
)
from openedx.core.djangoapps.util.testing import ContentGroupTestCase, TestConditionalContent
@ddt.ddt
class TestInstructorGradeReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV grade report generation works.
"""
def setUp(self):
super(TestInstructorGradeReport, self).setUp()
self.course = CourseFactory.create()
@ddt.data([u'student@example.com', u'ni\xf1o@example.com'])
def test_unicode_emails(self, emails):
"""
Test that students with unicode characters in emails is handled.
"""
for i, email in enumerate(emails):
self.create_student('student{0}'.format(i), email)
self.current_task = Mock()
self.current_task.update_state = Mock()
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = self.current_task
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
num_students = len(emails)
self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)
@patch('instructor_task.tasks_helper._get_current_task')
@patch('instructor_task.tasks_helper.iterate_grades_for')
def test_grading_failure(self, mock_iterate_grades_for, _mock_current_task):
"""
Test that any grading errors are properly reported in the
progress dict and uploaded to the report store.
"""
# mock an error response from `iterate_grades_for`
mock_iterate_grades_for.return_value = [
(self.create_student('username', 'student@example.com'), {}, 'Cannot grade student')
]
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id)))
def _verify_cell_data_for_user(self, username, course_id, column_header, expected_cell_content):
"""
Verify cell data in the grades CSV for a particular user.
"""
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_grades_csv(None, None, course_id, None, 'graded')
self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
report_csv_filename = report_store.links_for(course_id)[0][0]
with open(report_store.path_to(course_id, report_csv_filename)) as csv_file:
for row in unicodecsv.DictReader(csv_file):
if row.get('username') == username:
self.assertEqual(row[column_header], expected_cell_content)
def test_cohort_data_in_grading(self):
"""
Test that cohort data is included in grades csv if cohort configuration is enabled for course.
"""
cohort_groups = ['cohort 1', 'cohort 2']
course = CourseFactory.create(cohort_config={'cohorted': True, 'auto_cohort': True,
'auto_cohort_groups': cohort_groups})
user_1 = 'user_1'
user_2 = 'user_2'
CourseEnrollment.enroll(UserFactory.create(username=user_1), course.id)
CourseEnrollment.enroll(UserFactory.create(username=user_2), course.id)
# In auto cohorting a group will be assigned to a user only when user visits a problem
# In grading calculation we only add a group in csv if group is already assigned to
# user rather than creating a group automatically at runtime
self._verify_cell_data_for_user(user_1, course.id, 'Cohort Name', '')
self._verify_cell_data_for_user(user_2, course.id, 'Cohort Name', '')
def test_unicode_cohort_data_in_grading(self):
"""
Test that cohorts can contain unicode characters.
"""
course = CourseFactory.create(cohort_config={'cohorted': True})
# Create users and manually assign cohorts
user1 = UserFactory.create(username='user1')
user2 = UserFactory.create(username='user2')
CourseEnrollment.enroll(user1, course.id)
CourseEnrollment.enroll(user2, course.id)
professor_x = u'ÞrÖfessÖr X'
magneto = u'MàgnëtÖ'
cohort1 = CohortFactory(course_id=course.id, name=professor_x)
cohort2 = CohortFactory(course_id=course.id, name=magneto)
cohort1.users.add(user1)
cohort2.users.add(user2)
self._verify_cell_data_for_user(user1.username, course.id, 'Cohort Name', professor_x)
self._verify_cell_data_for_user(user2.username, course.id, 'Cohort Name', magneto)
def test_unicode_user_partitions(self):
"""
Test that user partition groups can contain unicode characters.
"""
user_groups = [u'ÞrÖfessÖr X', u'MàgnëtÖ']
user_partition = UserPartition(
0,
'x_man',
'X Man',
[
Group(0, user_groups[0]),
Group(1, user_groups[1])
]
)
# Create course with group configurations
self.initialize_course(
course_factory_kwargs={
'user_partitions': [user_partition]
}
)
_groups = [group.name for group in self.course.user_partitions[0].groups]
self.assertEqual(_groups, user_groups)
def test_cohort_scheme_partition(self):
"""
Test that cohort-schemed user partitions are ignored in the
grades export.
"""
# Set up a course with 'cohort' and 'random' user partitions.
cohort_scheme_partition = UserPartition(
0,
'Cohort-schemed Group Configuration',
'Group Configuration based on Cohorts',
[Group(0, '
|
noironetworks/group-based-policy
|
gbpservice/nfp/common/constants.py
|
Python
|
apache-2.0
| 4,212
| 0.000237
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
FIREWALL = 'firewall'
LOADBALANCERV2 = 'loadbalancerv2'
VPN = 'vpn'
GENERIC_CONFIG = 'generic_config'
GBP_MODE = "gbp"
NEUTRON_MODE = "neutron"
NOVA_MODE = "nova"
NEUTRON_PORT = "
|
neutron_port"
GBP_PORT = "gbp_policy_target"
NEUTRON_NETWORK = "neutron_network"
GBP_NETWORK = "gbp_group"
PROVIDER = "provider"
CONSUMER = "consumer"
STITCHING = "stitching"
MANAGEMENT = "management"
MONITOR = "monitoring"
GATEWAY_TYPE = "gateway"
ENDPOINT_TYPE = "endpoint"
CREATE = "create"
UPDATE = "update"
DELETE = "delete"
SUCCESS = 'SUCCESS'
FOREVER = 'forever'
INITIAL = 'initial'
ACTIVE_PORT = "ACTIVE"
STANDBY_PORT = "STANDBY"
MASTER_PORT = "MASTER"
STANDALONE_PORT = "STANDALONE"
ACTIVE = "ACTIVE"
# REVISIT(ashu) - Merge to have single BUILD state
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
ERROR = "ERROR"
BUILD = "BUILD"
NFP_STATUS = [ACTIVE, PENDING_CREATE, PENDING_UPDATE, PENDING_DELETE, ERROR]
DEVICE_ORCHESTRATOR = "device_orch"
SERVICE_ORCHESTRATOR = "service_orch"
HEAT_CONFIG_TAG = 'heat_config'
CONFIG_INIT_TAG = 'config_init'
ANSIBLE_TAG = 'ansible'
CUSTOM_JSON = 'custom_json'
COMPLETED = "COMPLETED"
IN_PROGRESS = "IN_PROGRESS"
CONFIG_INIT_TAG = "config_init"
CONFIG_SCRIPT = 'config_script'
CONFIG_TAG_RESOURCE_MAP = {
HEAT_CONFIG_TAG: 'heat',
CONFIG_INIT_TAG: 'config_init',
ANSIBLE_TAG: 'ansible',
CUSTOM_JSON: 'custom_json'}
MAXIMUM_INTERFACES = 'maximum_interfaces'
SUPPORTS_SHARING = 'supports_device_sharing'
SUPPORTS_HOTPLUG = 'supports_hotplug'
PERIODIC_HM = 'periodic_healthmonitor'
DEVICE_TO_BECOME_DOWN = 'DEVICE_TO_BECOME_DOWN'
METADATA_SUPPORTED_ATTRIBUTES = [MAXIMUM_INTERFACES,
SUPPORTS_SHARING,
SUPPORTS_HOTPLUG]
LOADBALANCERV2_RPC_API_VERSION = "1.0"
HEALTHMONITOR_RESOURCE = 'healthmonitor'
INTERFACE_RESOURCE = 'interfaces'
ROUTES_RESOURCE = 'routes'
MANAGEMENT_INTERFACE_NAME = 'mgmt_interface'
VYOS_VENDOR = 'vyos'
HAPROXY_LBAASV2 = 'haproxy'
NFP_VENDOR = 'nfp'
L3_INSERTION_MODE = "l3"
request_event = "REQUEST"
response_event = "RESPONSE"
error_event = "ERROR"
# POLLING EVENTS SPACING AND MAXRETRIES
DEVICE_SPAWNING_SPACING = 10
DEVICE_SPAWNING_MAXRETRY = 25
DEVICE_BEING_DELETED_SPACING = 5
DEVICE_BEING_DELETED_MAXRETRY = 20
APPLY_USER_CONFIG_IN_PROGRESS_SPACING = 10
APPLY_USER_CONFIG_IN_PROGRESS_MAXRETRY = 20
UPDATE_USER_CONFIG_PREPARING_TO_START_SPACING = 10
UPDATE_USER_CONFIG_PREPARING_TO_START_MAXRETRY = 20
UPDATE_USER_CONFIG_STILL_IN_PROGRESS_MAXRETRY = 300
DELETE_USER_CONFIG_IN_PROGRESS_SPACING = 10
DELETE_USER_CONFIG_IN_PROGRESS_MAXRETRY = 20
CHECK_USER_CONFIG_COMPLETE_SPACING = 10
CHECK_USER_CONFIG_COMPLETE_MAXRETRY = 40
PULL_NOTIFICATIONS_SPACING = 10
# nfp_node_deriver_config
# all units in sec.
SERVICE_CREATE_TIMEOUT = 1500
SERVICE_DELETE_TIMEOUT = 600
# heat stack creation timeout
STACK_ACTION_WAIT_TIME = 300
# default directory for config files
CONFIG_DIR = '/etc/nfp/'
NFP_STATUS_MAP = {
ERROR: {'status': ERROR,
'status_details': 'Node deployment failed'},
ACTIVE: {'status': ACTIVE,
'status_details': 'Node deployment completed'},
BUILD: {'status': BUILD,
'status_details': 'Node deployment is in progress'},
PENDING_CREATE: {'status': BUILD,
'status_details': 'Node deployment is in progress'},
PENDING_UPDATE: {'status': BUILD,
'status_details': 'update of node is in progress'},
PENDING_DELETE: {'status': BUILD,
'status_details': 'delete of node is in progress'}
}
|
nextgenusfs/ufits
|
amptk/install.py
|
Python
|
bsd-2-clause
| 3,232
| 0.008045
|
#!/usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import os
import argparse
import tarfile
import gzip
import json
import requests
import shutil
from amptk import amptklib
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self,prog):
super(MyFormatter,self).__init__(prog,max_help_position=50)
def main(args):
parser=argparse.ArgumentParser(prog='amptk-install.py',
description='''Script to download preformatted databases''',
epilog="""Written by Jon Palmer (2019) nextgenusfs@gmail.com""",
formatter_class=MyFormatter)
parser.add_argument('-i','--input', nargs='+', required=True, choices=['ITS', '16S', 'LSU', 'COI'], help='Install Databases')
parser.add_argument('-f','--force', action='store_true', help='Overwrite existing databases')
parser.add_argument('-l','--local', action='store_true', help='Use local downloads.json for links')
args=parser.parse_args(args)
parentdir = os.path.join(os.path.dirname(amptklib.__file__))
    # download from GitHub to get the most recent databases
if not args.local:
try:
print('Retrieving download links from GitHub Repo')
URL = json.loads(requests.get("https://raw.githubusercontent.com/nextgenusfs/amptk/master/amptk/downloads.json").text)
except:
print('Unable to download links from GitHub, using funannotate version specific links')
with open(os.path.join(os.path.dirname(__file__), 'downloads.json')) as infile:
URL = json.load(infile)
else:
with open(os.path.join(os.path.dirname(__file__), 'downloads.json')) as infile:
URL = json.load(infile)
for x in args.input:
udbfile = os.path.join(parentdir, 'DB', x+'.udb')
if os.path.isfile(udbfile):
if not args.force:
print("A formated database was found, to overwrite use '--force'. You can add more custom databases by using the `amptk da
|
tabase` command.")
sys.exit(1)
#download
if not x in URL:
if args.force:
continue
print("%s not valid, choices are ITS, 16S, LSU, COI" % x)
sys.exit(1)
print("Downloading %s pre-formatted database" % x)
address = URL.get(x)
if not os.path.isfile(x+'.amptk.tar.gz'):
amptklib.download(address, x+'.amptk.tar.gz')
tfile = tarfile.open(x+'.amptk.tar.gz', 'r:gz')
tfile.extractall(x)
for file in os.listdir(x):
shutil.move(os.path.join(x,file), os.path.join(parentdir, 'DB', file))
shutil.rmtree(x)
os.remove(x+'.amptk.tar.gz')
print('Extracting FASTA files for {:}'.format(x))
extracted = os.path.join(parentdir, 'DB', x+'.extracted.fa')
cmd = ['vsearch', '--udb2fasta', udbfile, '--output', extracted]
amptklib.runSubprocess5(cmd)
print("{:} taxonomy database installed to {:}".format(x, os.path.join(parentdir, 'DB')))
if __name__ == "__main__":
main()
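# Illustrative usage sketch (an editorial addition): main() accepts an
# argv-style list and parses it with argparse, so the same download can be
# driven from Python (module path assumed from the file location shown above):
#
#     from amptk.install import main
#     main(['-i', 'ITS', '16S', '--force'])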
|
resmio/django-sendgrid
|
sendgrid/tests/test_signals.py
|
Python
|
bsd-2-clause
| 3,625
| 0
|
from django.test import TestCase, Client
from sendgrid import utils, signals
import json
class SignalTestCase(TestCase):
def setUp(self):
self.client = Client()
self.email_data = {'subject': 'Test Subject',
'body': 'Hi, I am a test body',
'from_email': 'email@example.com',
'to': ('other_email@example.com', )}
def test_received_email(self):
""" Test signals triggered by sendgrid callback.
"""
data = []
def email_event_handler(sender, signal):
data.append((sender, signal, ))
signals.email_event.connect(email_event_handler)
# check if we received signals
self.assertEqual(len(data), 0)
message = utils.SendgridEmailMessage(**self.email_data)
message.send()
# check if we received the signal triggered by the email creation
self.assertEqual(len(data), 1)
response = self.client.post('/sendgrid_callback/',
data=json.dumps([{
'email': 'other_email@example.com',
'uuid': message.uuid,
'event': 'processed',
'timestamp': '123456789',
}, ]),
content_type='application/json')
        # verify that we received a signal
self.assertEqual(len(data), 2)
        self.assertEqual(data[1][0].event, 'processed')
self.assertEqual(data[1][0].uuid, message.uuid)
self.assertEqual(response.status_code, 200)
def test_dupe_signals(self):
""" Test handling of duplicate signals.
"""
data = []
def email_event_handler(sender, signal):
data.append((sender, signal, ))
signals.email_event.connect(email_event_handler)
# check if we received signals
self.assertEqual(len(data), 0)
message = utils.SendgridEmailMessage(**self.email_data)
message.send()
# check if we received the signal triggered by the email creation
self.assertEqual(len(data), 1)
response = self.client.post('/sendgrid_callback/',
data=json.dumps([{
'email': 'other_email@example.com',
'uuid': message.uuid,
'event': 'delivered',
'timestamp': '123456789',
}, ]),
content_type='application/json')
# verify that we received a signal
self.assertEqual(len(data), 2)
self.assertEqual(data[1][0].event, 'delivered')
self.assertEqual(data[1][0].uuid, message.uuid)
self.assertEqual(response.status_code, 200)
response = self.client.post('/sendgrid_callback/',
data=json.dumps([{
'email': 'other_email@example.com',
'uuid': message.uuid,
'event': 'delivered',
'timestamp': '123456790',
}, ]),
content_type='application/json')
# verify that we received a signal
self.assertEqual(len(data), 2)
self.assertEqual(response.status_code, 200)
|
gem/oq-engine
|
openquake/hazardlib/source/non_parametric.py
|
Python
|
agpl-3.0
| 10,119
| 0
|
# The Hazard Library
# Copyright (C) 2013-2022 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.source.non_parametric` defines
:class:`NonParametricSeismicSource`
"""
import numpy
from openquake.baselib.general import block_splitter
from openquake.hazardlib.source.base import BaseSeismicSource
from openquake.hazardlib.geo.surface.gridded import GriddedSurface
from openquake.hazardlib.geo.surface.multi import MultiSurface
from openquake.hazardlib.source.rupture import \
NonParametricProbabilisticRupture
from openquake.hazardlib.geo.utils import (angular_distance, KM_TO_DEGREES,
get_spherical_bounding_box)
from openquake.hazardlib.geo.utils import angular_distance, KM_TO_DEGREES
from openquake.hazardlib.geo.mesh import Mesh
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.pmf import PMF
F32 = numpy.float32
U32 = numpy.uint32
class NonParametricSeismicSource(BaseSeismicSource):
"""
Non Parametric Seismic Source explicitly defines earthquake ruptures in the
constructor. That is earthquake ruptures are not generated algorithmically
from a set of source parameters.
    Ruptures' tectonic region types are overwritten by source tectonic region
type.
:param data:
List of tuples. Each tuple must contain two items. The first item must
be an instance of :class:`openquake.hazardlib.source.rupture.Rupture`.
The second item must be an instance of
:class:`openquake.hazardlib.pmf.PMF` describing the probability of the
rupture to occur N times (the PMF must be defined from a minimum number
of occurrences equal to 0)
"""
code = b'N'
MODIFICATIONS = set()
def __init__(self, source_id, name, tectonic_region_type, data,
weights=None):
super().__init__(source_id, name, tectonic_region_type)
self.data = data
        if weights is not None:
assert len(weights) == len(data)
for (rup, pmf), weight in zip(data, weights):
rup.weight = weight
def iter_ruptures(self, **kwargs):
"""
Get a generator object that yields probabilistic ruptures the source
consists of.
:returns:
            Generator of instances of :class:`openquake.hazardlib.source.
rupture.NonParametricProbabilisticRupture`.
"""
for rup, pmf in self.data:
if rup.mag >= self.min_mag:
yield NonParametricProbabilisticRupture(
rup.mag, rup.rake, self.tectonic_region_type,
rup.hypocenter, rup.surface, pmf, weight=rup.weight)
def few_ruptures(self):
"""
Fast version of iter_ruptures used in estimate_weight
"""
for i, (rup, pmf) in enumerate(self.data):
if i % 50 == 0 and rup.mag >= self.min_mag:
yield NonParametricProbabilisticRupture(
rup.mag, rup.rake, self.tectonic_region_type,
rup.hypocenter, rup.surface, pmf, weight=rup.weight)
def __iter__(self):
if len(self.data) == 1: # there is nothing to split
yield self
return
for i, block in enumerate(block_splitter(self.data, 100)):
source_id = '%s:%d' % (self.source_id, i)
src = self.__class__(source_id, self.name,
self.tectonic_region_type, block)
src.num_ruptures = len(block)
src.trt_smr = self.trt_smr
yield src
def count_ruptures(self):
"""
See :meth:
`openquake.hazardlib.source.base.BaseSeismicSource.count_ruptures`.
"""
return len(self.data)
def get_min_max_mag(self):
"""
Return the minimum and maximum magnitudes of the ruptures generated
by the source
"""
min_mag = min(rup.mag for rup, pmf in self.data)
max_mag = max(rup.mag for rup, pmf in self.data)
return min_mag, max_mag
def get_bounding_box(self, maxdist):
"""
Bounding box containing the surfaces, enlarged by the maximum distance
"""
surfaces = []
for rup, _ in self.data:
if isinstance(rup.surface, MultiSurface):
surfaces.extend(rup.surface.surfaces)
else:
surfaces.append(rup.surface)
lons = []
lats = []
for surf in surfaces:
lo1, lo2, la1, la2 = surf.get_bounding_box()
lons.extend([lo1, lo2])
lats.extend([la1, la2])
west, east, north, south = get_spherical_bounding_box(lons, lats)
a1 = maxdist * KM_TO_DEGREES
a2 = angular_distance(maxdist, north, south)
return west - a2, south - a1, east + a2, north + a1
def is_gridded(self):
"""
:returns: True if containing only GriddedRuptures, False otherwise
"""
for rup, _ in self.data:
if not isinstance(rup.surface, GriddedSurface):
return False
return True
def todict(self):
"""
Convert a GriddedSource into a dictionary of arrays
"""
assert self.is_gridded(), '%s is not gridded' % self
n = len(self.data)
m = sum(len(rup.surface.mesh) for rup, pmf in self.data)
p = len(self.data[0][1].data)
dic = {'probs_occur': numpy.zeros((n, p)),
'magnitude': numpy.zeros(n),
'rake': numpy.zeros(n),
'hypocenter': numpy.zeros((n, 3), F32),
'mesh3d': numpy.zeros((m, 3), F32),
'slice': numpy.zeros((n, 2), U32)}
start = 0
for i, (rup, pmf) in enumerate(self.data):
dic['probs_occur'][i] = [prob for (prob, _) in pmf.data]
dic['magnitude'][i] = rup.mag
dic['rake'][i] = rup.rake
dic['hypocenter'][i] = (rup.hypocenter.x, rup.hypocenter.y,
rup.hypocenter.z)
mesh = rup.surface.mesh.array.T # shape (npoints, 3)
dic['mesh3d'][start: start + len(mesh)] = mesh
dic['slice'][i] = start, start + len(mesh)
start += len(mesh)
return dic
def fromdict(self, dic, weights=None):
"""
Populate a GriddedSource with ruptures
"""
assert not self.data, '%s is not empty' % self
i = 0
for mag, rake, hp, probs, (start, stop) in zip(
dic['magnitude'], dic['rake'], dic['hypocenter'],
dic['probs_occur'], dic['slice']):
mesh = Mesh(dic['mesh3d'][start:stop, 0],
dic['mesh3d'][start:stop, 1],
dic['mesh3d'][start:stop, 2])
surface = GriddedSurface(mesh)
pmf = PMF([(prob, i) for i, prob in enumerate(probs)])
hypocenter = Point(hp[0], hp[1], hp[2])
rup = NonParametricProbabilisticRupture(
mag, rake, self.tectonic_region_type, hypocenter, surface, pmf,
weight=None if weights is None else weights[i])
self.data.append((rup, pmf))
i += 1
def __repr__(self):
return '<%s %s gridded=%s>' % (
self.__class__.__name__, self.source_id, self.is_gridded())
@property
def mesh_size(self):
"""
:returns: the number of points in the underlying meshes (reduced)
"""
n = 0
for rup in self.few_ruptures
|
radman404/Who-s-attacking-me-now--
|
wamnclient.py
|
Python
|
gpl-2.0
| 2,126
| 0.024929
|
#!/usr/bin/python
import pygeoip
import json
from logsparser.lognormalizer import LogNormalizer as LN
import gzip
import glob
import socket
import urllib2
IP = 'IP.Of,Your.Server'
normalizer = LN('/usr/local/share/logsparser/normalizers')
gi = pygeoip.GeoIP('../GeoLiteCity.dat')
def complete(text, state):
    return (glob.glob(text+'*')+[None])[state]
def sshcheck():
attacks = {}
users = {}
try:
import readline, rlcompleter
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(complete)
except ImportError:
print 'No Tab Completion'
LOGs = raw_input('Enter the path to the log file: ')
for LOG in LOGs.split(' '):
if LOG.endswith('.gz'):
auth_logs = gzip.GzipFile(LOG, 'r')
else:
auth_logs = open(LOG, 'r')
        if len(LOGs.split(' ')) == 1:
print "Parsing log file"
else:
print "Parsing log files"
for log in auth_logs:
l = {"raw": log }
normalizer.normalize(l)
if l.get('action') == 'fail' and l.get('program') == 'sshd':
u = l['user']
p = l['source_ip']
o1, o2, o3, o4 = [int(i) for i in p.split('.')]
if o1 == 192 and o2 == 168 or o1 == 172 and o2 in range(16, 32) or o1 == 10:
print "Private IP, %s No geolocation data" %str(p)
attacks[p] = attacks.get(p, 0) + 1
getip()
dojson(attacks, IP)
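# Editorial sketch (not part of the original script): the RFC1918 test in
# sshcheck() above can also be expressed with the stdlib `ipaddress` module
# (Python 3, or the `ipaddress` backport on Python 2); the helper name below
# is an assumption for illustration only.
def _is_private_ipv4(addr):
    try:
        import ipaddress
        return ipaddress.ip_address(u'%s' % addr).is_private
    except ImportError:
        # same ranges as checked above: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16
        o1, o2 = [int(x) for x in addr.split('.')[:2]]
        return o1 == 10 or (o1 == 192 and o2 == 168) or (o1 == 172 and 16 <= o2 <= 31)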
def getip():
global IP
    if IP == 0:
try:
i = urllib2.Request("http://icanhazip.com")
p = urllib2.urlopen(i)
IP = p.read()
except:
print "can't seem to grab your IP please set IP variable so We can better map attacks"
def dojson(attacks, IP):
data = {}
for i,(a,p) in enumerate(attacks.iteritems()):
datalist = [{ 'ip': a, 'attacks': p, 'local_ip': IP }]
data[i] = datalist
    newdata = data
newjson = json.dumps(newdata)
print json.loads(newjson)
send(newjson)
def send(data):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('Ip.Of.Your.Server', 9999))
s.sendall(data)
s.close()
try:
sshcheck()
except KeyboardInterrupt:
print '\nCtrl+C Exiting...'
exit(0)
|
markroxor/gensim
|
gensim/corpora/indexedcorpus.py
|
Python
|
lgpl-2.1
| 5,378
| 0.002789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Indexed corpus is a mechanism for random-accessing corpora.
While the standard corpus interface in gensim allows iterating over corpus with
`for doc in corpus: pass`, indexed corpus allows accessing the documents with
`corpus[docno]` (in O(1) look-up time).
This functionality is achieved by storing an extra file (by default named the same
as the corpus file plus '.index' suffix) that stores the byte offset of the beginning
of each document.
"""
import logging
import six
import numpy
from gensim import interfaces, utils
logger = logging.getLogger('gensim.corpora.indexedcorpus')
class IndexedCorpus(interfaces.CorpusABC):
def __init__(self, fname, index_fname=None):
"""
Initialize this abstract base class, by loading a previously saved index
from `index_fname` (or `fname.index` if `index_fname` is not set).
This index will allow subclasses to support the `corpus[docno]` syntax
(random access to document #`docno` in O(1)).
>>> # save corpus in SvmLightCorpus format with an index
>>> corpus = [[(1, 0.5)], [(0, 1.0), (1, 2.0)]]
>>> gensim.corpora.SvmLightCorpus.serialize('testfile.svmlight', corpus)
>>> # load back as a document stream (*not* plain Python list)
        >>> corpus_with_random_access = gensim.corpora.SvmLightCorpus('testfile.svmlight')
>>> print(corpus_with_random_access[1])
[(0, 1.0), (1, 2.0)]
"""
try:
if index_fname is None:
index_fname = utils.smart_extension(fname, '.index')
self.index = utils.unpickle(index_fname)
# change self.index into a numpy.ndarray to support fancy indexing
self.index = numpy.asarray(self.index)
logger.info("loaded corpus index from %s", index_fname)
except Exception:
self.index = None
self.length = None
@classmethod
def serialize(serializer, fname, corpus, id2word=None, index_fname=None,
progress_cnt=None, labels=None, metadata=False):
"""
Iterate through the document stream `corpus`, saving the documents to `fname`
and recording byte offset of each document. Save the resulting index
        structure to file `index_fname` (or `fname.index` if `index_fname` is not set).
This relies on the underlying corpus class `serializer` providing (in
addition to standard iteration):
* `save_corpus` method that returns a sequence of byte offsets, one for
each saved document,
* the `docbyoffset(offset)` method, which returns a document
positioned at `offset` bytes within the persistent storage (file).
* metadata if set to true will ensure that serialize will write out article titles to a pickle file.
Example:
>>> MmCorpus.serialize('test.mm', corpus)
>>> mm = MmCorpus('test.mm') # `mm` document stream now has random access
>>> print(mm[42]) # retrieve document no. 42, etc.
"""
if getattr(corpus, 'fname', None) == fname:
raise ValueError("identical input vs. output corpus filename, refusing to serialize: %s" % fname)
if index_fname is None:
index_fname = utils.smart_extension(fname, '.index')
kwargs = {'metadata': metadata}
if progress_cnt is not None:
kwargs['progress_cnt'] = progress_cnt
if labels is not None:
kwargs['labels'] = labels
offsets = serializer.save_corpus(fname, corpus, id2word, **kwargs)
if offsets is None:
raise NotImplementedError(
"Called serialize on class %s which doesn't support indexing!" % serializer.__name__
)
# store offsets persistently, using pickle
# we shouldn't have to worry about self.index being a numpy.ndarray as the serializer will return
# the offsets that are actually stored on disk - we're not storing self.index in any case, the
# load just needs to turn whatever is loaded from disk back into a ndarray - this should also ensure
# backwards compatibility
logger.info("saving %s index to %s", serializer.__name__, index_fname)
utils.pickle(offsets, index_fname)
def __len__(self):
"""
Return the index length if the corpus is indexed. Otherwise, make a pass
over self to calculate the corpus length and cache this number.
"""
if self.index is not None:
return len(self.index)
if self.length is None:
logger.info("caching corpus length")
self.length = sum(1 for _ in self)
return self.length
def __getitem__(self, docno):
if self.index is None:
raise RuntimeError("Cannot call corpus[docid] without an index")
if isinstance(docno, (slice, list, numpy.ndarray)):
return utils.SlicedCorpus(self, docno)
elif isinstance(docno, six.integer_types + (numpy.integer,)):
return self.docbyoffset(self.index[docno])
else:
raise ValueError('Unrecognised value for docno, use either a single integer, a slice or a numpy.ndarray')
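# Editorial usage sketch, mirroring the docstring examples above (file names
# and the toy corpus are illustrative only):
#
#     from gensim.corpora import MmCorpus
#     corpus = [[(1, 0.5)], [(0, 1.0), (1, 2.0)]]
#     MmCorpus.serialize('/tmp/test.mm', corpus)  # also writes /tmp/test.mm.index
#     mm = MmCorpus('/tmp/test.mm')               # the index enables mm[docno]
#     print(mm[1])                                # [(0, 1.0), (1, 2.0)]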
|
leschzinerlab/ISAC
|
ISAC.py
|
Python
|
mit
| 8,169
| 0.045048
|
#!/usr/bin/env python
import optparse
from sys import *
import os,sys,re
from optparse import OptionParser
import glob
import subprocess
from os import system
import linecache
import time
#=========================
def setupParserOptions():
parser = optparse.OptionParser()
parser.set_usage("%prog -i <stack> --nodes=<nodes> --threads=<threads>")
parser.add_option("-i",dest="stack",type="string",metavar="FILE",
help="Input stack")
#parser.add_option("--apix",dest="apix",type="float", metavar="FLOAT",
# help="Pixel size")
#parser.add_option("--lp",dest="lp",type="int", metavar="INT",default=15,
# help="Low pass filter to use during alignment. (Default=15 A)")
#parser.add_option("--hp",dest="hp",type="int", metavar="INT",default=500,
# help="High pass filter to use during alignment. (Default=500 A)")
parser.add_option("--img_per_grp",dest="img_per_grp",type="int", metavar="INT",default=60,
help="Number of images per group. (Default=60)")
parser.add_option("--thld_err",dest="thld_err",type="int", metavar="INT",default=60,
help="Threshold pixel error when checking stability. (Default=1.75)")
parser.add_option("--max_round",dest="maxround",type="int", metavar="INT",default=5,
help="Max iterations for alignment. (Default=5)")
parser.add_option("--generations",dest="num_gen",type="int", metavar="INT",defa
|
ult=1,
help="Number of generations. (Default=1)")
parser.add_option("--queue",dest="queue",type="string", metavar="STRING",default='condo',
help="Queue for job submission. (Default=condo)")
parser.add_option("--nodes",dest="nodes",type="int", metavar="INT",default=20,
help="Number of nodes to distribute job over. (Default=20)")
parser.add_option("--threads",dest="threads",type="int", metavar="INT",default=8,
help="Number of
|
threads per node to run. (Default=8)")
parser.add_option("--walltime",dest="walltime",type="int", metavar="INT",default=8,
help="Walltime for job (estimated run time, in hours). (Default=8)")
parser.add_option("-d", action="store_true",dest="debug",default=False,
help="debug")
options,args = parser.parse_args()
if len(args) > 1:
parser.error("Unknown commandline options: " +str(args))
if len(sys.argv) < 3:
parser.print_help()
sys.exit()
params={}
for i in parser.option_list:
if isinstance(i.dest,str):
params[i.dest] = getattr(options,i.dest)
return params
#=============================
def checkConflicts(params):
if not os.path.exists(params['stack']):
print 'Error: stack %s does not exist. Exiting' %(params['stack'])
sys.exit()
if params['stack'][-4:] != '.img':
        print 'Error: stack extension %s is not recognized as .img. Exiting' %(params['stack'][-4:])
sys.exit()
if os.path.exists('dir_%s' %(params['stack'][:-4])):
print 'Error: output directory dir_%s already exists. Exiting.' %(params['stack'][:-4])
sys.exit()
#=========================
def getEMANPath():
emanpath = subprocess.Popen("env | grep EMAN2DIR", shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if emanpath:
emanpath = emanpath.replace("EMAN2DIR=","")
if os.path.exists(emanpath):
return emanpath
print "EMAN2 was not found, make sure it is in your path"
sys.exit()
#==============================
def convertIMG_to_BDB(params,scaling):
print '\n'
print 'Converting imagic stack to BDB format: %s.img -----> bdb:%s' %(params['stack'][:-4],params['stack'][:-4])
print '...this can take a while\n'
#Remove existing images if they are there
if os.path.exists('EMAN2DB/%s.bdb' %(params['stack'][:-4])):
os.remove('EMAN2DB/%s.bdb' %(params['stack'][:-4]))
os.remove('EMAN2DB/%s_64x64x1' %(params['stack'][:-4]))
if os.path.exists('EMAN2DB/%s_ali.bdb' %(params['stack'][:-4])):
os.remove('EMAN2DB/%s_ali.bdb' %(params['stack'][:-4]))
os.remove('EMAN2DB/%s_ali_64x64x1' %(params['stack'][:-4]))
#Convert stack from imagic to bdb format
cmd='e2proc2d.py %s.img bdb:%s --scale=%f --clip=64,64' %(params['stack'][:-4],params['stack'][:-4],float(scaling))
if params['debug'] is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
return 'bdb:%s' %(params['stack'][:-4])
#==============================
def getBoxSize(stack):
cmd='iminfo %s > tmp2222.txt' %(stack)
subprocess.Popen(cmd,shell=True).wait()
line=linecache.getline('tmp2222.txt',4)
boxsize=int(line.split()[-1].split('x')[0])
os.remove('tmp2222.txt')
return boxsize
#=============================
def submitISAC(bdbstack,queue,nodes,threads,walltime,ngen,maxround,imgpergroup,therr):
subscript='isac_%i.submit'%(int(time.time()))
o1=open(subscript,'w')
cmd='#!/bin/bash\n'
cmd+='### Inherit all current environment variables\n'
cmd+='#PBS -V\n'
cmd+='### Job name\n'
cmd+='#PBS -N isac1\n'
cmd+='### Keep Output and Error\n'
cmd+='#PBS -o isac.o$PBS_JOBID\n'
cmd+='#PBS -e isac.e$PBS_JOBID\n'
cmd+='### Queue name\n'
cmd+='#PBS -q %s\n' %(queue)
cmd+='### Specify the number of nodes and thread (ppn) for your job.\n'
cmd+='#PBS -l nodes=%i:ppn=%i\n' %(nodes,threads)
cmd+='### Tell PBS the anticipated run-time for your job, where walltime=HH:MM:SS\n'
cmd+='#PBS -l walltime=%i:00:00\n'%(walltime)
cmd+='#################################\n'
cmd+='### Switch to the working directory;\n'
cmd+='cd $PBS_O_WORKDIR\n'
cmd+='### Run:\n'
cmd+='mpirun /home/micianfrocco/software/EMAN2-2.12/bin/sxisac.py %s --stab_ali=2 --init_iter=1 --main_iter=1 --match_second=1 --radius=30 --max_round=%i --img_per_grp=%i --thld_err=%f --n_generations=%i\n' %(bdbstack,maxround,imgpergroup,therr,ngen)
o1.write(cmd)
print '\n...Submission script generated.\n'
print '\n Exit interactive node and submit ISAC job using the following command: \n'
print '\n qsub %s' %(subscript)
#==============================
if __name__ == "__main__":
params=setupParserOptions()
getEMANPath()
checkConflicts(params)
boxSize=getBoxSize(params['stack'])
ScalingFactor=64/float(boxSize)
if params['debug'] is True:
print 'ScalingFactor=%f' %(ScalingFactor)
print 'BoxSize=%f' %(boxSize)
#Filter stack
if os.path.exists('%s_filt.img' %(params['stack'][:-4])):
os.remove('%s_filt.img' %(params['stack'][:-4]))
if os.path.exists('%s_filt.hed' %(params['stack'][:-4])):
os.remove('%s_filt.hed' %(params['stack'][:-4]))
#cmd='proc2d %s %s_filt.img apix=%f hp=%i lp=%i' %(params['stack'],params['stack'][:-4],params['apix'],params['hp'],params['lp'])
#if params['debug'] is True:
# print cmd
#subprocess.Popen(cmd,shell=True).wait()
bdbstack=convertIMG_to_BDB(params,ScalingFactor)
#prepare stack for isac
print '\n ...Initializing stack...\n'
cmd='sxheader.py %s --params=active --one' %(bdbstack)
if params['debug'] is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
cmd='sxheader.py %s --params=xform.align2d --zero' %(bdbstack)
if params['debug'] is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
#Centering particles
print '\n ...Centering particles...\n'
cmd='mpirun -np 1 sxali2d.py %s dir_%s --ou=28 --xr="2 1" --ts="1 0.5" --maxit=33 --dst=90 --MPI' %(bdbstack,params['stack'][:-4])
if params['debug'] is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
#Rotate particles according to centering
print '\n ...Applying alignments from centering to particle stack...\n'
cmd='sxtransform2d.py %s %s_ali' %(bdbstack,bdbstack)
if params['debug'] is True:
print cmd
subprocess.Popen(cmd,shell=True).wait()
#Create cluster submission script & submit job
submitISAC('%s_ali' %(bdbstack),params['queue'],params['nodes'],params['threads'],params['walltime'],params['num_gen'],params['maxround'],params['img_per_grp'],params['thld_err'])
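# Editorial note -- worked example of the scaling above (numbers are
# illustrative): for an input stack with a 256-pixel box, getBoxSize()
# returns 256 and ScalingFactor = 64/256 = 0.25, so e2proc2d.py shrinks
# each particle by 0.25 and clips it to the 64x64 box expected by ISAC.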
|
lociii/googleads-python-lib
|
examples/adspygoogle/dfp/v201306/creative_service/get_creatives_by_statement.py
|
Python
|
apache-2.0
| 2,090
| 0.002871
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all image creatives. The statement retrieves up to the
maximum page size limit of 500. To create an image creative,
run create_creatives.py."""
__author__ = ('Jeff Sham',
'Vincent Tsao')
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201306')
# Create statement object to only select image creatives.
values = [{
'key': 'creativeType',
'value': {
'xsi_type': 'TextValue',
'value': 'ImageCreative'
}
}]
filter_statement = {'query': 'WHERE creativeType = :creativeType LIMIT 500',
'values': values}
# Get creatives by statement.
response = creative_service.GetCreativesByStatement(filter_statement)[0]
creatives = []
if 'results' in response:
creatives = response['results']
# Display results.
for creative in creatives:
print ('Creative with id \'%s\', name \'%s\', and type \'%s\' was found.'
% (creative['id'], creative['name'], creative['Creative_Type']))
print
print 'Number of results found: %s' % len(creatives)
|
nicholasserra/sentry
|
src/sentry/api/endpoints/project_details.py
|
Python
|
bsd-3-clause
| 8,377
| 0.002029
|
from __future__ import absolute_import
import logging
from datetime import timedelta
from django.utils import timezone
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.decorators import sudo_required
from sentry.api.serializers import serialize
from sentry.models import (
AuditLogEntryEvent, Group, GroupStatus, Project, ProjectStatus
)
from sentry.plugins import plugins
from sentry.tasks.deletion import delete_project
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('GetProject')
def get_project_scenario(runner):
runner.request(
method='GET',
path='/projects/%s/%s/' % (
runner.org.slug, runner.default_project.slug)
)
@scenario('DeleteProject')
def delete_project_scenario(runner):
with runner.isolated_project('Plain Proxy') as project:
runner.request(
method='DELETE',
path='/projects/%s/%s/' % (
runner.org.slug, project.slug)
)
@scenario('UpdateProject')
def update_project_scenario(runner):
with runner.isolated_project('Plain Proxy') as project:
runner.request(
method='PUT',
path='/projects/%s/%s/' % (
runner.org.slug, project.slug),
data={
'name': 'Plane Proxy',
'slug': 'plane-proxy',
'options': {
'sentry:origins': 'http://example.com\nhttp://example.invalid',
}
}
)
def clean_newline_inputs(value):
result = []
for v in value.split('\n'):
v = v.lower().strip()
if v:
result.append(v)
return result
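# Editorial worked example, derived from clean_newline_inputs() above:
#   clean_newline_inputs('http://example.com\n\n HTTPS://Example.ORG \n')
#   returns ['http://example.com', 'https://example.org'] -- blank entries
#   are dropped and each value is stripped and lower-cased.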
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('name', 'slug')
class ProjectDetailsEndpoint(ProjectEndpoint):
doc_section = DocSection.PROJECTS
def _get_unresolved_count(self, project):
queryset = Group.objects.filter(
status=GroupStatus.UNRESOLVED,
project=project,
)
resolve_age = project.get_option('sentry:resolve_age', None)
if resolve_age:
queryset = queryset.filter(
last_seen__gte=timezone.now() - timedelta(hours=int(resolve_age)),
)
return queryset.count()
@attach_scenarios([get_project_scenario])
def get(self, request, project):
"""
Retrieve a Project
``````````````````
Return details on an individual project.
:pparam string organization_slug: the slug of the organization the
project belongs to.
        :pparam string project_slug: the slug of the project to retrieve.
:auth: required
"""
active_plugins = [
{
'name': plugin.get_title(),
'id': plugin.slug,
}
for plugin in plugins.configurable_for_project(project, version=None)
if plugin.is_enabled(project)
and plugin.has_project_conf()
]
data = serialize(project, request.user)
data['options'] = {
'sentry:origins': '\n'.join(project.get_option('sentry:origins', ['*']) or []),
            'sentry:resolve_age': int(project.get_option('sentry:resolve_age', 0)),
'sentry:scrub_data': bool(project.get_option('sentry:scrub_data', True)),
'sentry:scrub_defaults': bool(project.get_option('sentry:scrub_defaults', True)),
            'sentry:sensitive_fields': project.get_option('sentry:sensitive_fields', []),
}
data['activePlugins'] = active_plugins
data['team'] = serialize(project.team, request.user)
data['organization'] = serialize(project.organization, request.user)
include = set(filter(bool, request.GET.get('include', '').split(',')))
if 'stats' in include:
data['stats'] = {
'unresolved': self._get_unresolved_count(project),
}
return Response(data)
@attach_scenarios([update_project_scenario])
@sudo_required
def put(self, request, project):
"""
Update a Project
````````````````
Update various attributes and configurable settings for the given
project. Only supplied values are updated.
:pparam string organization_slug: the slug of the organization the
project belongs to.
        :pparam string project_slug: the slug of the project to update.
:param string name: the new name for the project.
:param string slug: the new slug for the project.
:param object options: optional options to override in the
project settings.
:auth: required
"""
serializer = ProjectSerializer(project, data=request.DATA, partial=True)
if serializer.is_valid():
project = serializer.save()
options = request.DATA.get('options', {})
if 'sentry:origins' in options:
project.update_option(
'sentry:origins',
clean_newline_inputs(options['sentry:origins'])
)
if 'sentry:resolve_age' in options:
project.update_option('sentry:resolve_age', int(options['sentry:resolve_age']))
if 'sentry:scrub_data' in options:
project.update_option('sentry:scrub_data', bool(options['sentry:scrub_data']))
if 'sentry:scrub_defaults' in options:
project.update_option('sentry:scrub_defaults', bool(options['sentry:scrub_defaults']))
if 'sentry:sensitive_fields' in options:
project.update_option(
'sentry:sensitive_fields',
[s.strip().lower() for s in options['sentry:sensitive_fields']]
)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_EDIT,
data=project.get_audit_log_data(),
)
data = serialize(project, request.user)
data['options'] = {
'sentry:origins': '\n'.join(project.get_option('sentry:origins', '*') or []),
'sentry:resolve_age': int(project.get_option('sentry:resolve_age', 0)),
}
return Response(data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@attach_scenarios([delete_project_scenario])
@sudo_required
def delete(self, request, project):
"""
Delete a Project
````````````````
Schedules a project for deletion.
        Deletion happens asynchronously and therefore is not immediate.
However once deletion has begun the state of a project changes and
will be hidden from most public views.
:pparam string organization_slug: the slug of the organization the
project belongs to.
:pparam string project_slug: the slug of the project to delete.
:auth: required
"""
if project.is_internal_project():
return Response('{"error": "Cannot remove projects internally used by Sentry."}',
status=status.HTTP_403_FORBIDDEN)
logging.getLogger('sentry.deletions').info(
'Project %s/%s (id=%s) removal requested by user (id=%s)',
project.organization.slug, project.slug, project.id, request.user.id)
updated = Project.objects.filter(
id=project.id,
status=ProjectStatus.VISIBLE,
).update(status=ProjectStatus.PENDING_DELETION)
if updated:
delete_project.delay(object_id=project.id, countdown=3600)
self.create_audit_entry(
request=request,
organization=project.organization,
|
opennode/nodeconductor-assembly-waldur
|
src/waldur_core/core/tests/test_authentication.py
|
Python
|
mit
| 7,832
| 0.002937
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.urls import reverse
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import status, test
from rest_framework.authtoken.models import Token
from . import helpers
class TokenAuthenticationTest(test.APITransactionTestCase):
def setUp(self):
self.username = 'test'
self.password = 'secret'
self.auth_url = 'http://testserver' + reverse('auth-password')
self.test_url = 'http://testserver/api/'
get_user_model().objects.create_user(
self.username, 'admin@example.com', self.password
)
def tearDown(self):
cache.clear()
def test_user_can_authenticate_with_token(self):
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_token_expires_based_on_user_token_lifetime(self):
user = get_user_model().objects.get(username=self.username)
configured_token_lifetime = settings.WALDUR_CORE.get(
'TOKEN_LIFETIME', timezone.timedelta(hours=1)
)
user_token_lifetime = configured_token_lifetime - timezone.timedelta(seconds=40)
user.token_lifetime = user_token_lifetime.seconds
user.save()
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
token = response.data['token']
mocked_now = timezone.now() + user_token_lifetime
with freeze_time(mocked_now):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['detail'], 'Token has expired.')
def test_token_creation_time_is_updated_on_every_request(self):
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
token = response.data['token']
created1 = Token.objects.values_list('created', flat=True).get(key=token)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
self.client.get(self.test_url)
created2 = Token.objects.values_list('created', flat=True).get(key=token)
self.assertTrue(created1 < created2)
def test_account_is_blocked_after_five_failed_attempts(self):
for _ in range(5):
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': 'WRONG'}
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
# this one should fail with a different error message
        response = self.client.post(
self.auth_url, data={'username': self.username, 'password': 'WRONG'}
)
self.assertEqual(
response.data['detail'], 'Username is locked out. Try in 10 minutes.'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_expired_token_is_recreated_on_successful_authentication(self):
user = get_user_model().objects.get(username=self.username)
self.assertIsNotNone(user.token_lifetime)
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
token1 = response.data['token']
mocked_now = timezone.now() + timezone.timedelta(seconds=user.token_lifetime)
with freeze_time(mocked_now):
response = self.client.post(
self.auth_url,
data={'username': self.username, 'password': self.password},
)
token2 = response.data['token']
self.assertNotEqual(token1, token2)
def test_not_expired_token_creation_time_is_updated_on_authentication(self):
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
token1 = response.data['token']
created1 = Token.objects.values_list('created', flat=True).get(key=token1)
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
token2 = response.data['token']
created2 = Token.objects.values_list('created', flat=True).get(key=token2)
self.assertEqual(token1, token2)
self.assertTrue(created1 < created2)
def test_token_never_expires_if_token_lifetime_is_none(self):
user = get_user_model().objects.get(username=self.username)
user.token_lifetime = None
user.save()
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
original_token = response.data['token']
year_ahead = timezone.now() + timezone.timedelta(days=365)
with freeze_time(year_ahead):
response = self.client.post(
self.auth_url,
data={'username': self.username, 'password': self.password},
)
token_in_a_year = response.data['token']
self.assertEqual(original_token, token_in_a_year)
def test_token_created_date_is_refreshed_even_if_token_lifetime_is_none(self):
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user = get_user_model().objects.get(username=self.username)
original_token_lifetime = user.token_lifetime
original_created_value = user.auth_token.created
user.token_lifetime = None
user.save()
last_refresh_time = timezone.now() + timezone.timedelta(
seconds=original_token_lifetime
)
with freeze_time(last_refresh_time):
response = self.client.post(
self.auth_url,
data={'username': self.username, 'password': self.password},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
token = response.data['token']
user.auth_token.refresh_from_db()
self.assertTrue(user.auth_token.created > original_created_value)
user.token_lifetime = original_token_lifetime
user.save()
with freeze_time(last_refresh_time):
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@helpers.override_waldur_core_settings(AUTHENTICATION_METHODS=[])
def test_authentication_fails_if_local_signin_is_disabled(self):
response = self.client.post(
self.auth_url, data={'username': self.username, 'password': self.password}
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertTrue(b'Authentication method is disabled.' in response.content)
|
ayseyo/oclapi
|
django-nonrel/ocl/integration_tests/tests/openmrs_mapping_validation.py
|
Python
|
mpl-2.0
| 1,429
| 0.003499
|
from django.core.urlresolvers import reverse
from mappings.tests import MappingBaseTest
from mappings.validation_messages import OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS
from oclapi.models import CUSTOM_VALIDATION_SCHEMA_OPENMRS
from test_helper.base import create_user, create_source, create_concept
class OpenMRSMappingCreateTest(MappingBaseTest):
def test_create_mapping_duplicate_mapping_between_two_concepts(self):
source = create_source(self.user1, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
(concept1, _) = create_concept(user=self.user1, source=source)
        (concept2, _) = create_concept(user=self.user1, source=source)
        self.client.login(username='user1', password='user1')
kwargs = {
'source': source.mnemonic
}
mapping1 = {
'from_concept_url': concept1.url,
'to_concept_url': concept2.url,
'map_type': 'Same As'
}
mapping2 = {
'from_concept_url': concept1.url,
'to_concept_url': concept2.url,
'map_type': 'Narrower Than'
}
self.client.post(reverse('mapping-list', kwargs=kwargs), mapping1)
response = self.client.post(reverse('mapping-list', kwargs=kwargs), mapping2)
self.assertEquals(response.status_code, 400)
self.assertEquals(response.data, {"errors": OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS})
|
Trust-Code/python-cnab
|
tests/test_registro.py
|
Python
|
mit
| 4,409
| 0.000454
|
import unittest
from unittest import skip
from decimal import Decimal
from cnab240 import errors
from cnab240.bancos import itau
from tests.data import get_itau_data_from_file
class TestRegistro(unittest.TestCase):
def setUp(self):
itau_data = get_itau_data_from_file()
self.header_arquivo = itau_data['header_arquivo']
self.seg_p = itau_data['seg_p1']
self.seg_p_str = itau_data['seg_p1_str']
self.seg_q = itau_data['seg_q1']
self.seg_q_str = itau_data['seg_q1_str']
def test_leitura_campo_num_decimal(self):
        self.assertEqual(self.seg_p.valor_titulo, Decimal('100.00'))
def test_escrita_campo_num_decimal(self):
        # accept only the Decimal type
with self.assertRaises(errors.TipoError):
self.seg_p.valor_titulo = 10.0
with self.assertRaises(errors.TipoError):
self.seg_p.valor_titulo = ''
        # Check that the number of decimal places is validated
with self.assertRaises(errors.NumDecimaisError):
self.seg_p.valor_titulo = Decimal('100.2')
with self.assertRaises(errors.NumDecimaisError):
self.seg_p.valor_titulo = Decimal('1001')
with self.assertRaises(errors.NumDecimaisError):
self.seg_p.valor_titulo = Decimal('1.000')
        # check that the number of digits is validated
with self.assertRaises(errors.NumDigitosExcedidoError):
self.seg_p.valor_titulo = Decimal('10000000008100.21')
        # correct storage of a decimal value
self.seg_p.valor_titulo = Decimal('2.13')
self.assertEqual(self.seg_p.valor_titulo, Decimal('2.13'))
def test_leitura_campo_num_int(self):
self.assertEqual(self.header_arquivo.controle_banco, 341)
def test_escrita_campo_num_int(self):
        # accept only integers (int and long)
with self.assertRaises(errors.TipoError):
self.header_arquivo.controle_banco = 10.0
with self.assertRaises(errors.TipoError):
self.header_arquivo.controle_banco = ''
        # check that the number of digits is validated
with self.assertRaises(errors.NumDigitosExcedidoError):
self.header_arquivo.controle_banco = 12345678234567890234567890
with self.assertRaises(errors.NumDigitosExcedidoError):
self.header_arquivo.controle_banco = 1234
        # check the stored value
self.header_arquivo.controle_banco = 5
self.assertEqual(self.header_arquivo.controle_banco, 5)
def test_leitura_campo_alfa(self):
self.assertEqual(self.header_arquivo.cedente_nome,
'TRACY TECNOLOGIA LTDA ME')
@skip
def test_escrita_campo_alfa(self):
        # Check that only unicode objects are accepted
with self.assertRaises(errors.TipoError):
self.header_arquivo.cedente_nome = 'tracy'
        # Check that strings longer than obj.digitos are not accepted
with self.assertRaises(errors.NumDigitosExcedidoError):
self.header_arquivo.cedente_convenio = '123456789012345678901'
        # Check that the assigned value was stored in the object
self.header_arquivo.cedente_nome = 'tracy'
self.assertEqual(self.header_arquivo.cedente_nome, 'tracy')
def test_fromdict(self):
header_dict = self.header_arquivo.todict()
header_arquivo = itau.registros.HeaderArquivo(**header_dict)
self.assertEqual(header_arquivo.cedente_nome,
'TRACY TECNOLOGIA LTDA ME')
self.assertEqual(header_arquivo.nome_do_banco, 'BANCO ITAU SA')
def test_necessario(self):
self.assertTrue(self.seg_p)
seg_p2 = itau.registros.SegmentoP()
self.assertFalse(seg_p2.necessario())
seg_p2.controle_banco = 33
self.assertFalse(seg_p2.necessario())
seg_p2.vencimento_titulo = 10102012
self.assertTrue(seg_p2.necessario())
def test_unicode(self):
def unicode_test(seg_instance, seg_str):
seg_gen_str = str(seg_instance)
self.assertEqual(len(seg_gen_str), 240)
self.assertEqual(len(seg_str), 240)
self.assertEqual(seg_gen_str, seg_str)
unicode_test(self.seg_p, self.seg_p_str)
unicode_test(self.seg_q, self.seg_q_str)
if __name__ == '__main__':
unittest.main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/asyncio/selector_events.py
|
Python
|
gpl-3.0
| 39,441
| 0.000076
|
"""Event loop using a selector and related classes.
A selector is a "notify-when-ready" multiplexer. For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""
__all__ = ['BaseSelectorEventLoop']
import collections
import errno
import functools
import socket
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import constants
from . import events
from . import futures
from . import selectors
from . import transports
from . import sslproto
from .coroutines import coroutine
from .log import logger
def _test_selector_event(selector, fd, event):
# Test if the selector is monitoring 'event' events
# for the file descriptor 'fd'.
try:
key = selector.get_key(fd)
except KeyError:
return False
else:
return bool(key.events & event)
class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""Selector event loop.
See events.EventLoop for API specification.
"""
def __init__(self, selector=None):
super().__init__()
if selector is None:
selector = selectors.DefaultSelector()
logger.debug('Using selector: %s', selector.__class__.__name__)
self._selector = selector
self._make_self_pipe()
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
return _SelectorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
return self._make_legacy_ssl_transport(
rawsock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
extra=extra, server=server)
ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
server_side, server_hostname)
_SelectorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport
def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext,
waiter, *,
server_side=False, server_hostname=None,
extra=None, server=None):
# Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used
# on Python 3.4 and older, when ssl.MemoryBIO is not available.
return _SelectorSslTransport(
self, rawsock, protocol, sslcontext, waiter,
server_side, server_hostname, extra, server)
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
return _SelectorDatagramTransport(self, sock, protocol,
address, waiter, extra)
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
self._close_self_pipe()
super().close()
if self._selector is not None:
self._selector.close()
self._selector = None
def _socketpair(self):
raise NotImplementedError
def _close_self_pipe(self):
self.remove_reader(self._ssock.fileno())
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
self._ssock, self._csock = self._socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
self.add_reader(self._ssock.fileno(), self._read_from_self)
def _process_self_data(self, data):
pass
def _read_from_self(self):
while True:
try:
data = self._ssock.recv(4096)
if not data:
break
self._process_self_data(data)
except InterruptedError:
continue
except BlockingIOError:
break
def _write_to_self(self):
# This may be called from a different thread, possibly after
# _close_self_pipe() has been called or even while it is
# running. Guard for self._csock being None or closed. When
        # a socket is closed, send() raises OSError (with errno set to
# EBADF, but let's not rely on the exact error code).
csock = self._csock
if csock is not None:
try:
csock.send(b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
"self-pipe socket",
exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None):
self.add_reader(sock.fileno(), self._accept_connection,
protocol_factory, sock, sslcontext, server)
def _accept_connection(self, protocol_factory, sock,
sslcontext=None, server=None):
try:
conn, addr = sock.accept()
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
conn.setblocking(False)
except (BlockingIOError, InterruptedError, ConnectionAbortedError):
pass # False alarm.
except OSError as exc:
# There's nowhere to send the error, so just log it.
if exc.errno in (errno.EMFILE, errno.ENFILE,
errno.ENOBUFS, errno.ENOMEM):
                # Some platforms (e.g. Linux) keep reporting the FD as
# ready, so we remove the read handler temporarily.
# We'll try again in a while.
self.call_exception_handler({
'message': 'socket.accept() out of system resource',
'exception': exc,
'socket': sock,
})
self.remove_reader(sock.fileno())
self.call_later(constants.ACCEPT_RETRY_DELAY,
self._start_serving,
protocol_factory, sock, sslcontext, server)
else:
raise # The event loop will catch, log and ignore it.
else:
extra = {'peername': addr}
accept = self._accept_connection2(protocol_factory, conn, extra,
sslcontext, server)
self.create_task(accept)
@coroutine
def _accept_connection2(self, protocol_factory, conn, extra,
sslcontext=None, server=None):
protocol = None
transport = None
try:
protocol = protocol_factory()
waiter = futures.Future(loop=self)
if sslcontext:
transport = self._make_ssl_transport(
conn, protocol, sslcontext, waiter=waiter,
server_side=True, extra=extra, server=server)
else:
transport = self._make_socket_transport(
conn, protocol, waiter=waiter, extra=extra,
server=server)
try:
yield from waiter
except:
transport.close()
raise
# It's now up to the protocol to handle the connection.
except Exception as exc:
if self._debug:
context = {
'message': ('Error on transport creation '
'for incoming connect
|
quartzmo/gcloud-ruby
|
google-cloud-bigquery-data_transfer/synth.py
|
Python
|
apache-2.0
| 3,453
| 0.001448
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import re
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
v1_library = gapic.ruby_library(
'bigquery/datatransfer', 'v1',
    artman_output_name='google-cloud-ruby/google-cloud-bigquerydatatransfer',
config_path='artman_bigquerydatatransfer.yaml'
)
s.copy(v1_library / 'acceptance')
s.copy(v1_library / 'lib')
s.copy(v1_library / 'test')
s.copy(v1_library / 'README.md')
s.copy(v1_library / 'LICENSE')
s.copy(v1_library / '.gitignore')
s.copy(v1_library / '.yardopts')
s.copy(v1_library / 'google-cloud-bigquery-data_transfer.gemspec', merge=ruby.merge_gemspec)
# PERMANENT: Use custom credentials env variable names
s.replace(
'lib/google/cloud/bigquery/data_transfer/v1/credentials.rb',
'BIGQUERYDATATRANSFER_KEYFILE', 'DATA_TRANSFER_KEYFILE')
s.replace(
'lib/google/cloud/bigquery/data_transfer/v1/credentials.rb',
'BIGQUERYDATATRANSFER_CREDENTIALS', 'DATA_TRANSFER_CREDENTIALS')
# https://github.com/googleapis/gapic-generator/issues/2179
# https://github.com/googleapis/gapic-generator/issues/2196
s.replace(
[
'README.md',
'lib/google/cloud/bigquery/data_transfer.rb',
'lib/google/cloud/bigquery/data_transfer/v1.rb'
],
'\\[Product Documentation\\]: https://cloud\\.google\\.com/bigquerydatatransfer\n',
'[Product Documentation]: https://cloud.google.com/bigquery/transfer/\n')
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match):
expr = re.compile('^([^`]*(`[^`]*`[^`]*)*)([^`#\\$\\\\])\\{([\\w,]+)\\}')
content = match.group(0)
while True:
content, count = expr.subn('\\1\\3\\\\\\\\{\\4}', content)
if count == 0:
return content
s.replace(
'lib/google/cloud/**/*.rb',
'\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
escape_braces)
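# Editorial example (the sample line is hypothetical): an indented generated
# doc comment such as
#     #   Returns a {Hash} of results
# is rewritten by the replacement above to
#     #   Returns a \\{Hash} of results
# (the brace is backslash-escaped so YARD no longer treats {Hash} as a link
# reference), while back-ticked spans and already escaped or interpolated
# braces (#{...}, ${...}, \{...}) are left untouched.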
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/bigquery/data_transfer/*/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
'https://googleapis.github.io/google-cloud-ruby'
)
|
coxley/trigger
|
trigger/utils/notifications/events.py
|
Python
|
bsd-3-clause
| 4,682
| 0.001495
|
# -*- coding: utf-8 -*-
"""
Event objects for the notification system.
These are intended to be used within event handlers such as
`~trigger.utils.notifications.handlers.email_handler()`.
If not customized within :setting:`NOTIFICATION_HANDLERS`, the default
notification type is an `~trigger.utils.notification.events.EmailEvent` that is
handled by `~trigger.utils.notifications.handlers.email_handler`.
"""
__author__ = 'Jathan McCollum'
__maintainer__ = 'Jathan McCollum'
__email__ = 'jathan.mccollum@teamaol.com'
__copyright__ = 'Copyright 2012-2012, AOL Inc.'
import socket
from trigger.conf import settings
# Exports
__all__ = ('Event', 'Notification', 'EmailEvent')
# Classes
class Event(object):
"""
Base class for events.
It just populates the attribute dict with all keyword arguments thrown at
the constructor.
All ``Event`` objects are expected to have a ``.handle()`` method that
willl be called by a handler function. Any user-defined event objects must
have a working ``.handle()`` method that returns ``True`` upon success or
``None`` upon a failure when handling the event passed to it.
If you specify ``required_args``, these must have a value other than
``None`` when passed to the constructor.
"""
required_args = ()
def __init__(self, **kwargs):
self.__dict__.update(kwargs) # Brute force wins!
local_vars = self.__dict__
for var, value in local_vars.iteritems():
if var in self.required_args and value is None:
raise SyntaxError('`%s` is a required argument' % var)
def __repr__(self):
return '<%s>' % self.__class__.__name__
def handle(self):
raise NotImplementedError('Define me in your subclass!')
class Notification(Event):
"""
Base class for notification events.
The ``title`` and ``message`` arguments are the only two that are required.
This is to simplify the interface when sending notifications and will cause
    notifications to be sent from the default ``sender`` to the default
    ``recipients`` that are specified within the global settings.
If ``sender`` or ``recipients`` are specified, they will override the
global defaults.
Note that this base class has no ``.handle()`` method defined.
:param title:
The title/subject of the notification
:param message:
The message/body of the notification
:param sender:
A string representing the sender of the notification (such as an email
address or a hostname)
:param recipients:
        An iterable containing strings representing the recipients of the
notification (such as a list of emails or hostnames)
:param event_status:
Whether this event is a `failure` or a `success`
"""
required_args = ('title', 'message')
status_map = {
'success': settings.SUCCESS_RECIPIENTS,
'failure': settings.FAILURE_RECIPIENTS,
}
default_sender = settings.NOTIFICATION_SENDER
def __init__(self, title=None, message=None, sender=None, recipients=None,
event_status='failure', **kwargs):
self.title = title
self.message = message
# If the sender isn't specified, use the global sender
if sender is None:
sender = self.default_sender
self.sender = sender
# We want to know whether we're sending a failure or success email
if event_status not in self.status_map:
raise SyntaxError('`event_status` must be in `status_map`')
self.event_status = event_status
# If recipients aren't specified, use the global success/failure
# recipients
if recipients is None:
recipients = self.status_map.get(self.event_status)
self.recipients = recipients
super(Notification, self).__init__(**kwargs)
self.kwargs = kwargs
class EmailEvent(Notification):
"""
An email notification event.
"""
default_sender = settings.EMAIL_SENDER
status_map = {
'success': settings.SUCCESS_EMAILS,
'failure': settings.FAILURE_EMAILS,
}
mailhost = 'localhost'
def handle(self):
        from trigger.utils.notifications import send_email
try:
# This should return True upon successfully sending email
e = self
return send_email(addresses=e.recipients, subject=e.title,
body=e.message, sender=e.sender,
mailhost=e.mailhost)
except Exception as err:
print 'Got exception', err
return None
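# Editorial usage sketch (the title/message strings are placeholders):
#
#     event = EmailEvent(title='Config push failed',
#                        message='push to edge1 failed, see logs')
#     event.handle()   # mails settings.FAILURE_EMAILS from settings.EMAIL_SENDER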
|
allenlavoie/tensorflow
|
tensorflow/contrib/model_pruning/python/pruning_test.py
|
Python
|
apache-2.0
| 9,014
| 0.006989
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the key functions in pruning library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"do_not_prune=[conv1,conv2]", "sparsity_function_end_step=100",
"target_sparsity=0.9"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
# Add global step variable to the graph
self.global_step = training_util.get_or_create_global_step()
# Add sparsity
self.sparsity = variables.Variable(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertAllEqual(p._spec.do_not_prune, ["conv1", "conv2"])
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
def testInitWithExternalSparsity(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
      self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.test_session():
      weights = variables.Variable(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.test_session() as session:
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.5, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
def _blockMasking(self, hparams, weights, expected_mask):
threshold = variables.Variable(0.0, name="threshold")
sparsity = variables.Variable(0.51, name="sparsity")
test_spec = ",".join(hparams)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
with self.test_session():
variables.global_variables_initializer().run()
_, new_mask = p._maybe_update_block_mask(weights, threshold)
# Check if the mask is the same size as the weights
self.assertAllEqual(new_mask.get_shape(), weights.get_shape())
mask_val = new_mask.eval()
self.assertAllEqual(mask_val, expected_mask)
def testBlockMasking(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]])
weights_max = constant_op.constant(
[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]])
expected_mask = [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
expected_mask)
def testBlockMaskingWithHigherDimensions(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
# Weights as in testBlockMasking, but with one extra dimension.
weights_avg = constant_op.constant(
[[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]]])
weights_max = constant_op.constant(
[[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]]])
expected_mask = [[[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.test_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.Variable(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.00, name="sparsity")
# Set up pruning
p = pruning
|
kfcpaladin/sze-the-game
|
renpy/display/joystick.py
|
Python
|
mit
| 1,793
| 0.001673
|
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file was responsible for joystick support in Ren'Py, which has
# been removed, save for a few compatibility functions.
import renpy.display
import pygame_sdl2
# Do we have a joystick enabled?
enabled = False
class JoyBehavior(renpy.display.layout.Null):
"""
This is a behavior intended for joystick calibration. If a joystick
event occurs, this returns it as a string.
"""
pass
joysticks = { }
def count():
return pygame_sdl2.joystick.get_count()
def get(n):
if n in joysticks:
return joysticks[n]
try:
joysticks[n] = pygame_sdl2.joystick.Joystick(n)
return joysticks[n]
except:
return None
rdo-management/neutron | neutron/openstack/common/eventlet_backdoor.py | Python | apache-2.0 | 4,859 | 0
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import copy
import errno
import gc
import logging
import os
import pprint
import socket
import sys
import traceback
import eventlet.backdoor
import greenlet
from oslo_config import cfg
from neutron.openstack.common._i18n import _LI
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo-config-generator.
"""
return [(None, copy.deepcopy(eventlet_backdoor_opts))]
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
        raise EventletBackdoorConfigValueError(port_range,
                                               help_for_backdoor_port, ex)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
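A quick illustrative sketch of what the three accepted backdoor_port forms parse to with _parse_port_range above (the concrete port numbers are arbitrary examples):
# Assumes this module has been imported; values are examples only.
print(_parse_port_range("0"))          # (0, 0)       -> random free port
print(_parse_port_range("4444"))       # (4444, 4444) -> exactly that port
print(_parse_port_range("8000:8010"))  # (8000, 8010) -> first unused port in range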
mindbody/API-Examples | SDKs/Python/test/test_custom_payment_method.py | Python | bsd-2-clause | 980 | 0
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.custom_payment_method import CustomPaymentMethod  # noqa: E501
from swagger_client.rest import ApiException
class TestCustomPaymentMethod(unittest.TestCase):
"""CustomPaymentMethod unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCustomPaymentMethod(self):
"""Test CustomPaymentMethod"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.custom_payment_method.CustomPaymentMethod() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
SymbiFlow/prjxray | fuzzers/031-cmt-mmcm/generate.py | Python | isc | 3,572 | 0.00056
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import json
from prjxray.segmaker import Segmaker
from prjxray import verilog
def bitfilter(frame, word):
if frame < 28:
return False
return True
def bus_tags(segmk, ps, all_params, site):
segmk.add_site_tag(site, 'IN_USE', ps['active'])
if not ps['active']:
return
params = all_params[site]["params"]
#for k in ps:
# segmk.add_site_tag(site, 'param_' + k + '_' + str(ps[k]), 1)
for reg, invert in [
('RST', 1),
('PWRDWN', 1),
('CLKINSEL', 0),
('PSEN', 1),
('PSINCDEC', 1),
]:
opt = 'IS_{}_INVERTED'.format(reg)
if invert:
segmk.add_site_tag(site, 'ZINV_' + reg, 1 ^ ps[opt])
else:
segmk.add_site_tag(site, 'INV_' + reg, ps[opt])
for opt in ['OPTIMIZED', 'HIGH', 'LOW']:
if verilog.unquote(ps['BANDWIDTH']) == opt:
segmk.add_site_tag(site, 'BANDWIDTH.' + opt, 1)
elif verilog.unquote(ps['BANDWIDTH']) == 'LOW':
segmk.add_site_tag(site, 'BANDWIDTH.' + opt, 0)
# "INTERNAL" compensation conflicts with the CLKFBOUT2IN->CLKFBIN PIP.
# There is no telling which of these two is actually controlled by those
# bits. It is better to leave them for the PIP.
COMPENSATION_OPTS = ['ZHOLD', 'BUF_IN', 'EXTERNAL']
for opt in COMPENSATION_OPTS:
val = params["COMPENSATION"] == opt
segmk.add_site_tag(site, "COMP.{}".format(opt), val)
segmk.add_site_tag(site, "COMP.Z_{}".format(opt), not val)
opt = (verilog.unquote(ps["SS_EN"]) == "TRUE")
segmk.add_site_tag(site, "SS_EN", opt)
for param in ['CLKFBOUT_MULT_F']:
paramadj = int(ps[param])
bitstr = [int(x) for x in "{0:09b}".format(paramadj)[::-1]]
for i in range(7):
segmk.add_site_tag(site, '%s[%u]' % (param, i), bitstr[i])
for param in ['CLKOUT0_DUTY_CYCLE']:
assert ps[param][:2] == '0.', ps[param]
assert len(ps[param]) == 5
paramadj = int(ps[param][2:])
        bitstr = [int(x) for x in "{0:011b}".format(paramadj)[::-1]]
for i in range(10):
segmk.add_site_tag(site, '%s[%u]' % (param, i), bitstr[i])
for param, bits in [
('CLKOUT0_DIVIDE_F', 7),
('CLKOUT1_DIVIDE', 7),
('CLKOUT2_DIVIDE', 7),
('CLKOUT3_DIVIDE', 7),
('CLKOUT4_DIVIDE', 7),
('CLKOUT5_DIVIDE', 7),
('CLKOUT6_DIVIDE', 7),
('DIVCLK_DIVIDE', 6),
]:
# 1-128 => 0-127 for actual 7 bit value
paramadj = int(ps[param])
if paramadj < 4:
continue
bitstr = [int(x) for x in "{0:09b}".format(paramadj)[::-1]]
for i in range(bits):
segmk.add_site_tag(site, '%s[%u]' % (param, i), bitstr[i])
segmk.add_site_tag(
site, 'STARTUP_WAIT',
verilog.unquote(ps['STARTUP_WAIT']) == 'TRUE')
def run():
segmk = Segmaker("design.bits")
print("Loading params")
f = open("params.json")
params = json.load(f)
params = {p["site"]: p for p in params}
print("Loading tags")
f = open('params.jl', 'r')
f.readline()
for l in f:
j = json.loads(l)
bus_tags(segmk, j, params, j['site'])
segmk.compile(bitfilter=bitfilter)
segmk.write()
run()
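An illustrative note on the per-bit tagging used in bus_tags above: integer parameters are rendered as fixed-width binary and reversed, so tag index 0 is the least significant bit (the value 5 is an arbitrary example):
# Illustrative only; mirrors the bitstr computation in bus_tags.
paramadj = 5
bitstr = [int(x) for x in "{0:09b}".format(paramadj)[::-1]]
# "{0:09b}".format(5) == "000000101"; reversed per-bit: [1, 0, 1, 0, 0, 0, 0, 0, 0]
# so e.g. CLKOUT1_DIVIDE[0] -> 1, CLKOUT1_DIVIDE[1] -> 0, CLKOUT1_DIVIDE[2] -> 1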
EnTeQuAk/dotfiles | sublime-text-3/Packages/Search in Project/searchengines/grep.py | Python | unlicense | 953 | 0.018888
### Start of fixing import paths
import os, sys, inspect
# realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"subfolder")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
# Info:
# cmd_folder = os.path.dirname(os.path.abspath(__file__)) # DO NOT USE __file__ !!!
# __file__ fails if script is called in different ways on Windows
# __file__ fails if someone does os.chdir() before
# sys.argv[0] also fails because it doesn't always contain the path
### End of fixing import paths
import base
class Grep (base.Base):
pass
engine_class = Grep
yaukwankiu/armor | tests/imageToDataTest4.py | Python | cc0-1.0 | 5,132 | 0.01286
# attempting to classify the charts, after armor/tests/imageToDataTest3.py
# Plan: 1. compute features and store them
# 2. classify
# 3. display
#
#sleepTime= 140000
sleepTime =0
import time
print time.asctime()
print 'sleeping now for ', sleepTime, 'seconds'
time.sleep(sleepTime)
import os
import time
import pickle
import numpy as np
from armor import pattern
dbz = pattern.DBZ
dp = pattern.dp
plt = pattern.plt
inputFolder = dp.defaultImageDataFolder + 'charts2-allinone-/'
imageFolder = dp.root + 'labLogs2/charts2_extracted/'
outputFolder = dp.root + 'labLogs2/charts2_features/'
try:
os.makedirs(outputFolder)
except:
print outputFolder, 'exists'
N = 500
L = os.listdir(inputFolder)
L = [v for v in L if v.startswith('2014') or v.startswith('2013')]
#L = os.listdir(imageFolder)
if 'MOS' in L[0]:
L = [l[:4] + l[5:7] + l[8:10] + '.' + l[11:15] for l in L]
else:
L = [l[:-4] for l in L]
L.sort()
print len(L)
print L[:10]
R = np.random.random(N)
R = (R*len(L)).astype(int)
R = [L[v] for v in R]
R[:10]
#R = [l[:4] + l[5:7] + l[8:10] + '.' + l[11:15] for l in R]
R[:10]
R = [dbz(v) for v in R]
R[:10]
"""
##############
# test case
a = R[0]
print a.dataTime
a.loadImage(rawImage=True)
a.show()
#
a.loadImage()
a.show()
#
a1 = a.connectedComponents()
a2 = a.above(51).connectedComponents()
a1.show(block=True)
a2.show(block=True)
# get the components
M1 = a1.matrix.max()
M2 = a2.matrix.max()
components1 = [(a1.matrix==v).sum() for v in range(M1+1)]
components2 = [(a2.matrix==v).sum() for v in range(M2+1)]
#components1 = sorted([(a1.matrix==v).sum() for v in range(M1+1)][1:], reverse=True)
#components2 = sorted([(a2.matrix==v).sum() for v in range(M2+1)][1:], reverse=True)
#components1 = [v for v in components1 if v>=100]
#components2 = [v for v in components2 if v>=10]
print sorted(components1, reverse=True)[1:]
print sorted(components2, reverse=True)[1:]
# get the moments
from armor.geometry import moments as mmt
HuPowers = np.array([2., 4., 6., 6., 12., 8., 12.])
HuPowers = (HuPowers)**-1
moments1 = np.array([mmt.HuMoments(a1.matrix==v)**HuPowers for v in range(len(components1))])
moments2 = np.array([mmt.HuMoments(a2.matrix==v)**HuPowers for v in range(len(components2))])
print moments1
print moments2
# defining the features
numberOfComponents = len([v for v in components1[1:] if v>=100]) # region of at least 100 pixels
volume = a1.matrix.sum() + a2.matrix.sum()
features = { 'dataTime' : a.dataTime,
'globalFeatures' : a1.globalShapeFeatures(lowerThreshold=1, upperThreshold=51,),
'localFeatures' : [a1.levelSet(v).globalShapeFeatures() for v in range(len(components1))], # this includes the "background"
}
pickle.dump(features, open('features_' + a.dataTime +'.pydump','w'))
# end test case
##############################
"""
###########
# later #
###########
count = 0
for imageName in L:
count +=1
dataTime = imageName
print dataTime
if os.path.exists(outputFolder+'features_' + dataTime +'.pydump'):
continue
a=dbz(dataTime)
a.loadImage()
a.show()
a1 = a.connectedComponents()
a2 = a.above(51).connectedComponents()
#if count < 1:
# print 'waiting for check'
# a1.show(block=True)
# print 'waiting for check'
# a2.show(block=True)
#elif count==3:
# print 'it runs from now on, no more a1.show(block=True)'
# get the components
M1 = a1.matrix.max()
M2 = a2.matrix.max()
components1 = [(a1.matrix==v).sum() for v in range(M1+1)]
components2 = [(a2.matrix==v).sum() for v in range(M2+1)]
print sorted(components1, reverse=True)[1:]
print sorted(components2, reverse=True)[1:]
# defining the features
numberOfComponents = len([v for v in components1[1:] if v>=100]) # region of at least 100 pixels
volume = a1.matrix.sum() + a2.matrix.sum()
synopsis = "volume: " + str(volume) +'\n'
synopsis += "major components: " + str(sorted(components1, reverse=True)[1:])
print synopsis
features = { 'dataTime' : a.dataTime,
'globalFeatures' : a1.globalShapeFeatures(lowerThreshold=1, upperThreshold=51,),
'localFeatures' : [a1.levelSet(v).globalShapeFeatures() for v in range(len(components1))],
'synopsis' : synopsis ,
}
pickle.dump(features, open(outputFolder+'features_' + a.dataTime +'.pydump','w'))
"""
for a in R:
a.imagePath = outputFolder+a.dataTime+'.png'
if os.path.exists(a.imagePath):
continue
a.loadImage()
b = a.copy()
b.loadImage(rawImage=True)
plt.subplot(121)
plt.imshow(b.matrix, origin='lower')
plt.subplot(122)
plt.imshow(a.matrix, origin='lower')
plt.title(a.dataTime)
plt.savefig(a.imagePath)
plt.show(block=False)
print 'sleeping 2 seconds'
time.sleep(2)
if N>=100:
a.matrix=np.array([0]) #free up some memory
"""
Pulgama/supriya | supriya/nonrealtime/NodeTransition.py | Python | mit | 4,163 | 0.001441
import supriya.commands
import supriya.realtime
from supriya.system.SupriyaValueObject import SupriyaValueObject
class NodeTransition(SupriyaValueObject):
"""
A non-realtime state transition.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Session Internals"
__slots__ = ("_source", "_target", "_action")
### INITIALIZER ###
def __init__(self, source=None, action=None, target=None):
if action is not None:
action = supriya.AddAction.from_expr(action)
assert isinstance(action, supriya.AddAction)
assert source is not target
if action is None:
assert source is not None
assert target is None
self._action = action
self._source = source
self._target = target
### PRIVATE METHODS ###
def _free_node(self, nodes_to_children, nodes_to_parents):
node = self.source
for child in nodes_to_children.get(node, ()) or ():
            self.free_node(child, nodes_to_children, nodes_to_parents)
parent = nodes_to_parents.get(node, None)
if node in nodes_to_children:
del nodes_to_children[node]
if node in nodes_to_parents:
del nodes_to_parents[node]
if not parent:
return
children = list(nodes_to_children[parent])
children.remove(node)
nodes_to_children[parent] = tuple(children) or None
def _move_node(self, nodes_to_children, nodes_to_parents):
assert self.target in nodes_to_children
if self.source not in nodes_to_children:
nodes_to_children[self.source] = None
old_parent = nodes_to_parents.get(self.source, None)
if old_parent:
children = list(nodes_to_children[old_parent])
children.remove(self.source)
nodes_to_children[old_parent] = tuple(children) or None
if self.action in (supriya.AddAction.ADD_AFTER, supriya.AddAction.ADD_BEFORE):
new_parent = nodes_to_parents[self.target]
else:
new_parent = self.target
nodes_to_parents[self.source] = new_parent
children = list(nodes_to_children.get(new_parent, None) or ())
if self.action == supriya.AddAction.ADD_TO_HEAD:
children.insert(0, self.source)
elif self.action == supriya.AddAction.ADD_TO_TAIL:
children.append(self.source)
elif self.action == supriya.AddAction.ADD_BEFORE:
index = children.index(self.target)
children.insert(index, self.source)
elif self.action == supriya.AddAction.ADD_AFTER:
index = children.index(self.target) + 1
children.insert(index, self.source)
nodes_to_children[new_parent] = tuple(children) or None
def _to_request(self, id_mapping):
node_id_pair = (id_mapping[self.source], id_mapping[self.target])
if self.action == supriya.AddAction.ADD_TO_HEAD:
request_class = supriya.commands.GroupHeadRequest
elif self.action == supriya.AddAction.ADD_TO_TAIL:
request_class = supriya.commands.GroupTailRequest
elif self.action == supriya.AddAction.ADD_BEFORE:
request_class = supriya.commands.NodeBeforeRequest
elif self.action == supriya.AddAction.ADD_AFTER:
request_class = supriya.commands.NodeAfterRequest
request = request_class(node_id_pairs=[node_id_pair])
return request
### PUBLIC METHODS ###
def apply_transform(self, nodes_to_children, nodes_to_parents):
if self.action is None:
self._free_node(nodes_to_children, nodes_to_parents)
else:
self._move_node(nodes_to_children, nodes_to_parents)
@classmethod
def free_node(cls, node, nodes_to_children, nodes_to_parents):
action = cls(source=node)
action.apply_transform(nodes_to_children, nodes_to_parents)
### PUBLIC PROPERTIES ###
@property
def action(self):
return self._action
@property
def source(self):
return self._source
@property
def target(self):
return self._target
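A plain-dict sketch of the bookkeeping _move_node performs for an ADD_TO_HEAD transition (illustrative only; the node names are invented for the example):
# Illustrative only: the same dict updates _move_node applies, without supriya objects.
nodes_to_children = {"root": ("group",), "group": None}
nodes_to_parents = {"group": "root"}

# Move "synth" to the head of "group" (ADD_TO_HEAD).
nodes_to_children.setdefault("synth", None)
nodes_to_parents["synth"] = "group"
children = list(nodes_to_children.get("group") or ())
children.insert(0, "synth")                      # head insertion
nodes_to_children["group"] = tuple(children) or None
# nodes_to_children == {"root": ("group",), "group": ("synth",), "synth": None}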
siddhantgoel/streaming-form-data | examples/tornado/stream_request_body.py | Python | mit | 1,487 | 0
import os.path
import tempfile
from time import time
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget, ValueTarget
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler, stream_request_body
one_hundred_gb = 100 * 1024 * 1024 * 1024
@stream_request_body
class UploadHandler(RequestHandler):
def prepare(self):
self.request.connection.set_max_body_size(one_hundred_gb)
name = 'uploaded-file-tornado-{}.dat'.format(int(time()))
self.value = ValueTarget()
self.file_ = FileTarget(os.path.join(tempfile.gettempdir(), name))
self._parser = StreamingFormDataParser(headers=self.request.headers)
self._parser.register('name', self.value)
self._parser.register('file', self.file_)
def data_received(self, chunk):
self._parser.data_received(chunk)
def post(self):
self.render(
'upload.html', name=self.value.value, filename=self.file_.filename
)
class IndexHandler(RequestHandler):
def get(self):
self.render('index.html')
def main():
handlers = [(r'/', IndexHandler), (r'/upload', UploadHandler)]
settings = dict(debug=True, template_path=os.path.dirname(__file__))
app = Application(handlers, **settings)
app.listen(9999, address='localhost')
IOLoop().current().start()
if __name__ == '__main__':
print('Listening on localhost:9999')
main()
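A possible client for the upload handler above, assuming the server is running on localhost:9999 and the requests library is installed (illustrative, not part of the example):
import requests  # assumed to be installed

with open("large-file.dat", "rb") as fileobj:
    response = requests.post(
        "http://localhost:9999/upload",
        data={"name": "large-file.dat"},   # parsed into the ValueTarget
        files={"file": fileobj},           # streamed into the FileTarget
    )
print(response.status_code)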
kevgraham7/toolbox | python/samples/git-tools/util/log_setup.py | Python | apache-2.0 | 1,033 | 0.001936
import logging
class BorgSingleton:
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
class LoggerSetup(BorgSingleton):
"""Logger setup convenience class"""
DEFAULT_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, logger_name, log_level=logging.INFO, log_file=None, log_format=DEFAULT_FORMAT):
BorgSingleton.__init__(self)
self.__logger_name = logger_name
logger = logging.getLogger(self.__logger_name)
logger.setLevel(log_level)
if log_file:
self.add_log_file(log_file, log_level, log_format)
def add_log_file(self, log_file, level=logging.INFO, log_format=DEFAULT_FORMAT):
        file_handler = logging.FileHandler(log_file)
file_handler.setLevel(level)
file_handler.setFormatter(logging.Formatter(log_format))
logging.getLogger(self.__logger_name).addHandler(file_handler)
def get_logger(self):
return logging.getLogger(self.__logger_name)
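A hypothetical usage sketch for LoggerSetup above (the logger name and file paths are arbitrary examples):
import logging

# Configure once; the Borg-shared state keeps the logger name available later.
setup = LoggerSetup("git-tools", log_level=logging.DEBUG, log_file="git-tools.log")
log = setup.get_logger()
log.debug("written to git-tools.log")

# Additional handlers can be attached afterwards, e.g. a separate error log.
setup.add_log_file("errors.log", level=logging.ERROR)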
nitely/Spirit | spirit/comment/bookmark/urls.py | Python | mit | 273 | 0
# -*- coding: utf-8 -*-
from django.conf.urls import re_path
from . import views
app_name = 'bookmark'
urlpatterns = [
    re_path(r'^(?P<topic_id>[0-9]+)/create/$', views.create, name='create'),
re_path(r'^(?P<topic_id>[0-9]+)/find/$', views.find, name='find'),
]
verma-varsha/zulip | zerver/management/commands/enqueue_digest_emails.py | Python | apache-2.0 | 728 | 0.002747
from __future__ import absolute_import
import datetime
from typing import Any, List
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.timezone import now as timezone_now
from zerver.lib.digest import enqueue_emails, DIGEST_CUTOFF
from zerver.lib.logging_util import create_logger
## Logging setup ##
logger = create_logger(__name__, settings.DIGEST_LOG_PATH, 'DEBUG')
class Command(BaseCommand):
help = """Enqueue digest emails for users that haven't checked the app
in a while.
"""
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
cutoff = timezone_now() - datetime.timedelta(days=DIGEST_CUTOFF)
enqueue_emails(cutoff)
obi-two/Rebelion | data/scripts/templates/object/mobile/shared_space_rebel_tier3_ezkiel.py | Python | mit | 452 | 0.04646
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_space_rebel_tier3_ezkiel.iff"
result.attribute_template_id = 9
result.stfName("npc_name","ishi_tib_base_male")
	#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
renzon/fatec-script | backend/appengine/pythonicos.py | Python | mit | 1,283 | 0.003118
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from random import shuffle
class Carta():
def __init__(self, numero, naipe):
self.numero = numero
self.naipe = naipe
def __repr__(self):
return '%s de %s' % (self.numero, self.naipe)
class Baralho():
def __init__(self):
        self._cartas = [Carta(numero, naipe) for numero in 'As 1 2 3 4 5 6 7 8 9 10 Q J K'.split()
for naipe in 'Ouros Espadas Copas Paus'.split()]
def __getitem__(self, index):
return self._cartas[index]
def __setitem__(self, key, value):
self._cartas[key] = value
def __len__(self):
return len(self._cartas)
print Carta('As', 'Paus')
baralho = Baralho()
baralho[55] = Carta('As', 'Paus')
shuffle(baralho)
for carta in baralho:
print carta
print baralho[0]
class Vetor():
def __init__(self, x, y):
self.y = y
self.x = x
def __repr__(self):
return '(%s, %s)' % (self.x, self.y)
def __add__(self, other):
return Vetor(self.x + other.x, self.y + other.y)
def __eq__(self, other):
return self.x==other.x and self.y==other.y
vetor1 = Vetor(1, 1)
vetor2 = Vetor(1, 1)
print vetor1 + vetor2
print vetor1 == vetor2
tensorflow/federated | tensorflow_federated/python/learning/model.py | Python | apache-2.0 | 12,401 | 0.002903
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""Abstractions for models used in federated learning."""
import abc
from typing import Any, Callable, OrderedDict, Sequence
import attr
import tensorflow as tf
MODEL_ARG_NAME = 'x'
MODEL_LABEL_NAME = 'y'
MetricFinalizersType = OrderedDict[str, Callable[[Any], Any]]
@attr.s(frozen=True, slots=True, eq=False)
class BatchOutput():
"""A structure that holds the output of a `tff.learning.Model`.
Note: All fields are optional (may be None).
Attributes:
loss: The scalar mean loss on the examples in the batch. If the model has
multiple losses, it is the sum of all the individual losses.
predictions: Tensor of predictions on the examples. The first dimension must
be the same size (the size of the batch).
num_examples: Number of examples seen in the batch.
"""
loss = attr.ib()
predictions = attr.ib()
num_examples = attr.ib()
class Model(object, metaclass=abc.ABCMeta):
"""Represents a model for use in TensorFlow Federated.
Each `Model` will work on a set of `tf.Variables`, and each method should be
a computation that can be implemented as a `tf.function`; this implies the
class should essentially be stateless from a Python perspective, as each
method will generally only be traced once (per set of arguments) to create the
corresponding TensorFlow graph functions. Thus, `Model` instances should
behave as expected in both eager and graph (TF 1.0) usage.
In general, `tf.Variables` may be either:
* Weights, the variables needed to make predictions with the model.
* Local variables, e.g. to accumulate aggregated metrics across
calls to forward_pass.
The weights can be broken down into trainable variables (variables
that can and should be trained using gradient-based methods), and
non-trainable variables (which could include fixed pre-trained layers,
or static model data). These variables are provided via the
`trainable_variables`, `non_trainable_variables`, and `local_variables`
properties, and must be initialized by the user of the `Model`.
In federated learning, model weights will generally be provided by the
server, and updates to trainable model variables will be sent back to the
server. Local variables are not transmitted, and are instead initialized
locally on the device, and then used to produce `aggregated_outputs` which
are sent to the server.
All `tf.Variables` should be introduced in `__init__`; this could move to a
`build` method more inline with Keras (see
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) in
the future.
"""
@abc.abstractproperty
def trainable_variables(self) -> Sequence[tf.Variable]:
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def non_trainable_variables(self) -> Sequence[tf.Variable]:
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def local_variables(self) -> Sequence[tf.Variable]:
"""An iterable of `tf.Variable` objects, see class comment for details."""
pass
@abc.abstractproperty
def input_spec(self):
"""The type specification of the `batch_input` parameter for `forward_pass`.
A nested structure of `tf.TensorSpec` objects, that matches the structure of
arguments that will be passed as the `batch_input` argument of
`forward_pass`. The tensors must include a batch dimension as the first
dimension, but the batch dimension may be undefined.
If `input_spec` is an instance of `collections.abc.Mapping`, this mapping
must have an `{}` element which corresponds to the input to
`predict_on_batch` and a `{}` element containing the batch labels.
Otherwise the first positional element of `input_spec` must correspond to
the input to `predict_on_batch`, and the second positional element the
labels.
Similar in spirit to `tf.keras.models.Model.input_spec`.
""".format(MODEL_ARG_NAME, MODEL_LABEL_NAME)
pass
@abc.abstractmethod
def forward_pass(self, batch_input, training=True) -> BatchOutput:
"""Runs the forward pass and returns results.
This method must be serializable in a `tff.tf_computation` or other backend
decorator. Any pure-Python or unserializable logic will not be runnable in
the federated system.
This method should not modify any variables that are part of the model
parameters, that is, variables that influence the predictions (exceptions
being updated, rather than learned, parameters such as BatchNorm means and
variances). Rather, this is done by the training loop. However, this method
may update aggregated metrics computed across calls to `forward_pass`; the
final values of such metrics can be accessed via `aggregated_outputs`.
Uses in TFF:
* To implement model evaluation.
* To implement federated gradient descent and other
non-Federated-Averaging algorithms, where we want the model to run the
forward pass and update metrics, but there is no optimizer
(we might only compute gradients on the returned loss).
* To implement Federated Averaging.
Args:
batch_input: A nested structure that matches the structure of
`Model.input_spec` and each tensor in `batch_input` satisfies
`tf.TensorSpec.is_compatible_with()` for the corresponding
`tf.TensorSpec` in `Model.input_spec`.
training: If `True`, run the training forward pass, otherwise, run in
evaluation mode. The semantics are generally the same as the `training`
argument to `keras.Model.call`; this might e.g. influence how dropout or
batch normalization is handled.
Returns:
A `BatchOutput` object. The object must include the `loss` tensor if the
model will be trained via a gradient-based algorithm.
"""
pass
@abc.abstractmethod
def predict_on_batch(self, batch_input, training=True):
"""Performs inference on a batch, produces predictions.
Unlike `forward_pass`, this function must _not_ mutate any variables
(including metrics) when `training=False`, as it must support conversion to
a TFLite flatbuffer for inference. When `training=True` this supports cases
    such as BatchNorm mean and variance updates or dropout. In many cases this
method will be called from `forward_pass` to produce the predictions, and
`forward_pass` will further compute loss and metrics updates.
    Note that this implies `batch_input` will have a *different* signature for
`predict_on_batch` than for `forward_pass`; see the args section of this
documentation for a specification of the relationship.
Args:
batch_input: A nested structure of tensors that holds the prediction
inputs for the model. The structure must match the first element of the
structure of `Model.input_spec`, or the '{}' key if `Model.input_spec`
is a mapping. Each tensor in `x` satisfies
`tf.TensorSpec.is_compatible_with()` for the corresponding
`tf.TensorSpec` in `Model.input_spec`.
training: If `True`, allow updatable variables (e.g. BatchNorm variances
        and means) to be updated. Otherwise, run in inference only mode with no
variables mutated. The semantics are generally the same as the
`training` argument to `keras.Model.`; this might e.g. influenc
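An illustrative sketch of the input_spec convention the docstrings above describe, for a hypothetical model with 784 input features and integer labels:
import collections
import tensorflow as tf

# 'x'/'y' match MODEL_ARG_NAME and MODEL_LABEL_NAME; the shapes are examples,
# and the leading batch dimension is left undefined as the docstring allows.
input_spec = collections.OrderedDict(
    x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
    y=tf.TensorSpec(shape=[None, 1], dtype=tf.int32),
)
# forward_pass receives the whole structure; predict_on_batch receives only
# the 'x' element (or the first positional element for non-mapping specs).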
utn-frm-si/reservas | app_reservas/urls.py | Python | mit | 4,160 | 0
# coding=utf-8
from django.conf.urls import url
from .views import (
AliTemplateView,
AliVideoconferenciasDetailView,
AreaDetailView,
    AulaDetailView,
CuerpoDetailView,
IndexView,
LaboratorioDetailView,
LaboratorioInformaticoDetailView,
LaboratorioInformaticoListView,
NivelDetailView,
recurso_eventos_json,
RecursoAliDetailView,
SolicitudAliReclamosSugerencias,
SolicitudAulaView,
SolicitudInstalacionSoftwareView,
SolicitudLaboratorioInformaticoView,
SolicitudMaterialMultimediaView,
TipoLaboratorioDetailView,
TipoRecursoAliDetailView,
TvCuerposListView,
TvVisorCuerposDetailView,
TvVisorDetailView,
)
urlpatterns = [
url(
r'^$',
IndexView.as_view(),
name='index'
),
url(
r'^cuerpo/(?P<numero>\d+)/$',
CuerpoDetailView.as_view(),
name='cuerpo_detalle'
),
url(
r'^cuerpo/(?P<numero_cuerpo>\d+)/nivel/(?P<numero_nivel>-?\d+)/$',
NivelDetailView.as_view(),
name='nivel_detalle'
),
url(
r'^aula/(?P<pk>\d+)/$',
AulaDetailView.as_view(),
name='aula_detalle'
),
url(
r'^area/(?P<slug>[-\w]+)/$',
AreaDetailView.as_view(),
name='area_detalle'
),
url(
r'^recurso/(?P<pk>\d+)/eventos/$',
recurso_eventos_json,
name='recurso_eventos_json'
),
url(
r'^ali/$',
AliTemplateView.as_view(),
name='ali_index'
),
url(
r'^ali/videoconferencias/$',
AliVideoconferenciasDetailView.as_view(),
name='ali_videoconferencias_detalle'
),
url(
r'^laboratorios/informatica/$',
LaboratorioInformaticoListView.as_view(),
name='laboratorio_informatico_listado'
),
url(
r'^laboratorio/informatica/(?P<alias>[A-Za-z0-9]+)/$',
LaboratorioInformaticoDetailView.as_view(),
name='laboratorio_informatico_detalle'
),
url(
r'^laboratorios/(?P<slug>[-\w]+)/$',
TipoLaboratorioDetailView.as_view(),
name='tipo_laboratorio_detalle'
),
url(
r'^laboratorio/(?P<tipo>[A-Za-z0-9]+)/(?P<alias>[A-Za-z0-9]+)/$',
LaboratorioDetailView.as_view(),
name='laboratorio_detalle'
),
url(
r'^ali/(?P<slug>[-\w]+)/$',
TipoRecursoAliDetailView.as_view(),
name='tipo_recurso_ali_detalle'
),
url(
r'^ali/(?P<tipo>[-\w]+)/(?P<identificador>[A-Za-z0-9_-]+)/$',
RecursoAliDetailView.as_view(),
name='recurso_ali_detalle'
),
url(
r'^solicitud/ali/reclamos_sugerencias/$',
SolicitudAliReclamosSugerencias.as_view(),
name='solicitud_ali_reclamos_sugerencias'
),
url(
r'^solicitud/aula/$',
SolicitudAulaView.as_view(),
name='solicitud_aula'
),
url(
r'^solicitud/instalacion_software/$',
SolicitudInstalacionSoftwareView.as_view(),
name='solicitud_instalacion_software'
),
url(
r'^solicitud/laboratorio/informatica/$',
SolicitudLaboratorioInformaticoView.as_view(),
name='solicitud_laboratorio_informatico'
),
url(
r'^solicitud/material_multimedia/$',
SolicitudMaterialMultimediaView.as_view(),
name='solicitud_material_multimedia'
),
url(
r'^tv/cuerpos/$',
TvCuerposListView.as_view(),
name='tv_cuerpos'
),
url(
r'^tv/visor/(?P<slug>[-\w]+)/$',
TvVisorDetailView.as_view(),
name='tv_visor'
),
url(
r'^tv/visor/(?P<slug>[-\w]+)/cuerpos/$',
TvVisorCuerposDetailView.as_view(),
name='tv_visor_cuerpos'
),
    # TODO: Remove. These views are obsolete due to the VisorTv views. They are
    # only kept for compatibility with the viewers currently in use.
url(
r'^tv/bedelia/(?P<slug>[-\w]+)/$',
TvVisorDetailView.as_view(),
name='tv_bedelia'
),
url(
r'^tv/bedelia/(?P<slug>[-\w]+)/cuerpos/$',
TvVisorCuerposDetailView.as_view(),
name='tv_bedelia_cuerpos'
),
]
mitodl/open-discussions | course_catalog/management/commands/print_course_duplicates_yaml.py | Python | bsd-3-clause | 397 | 0
"""Management command for uploading master json data for OCW courses"""
from django.core.management import BaseCommand
from course_catalog.etl.deduplication import generate_duplicates_yaml
class Command(BaseCommand):
"""Print course duplicates yaml"""
help = "Print course
|
duplicates yaml"
def handle(self, *args, **options):
self.stdout.write(generate_duplicates_yaml())