repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Miaou/PyPlagia | libplagia/testLib.py | Python | gpl-3.0 | 1,092 | 0.019337 | #!/usr/bin/python3
# Test de la lib
# Il fa | udrait des tests plus sérieux, en particulier pour vérifier que je me suis pas planté dans les indices dans les algos...
# (ce qui est arrivé au moins une fois)
# Et tester la symétrie
import ctypes
from sys import platform
#import random
import time
if platform.startswith('linux'): # win32
LIB_PATH='./libpla | gia.so' # Le './' est important
else:
LIB_PATH='libplagia.dll'
if __name__=='__main__':
# strAlignScore("dudu dodo toto", "toto")
libplagia = ctypes.cdll.LoadLibrary(LIB_PATH)
lToto = (ctypes.c_uint*4)(*list(map(ord,"toto")))
lBuf = (ctypes.c_uint*5)() # 4+1 pour les méthodes ilar, 4 pour sim.
lBufP = (ctypes.c_uint*5)()
lBig = (ctypes.c_uint*14)(*list(map(ord,"dudu dodo toto")))
print( libplagia.simAGetAlignScore(lBig, 14, lToto, 4, lBuf, lBufP) )
print( libplagia.simBGetAlignScore(lBig, 14, lToto, 4, lBuf, lBufP) )
print( libplagia.ilarAGetAlignScore(lBig, 14, lToto, 4, lBuf, lBufP) )
print( libplagia.ilarBGetAlignScore(lBig, 14, lToto, 4, lBuf, lBufP) )
|
ezequielpereira/Time-Line | timelinelib/drawing/interface.py | Python | gpl-3.0 | 3,967 | 0.000252 | # Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
"""
Defines the interface that drawers should adhere to.
"""
class Drawer(object):
"""
Draw timeline onto a device context and provide information about drawing.
"""
def draw(self, dc, timeline, view_properties, config):
"""
| Draw a representation of a timeline.
The dc is used to do the actual drawing. The timeline is used to get
the events to visualize. The view properties contains information like
which events are selected in the view we are drawing for and what
period is currently displayed.
When the dc is temporarily stored in a class variable such as self.dc,
this class v | ariable must be deleted before the draw method ends.
"""
def use_fast_draw(self, value):
self.fast_draw = value
def event_is_period(self, time_period):
"""
Return True if the event time_period will make the event appear
below the center line, as a period event.
"""
return None
def snap(self, time, snap_region=10):
"""Snap time to minor strip if within snap_region pixels."""
return time
def snap_selection(self, period_selection):
"""
Return a tuple where the selection has been stretched to fit to minor
strip.
period_selection: (start, end)
Return: (new_start, new_end)
"""
return period_selection
def event_at(self, x, y):
"""
Return the event at pixel coordinate (x, y) or None if no event there.
"""
return None
def event_with_rect_at(self, x, y):
"""
Return the event at pixel coordinate (x, y) and its rect in a tuple
(event, rect) or None if no event there.
"""
return None
def event_rect_at(self, event):
"""
Return the rect for the given event or None if no event isn't found.
"""
return None
def is_balloon_at(self, event, x, y):
"""
Return True if a balloon for event is drawn at (x, y), otherwise False.
"""
def get_closest_overlapping_event(self, event_to_move, up=True):
raise NotImplementedError()
class Strip(object):
"""
An interface for strips.
The different strips are implemented in subclasses below.
The timeline is divided in major and minor strips. The minor strip might
for example be days, and the major strip months. Major strips are divided
with a solid line and minor strips with dotted lines. Typically maximum
three major strips should be shown and the rest will be minor strips.
"""
def label(self, time, major=False):
"""
Return the label for this strip at the given time when used as major or
minor strip.
"""
def start(self, time):
"""
Return the start time for this strip and the given time.
For example, if the time is 2008-08-31 and the strip is month, the
start would be 2008-08-01.
"""
def increment(self, time):
"""
Increment the given time so that it points to the start of the next
strip.
"""
def is_day(self):
return False
|
elsevierlabs-os/soda | src/main/python/sodaclient_test.py | Python | apache-2.0 | 2,973 | 0.006054 | # -*- coding: utf-8 -* | -
import unittest
import sodaclient
SODA_URL = "http://localhost:8080"
LEXICON_NAME = "test_countries-3"
MATCHINGS = | ["exact", "lower", "stop", "stem1", "stem2", "stem3"]
LOOKUP_ID = "http://test-countries-3.com/ABW"
TEXT = "Institute of Clean Coal Technology, East China University of Science and Technology, Shanghai 200237, China"
PHRASE = "Emirates"
PHRASE_MATCHINGS = ["lsort", "s3sort"]
PHRASE_MATCHINGS.extend(MATCHINGS)
class SodaClientTest(unittest.TestCase):
def test_001_index(self):
soda_client = sodaclient.SodaClient(SODA_URL)
index_resp = soda_client.index()
self.assertEquals("ok", index_resp["status"])
def test_002_add(self):
soda_client = sodaclient.SodaClient(SODA_URL)
fin = open("../resources/test-countries.tsv", "r")
num_loaded = 0
for line in fin:
id, syns = line.strip().split("\t")
names = syns.split("|")
id = id.replace("test-countries", "test-countries-3")
commit = num_loaded % 100 == 0
add_resp = soda_client.add(LEXICON_NAME, id, names, commit)
self.assertEquals("ok", add_resp["status"])
num_loaded += 1
add_resp = soda_client.add(LEXICON_NAME, None, None, True)
self.assertEquals("ok", add_resp["status"])
def test_003_dicts(self):
soda_client = sodaclient.SodaClient(SODA_URL)
dict_resp = soda_client.dicts()
self.assertEquals("ok", dict_resp["status"])
def test_004_annot(self):
soda_client = sodaclient.SodaClient(SODA_URL)
for matching in MATCHINGS:
annot_resp = soda_client.annot(LEXICON_NAME, TEXT, matching)
self.assertEquals("ok", annot_resp["status"])
def test_005_coverage(self):
soda_client = sodaclient.SodaClient(SODA_URL)
for matching in MATCHINGS:
coverage_resp = soda_client.coverage(TEXT, matching)
self.assertEquals("ok", coverage_resp["status"])
def test_006_lookup(self):
soda_client = sodaclient.SodaClient(SODA_URL)
lookup_resp = soda_client.lookup(LEXICON_NAME, LOOKUP_ID)
self.assertEquals("ok", lookup_resp["status"])
self.assertEquals(1, len(lookup_resp["entries"]))
def test_007_rlookup(self):
soda_client = sodaclient.SodaClient(SODA_URL)
for matching in PHRASE_MATCHINGS:
rlookup_resp = soda_client.rlookup(LEXICON_NAME, PHRASE, matching)
self.assertEquals("ok", rlookup_resp["status"])
def test_008_delete(self):
soda_client = sodaclient.SodaClient(SODA_URL)
delete_resp_single = soda_client.delete(LEXICON_NAME, LOOKUP_ID)
self.assertEquals("ok", delete_resp_single["status"])
delete_resp = soda_client.delete(LEXICON_NAME, "*")
self.assertEquals("ok", delete_resp["status"])
if __name__ == "__main__":
unittest.main()
|
jainaman224/zenodo | tests/unit/records/test_schemas_csl.py | Python | gpl-2.0 | 5,246 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo CSL mapping test."""
from __future__ import absolute_import, print_function
from datetime import datetime
from invenio_records.api import Record
from zenodo.modules.records.serializers import csl_v1
def test_minimal(db, minimal_record, recid_pid):
"""Test minimal record."""
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
d = datetime.utcnow().date()
assert obj == {
'id': '123',
'type': 'article',
'title': 'Test',
'abstract': 'My description',
'author': [
{'family': 'Test'},
],
'issued': {
'date-parts': [[d.year, d.month, d.day]]
}
}
def test_type(db, minimal_record, recid_pid):
""""Test type."""
minimal_record.update({
'resource_type': {'type': 'publication', 'subtype': 'thesis'}
})
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['type'] == 'thesis'
minimal_re | cord.update({
'resource_type': {'type': 'publication'}
})
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['type'] == 'article'
minimal_record.update({
'resource_typ | e': {'type': 'image'}
})
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['type'] == 'graphic'
def test_author(db, minimal_record, recid_pid):
""""Test author."""
minimal_record['creators'] = []
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['author'] == []
minimal_record['creators'] = [
{'familyname': 'TestFamily1', 'givennames': 'TestGiven1'},
{'familyname': 'TestFamily2', 'name': 'TestName2'},
{'name': 'TestName3'},
]
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['author'] == [
{'family': 'TestFamily1', 'given': 'TestGiven1'},
{'family': 'TestName2'},
{'family': 'TestName3'},
]
def test_identifiers(db, minimal_record, recid_pid):
""""Test identifiers."""
minimal_record['doi'] = '10.1234/foo'
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['DOI'] == '10.1234/foo'
assert 'publisher' not in obj
minimal_record['doi'] = '10.5281/foo'
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['DOI'] == '10.5281/foo'
assert obj['publisher'] == 'Zenodo'
minimal_record['imprint'] = {'isbn': '978-1604598933'}
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['ISBN'] == '978-1604598933'
minimal_record['alternate_identifiers'] = [{
'identifier': 'ISSN 0264-2875',
'scheme': 'issn'
}]
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['ISSN'] == 'ISSN 0264-2875'
def test_journal(db, minimal_record, recid_pid):
"""Test journal record."""
minimal_record['journal'] = {
'volume': '42',
'issue': '7',
'title': 'Journal title',
'pages': '10-20',
}
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['container_title'] == 'Journal title'
assert obj['volume'] == '42'
assert obj['issue'] == '7'
assert obj['page'] == '10-20'
def test_part_of(db, minimal_record, recid_pid):
"""Test journal record."""
minimal_record['part_of'] = {
'title': 'Conference proceedings title',
'pages': '10-20',
}
minimal_record['imprint'] = {
'publisher': 'The Good Publisher',
'place': 'Somewhere',
}
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['container_title'] == 'Conference proceedings title'
assert obj['page'] == '10-20'
assert obj['publisher'] == 'The Good Publisher'
assert obj['publisher_place'] == 'Somewhere'
def test_other(db, minimal_record, recid_pid):
"""Test other fields."""
minimal_record['language'] = 'en'
minimal_record['notes'] = 'Test note'
minimal_record['imprint'] = {
'publisher': 'Zenodo',
}
obj = csl_v1.transform_record(recid_pid, Record(minimal_record))
assert obj['language'] == 'en'
assert obj['note'] == 'Test note'
assert obj['publisher'] == 'Zenodo'
|
lerker/cupydle | cupydle/dnn/gridSearch.py | Python | apache-2.0 | 4,741 | 0.003586 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Ponzoni, Nelson"
__copyright__ = "Copyright 2015"
__credits__ = ["Ponzoni Nelson"]
__maintainer__ = "Ponzoni Nelson"
__contact__ = "npcuadra@gmail.com"
__email__ = "npcuadra@gmail.com"
__license__ = "GPL"
__version__ = "1.0.0"
__status__ = "Production"
"""
GRID search
"""
from collections import Mapping
from functools import partial, reduce
import operator
from itertools import product
import numpy as np
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': Tru | e}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... | {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
if __name__ == '__main__':
param_grid = {'a': [1, 2], 'b': [True, False]}
a = ParameterGrid(param_grid)
print(list(a))
print(len(a))
print(a[1])
print(a)
|
oliverdrake/brewtoad-scraper | tests.py | Python | mit | 2,584 | 0 | import mock
from nose.tools import assert_equals, raises
from scrapebrewtoad import Brewtoad
user = "me"
password = "password"
session = mock.Mock()
session.post.return_value.url = "http://dummyurl.com/blah/12345/"
@mock.patch("pyquery.PyQuery")
def test_init_stores_params_and_calls_login(PyQuery):
toad = Brewtoad(username_email=user, password=password, session=session)
assert_equals(toad.username_email, user)
assert_equals(toad.password, password)
assert_equals(toad.session, session)
@mock.patch("pyquery.PyQuery")
@mock.patch("requests.Session")
def test_init_uses_requests_session(Session, PyQuery):
Session.return_value = session
toad = Brewtoad(username_email=user, password=password, session=None)
assert_equals(toad.session, Session() | )
@raises(ValueError)
@mock.patch("pyquery.PyQuery")
def test_init_raises_Val | ueError_if_login_post_returns_bad_response(PyQuery):
# If server doesn't return a url with a user id embedded:
with mock.patch.object(
session.post.return_value, 'url', "http://dummyurl.com/"):
Brewtoad(username_email=user, password=password, session=session)
@mock.patch("pyquery.PyQuery")
def test_init_submits_correct_login_form_data(PyQuery):
Brewtoad(username_email=user, password=password, session=session)
token = Brewtoad.get_authenticity_token('')
session.post.assert_called_with(
Brewtoad.SIGN_IN_URL,
data={
'user[login]': user,
'user[password]': password,
"commit": "Sign+in",
"authenticity_token": token,
})
@mock.patch("pyquery.PyQuery")
def test_init_sets_userid(PyQuery):
toad = Brewtoad(username_email=user, password=password, session=session)
assert_equals(toad.userid, 12345)
@raises(ValueError)
@mock.patch.object(Brewtoad, '_login')
def test_user_recipes_login_not_called(login):
toad = Brewtoad(username_email=user, password=password, session=session)
toad.user_recipes
@mock.patch("pyquery.PyQuery")
def test_get_authenticity_token(PyQuery):
token = Brewtoad.get_authenticity_token('dummyurl')
PyQuery.assert_called_with(url='dummyurl')
PyQuery().assert_called_with('[name="authenticity_token"]')
assert_equals(token, PyQuery()().val())
@mock.patch("pyquery.PyQuery")
def test_user_recipes(PyQuery):
PyQuery()().filter.return_value = [mock.Mock() for i in range(2)]
toad = Brewtoad(username_email=user, password=password, session=session)
expected = [e.text for e in PyQuery()().filter()]
assert_equals(toad.user_recipes, expected)
|
nevil-brownlee/pypy-libtrace | lib/natkit/try-dict.py | Python | gpl-3.0 | 2,776 | 0.009006 |
import plt, ipp
import os, string
print "Starting try-trace.py: dir() = %s" % dir()
# try-trace.py: test program for pypy plt
print "- - - - -"
print "NO_COMRESSION = %d" % plt.NO_COMPRESSION
#base = "/Users/jbro111" # OSX
base = "/home/nevil" # Ubuntu
#fn = "pypy/small-sample.erf"
#fn = "tcp-analyse/fdt-p5.pcap"
#fn = "pypy/small-sample.pcap"
fn = "pypy/1000packets.pcap.gz"
full_fn = base + '/' + fn
print "%s: isfile %s" % (full_fn, os.path.isfile(full_fn))
#try:
# with open(full_fn) as file | :
# print "File opened OK"
# file.close()
#except IOError as e:
# print "Unable to open file" #Does not exist OR no read permissions
trace_format = "pcapfile"
#trace_format = "erf"
uri = trace_format + ':' + full_fn
print ">> uri = %s" % uri
t = plt.trace(uri)
t.start()
test_dict = {}
def print_first(s, n):
for x in range(n):
if x%8 == 0:
print "",
print "%02x" % ord(s[x]),
for n,pkt in enumerate | (t):
ip = pkt.ip
print "--- n=%d ---" % n
print "pkt linktype %d, ethertype %04x, vlan_id %d" % (
pkt.linktype, pkt.ethertype, pkt.vlan_id)
print "ip.seconds = %.6f, ip.ts_sec = %d, ip.time = %s" % (
ip.seconds, ip.ts_sec, ip.time)
print "ip.erf_time = %s" % ip.erf_time
print "ip.wire_len = %s, ip.capture_len = %s, direction = %s" % (
ip.wire_len, ip.capture_len, ip.direction)
ba = ip.data
print "@@ 1 @@ ba = %s" % ba
print "IP.data:",
for b in ba:
print "%02x" % b, # ba[x],
print
sa = ip.src_prefix; da = ip.dst_prefix
print "*** %s -> %s" % (sa, da)
print "sa.addr = %s" % sa.addr[0:4]
for v in sa.addr:
print "%02x" % v,
print
print "- - - - -"
bsa = bytearray(sa.addr)
for v in bsa:
print "%02x" % v,
print
print "ba = %s" % plt.ipp.IPprefix(4, bsa)
# If we import plt, ipp above, we couild say ipp.IPprefix here
print "= = = = ="
#exit()
s = str(sa.addr) + '|' + str(da.addr) # Have to convert to str explicitly
print "*** %s -> %s %s" % (sa, da, s)
print_first(s, 9)
print " ident = %04x" % ip.ident
v = test_dict.get(s)
if not v:
test_dict[s] = 1
else:
test_dict[s] = v+1
if n == 0: # Zero-org
break
print "EOF - - -"
#exit()
def ca2str(cdata_array):
s = string.join(cdata_array, '')
n = string.find(s, '\x00')
return s[0:n]
keys = sorted(test_dict)
for k in keys:
print "%8d %s" % (test_dict[k], k)
ka = k.split('*')
for j in range(0,4):
print "%02x" % ord(ka[0][j]),
print
psa = ipp.IPprefix(4, bytearray(ka[0]))
print "psa = %s" % psa
print "%8d %s -> %s" % (test_dict[k],
ka[0], ka[1])
|
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/test/test_application.py | Python | bsd-3-clause | 24,190 | 0.002728 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, copy, os, pickle, warnings
from twisted.trial import unittest, util
from twisted.application import service, internet, app
from twisted.persisted import sob
from twisted.python import log, usage
from twisted.python.util import sibpath
from twisted.internet import interfaces, defer
from twisted.protocols import wire, basic
from twisted.internet import protocol, reactor
from twisted.internet.utils import getProcessOutputAndValue
from twisted.application import reactors
try:
from twisted.web import microdom
gotMicrodom = True
except ImportError:
warnings.warn("Not testing xml persistence as twisted.web.microdom "
"not available")
gotMicrodom = False
oldAppSuppressions = [util.suppress(message='twisted.internet.app is deprecated',
category=DeprecationWarning)]
class Dummy:
processName=None
class TestService(unittest.TestCase):
def testName(self):
s = service.Service()
s.setName("hello")
self.failUnlessEqual(s.name, "hello")
def testParent(self):
s = service.Service()
p = service.MultiService()
s.setServiceParent(p)
self.failUnlessEqual(list(p), [s])
self.failUnlessEqual(s.parent, p)
def testApplicationAsParent(self):
s = service.Service()
p = service.Application("")
s.setServiceParent(p)
self.failUnlessEqual(list(service.IServiceCollection(p)), [s])
self.failUnlessEqual(s.parent, service.IServiceCollection(p))
def testNamedChild(self):
s = service.Service()
p = service.MultiService()
s.setName("hello")
s.setServiceParent(p)
self.failUnlessEqual(list(p), [s])
self.failUnlessEqual(s.parent, p)
self.failUnlessEqual(p.getServiceNamed("hello"), s)
def testDoublyNamedChild(self):
s = service.Service()
p = service.MultiService()
s.setName("hello")
s.setServiceParent(p)
self.failUnlessRaises(RuntimeError, s.setName, "lala")
def testDuplicateNamedChild(self):
s = service.Service()
p = service.MultiService()
s.setName("hello")
s.setServiceParent(p)
s = service.Service()
s.setName("hello")
self.failUnlessRaises(RuntimeError, s.setServiceParent, p)
def testDisowning(self):
s = service.Service()
p = service.MultiService()
s.setServiceParent(p)
self.failUnlessEqual(list(p), [s])
self.failUnlessEqual(s.parent, p)
s.disownServiceParent()
self.failUnlessEqual(list(p), [])
self.failUnlessEqual(s.parent, None)
def testRunning(self):
s = service.Service()
self.assert_(not s.running)
s.startService()
self.assert_(s.running)
s.stopService()
self.assert_(not s.running)
def testRunningChildren(self):
s = service.Service()
p = service.MultiService()
s.setServiceParent(p)
self.assert_(not s.running)
self.assert_(not p.running)
p.startService()
self.assert_(s.running)
self.assert_(p.running)
p.stopService()
self.assert_(not s.running)
self.assert_(not p.running)
def testRunningChildren(self):
s = service.Service()
def checkRunning():
self.assert_(s.running)
t = service.Service()
t.stopService = checkRunning
t.startService = checkRunning
p = service.MultiService()
s.setServiceParent(p)
t.setServiceParent(p)
p.startService()
p.stopService()
def testAddingIntoRunning(self):
p = service.MultiService()
p.startService()
s = service.Service()
self.assert_(not s.running)
s.setServiceParent(p)
self.assert_(s.running)
s.disownServiceParent()
self.assert_(not s.running)
def testPrivileged(self):
s = service.Service()
def pss():
s.privilegedStarted = 1
s.privilegedStartService = pss
s1 = service.Service()
p = service.MultiService()
s.setServiceParent(p)
s1.setServiceParent(p)
p.privilegedStartService()
self.assert_(s.privilegedStarted)
def testCopying(self):
s = service.Service()
s.startService()
s1 = copy.copy(s)
self.assert_(not s1.running)
self.assert_(s.running)
if hasattr(os, "getuid"):
cur | uid = os.getuid()
curgid = os.getgid()
else:
curuid = curgid = 0
class TestProcess(unittest.TestCase):
def testID(self):
p = service.Process(5, 6)
self.assertEqual(p.uid, 5)
| self.assertEqual(p.gid, 6)
def testDefaults(self):
p = service.Process(5)
self.assertEqual(p.uid, 5)
self.assertEqual(p.gid, None)
p = service.Process(gid=5)
self.assertEqual(p.uid, None)
self.assertEqual(p.gid, 5)
p = service.Process()
self.assertEqual(p.uid, None)
self.assertEqual(p.gid, None)
def testProcessName(self):
p = service.Process()
self.assertEqual(p.processName, None)
p.processName = 'hello'
self.assertEqual(p.processName, 'hello')
class TestInterfaces(unittest.TestCase):
def testService(self):
self.assert_(service.IService.providedBy(service.Service()))
def testMultiService(self):
self.assert_(service.IService.providedBy(service.MultiService()))
self.assert_(service.IServiceCollection.providedBy(service.MultiService()))
def testProcess(self):
self.assert_(service.IProcess.providedBy(service.Process()))
class TestApplication(unittest.TestCase):
def testConstructor(self):
service.Application("hello")
service.Application("hello", 5)
service.Application("hello", 5, 6)
def testProcessComponent(self):
a = service.Application("hello")
self.assertEqual(service.IProcess(a).uid, None)
self.assertEqual(service.IProcess(a).gid, None)
a = service.Application("hello", 5)
self.assertEqual(service.IProcess(a).uid, 5)
self.assertEqual(service.IProcess(a).gid, None)
a = service.Application("hello", 5, 6)
self.assertEqual(service.IProcess(a).uid, 5)
self.assertEqual(service.IProcess(a).gid, 6)
def testServiceComponent(self):
a = service.Application("hello")
self.assert_(service.IService(a) is service.IServiceCollection(a))
self.assertEqual(service.IService(a).name, "hello")
self.assertEqual(service.IService(a).parent, None)
def testPersistableComponent(self):
a = service.Application("hello")
p = sob.IPersistable(a)
self.assertEqual(p.style, 'pickle')
self.assertEqual(p.name, 'hello')
self.assert_(p.original is a)
class TestLoading(unittest.TestCase):
def test_simpleStoreAndLoad(self):
a = service.Application("hello")
p = sob.IPersistable(a)
for style in 'xml source pickle'.split():
if style == 'xml' and not gotMicrodom:
continue
p.setStyle(style)
p.save()
a1 = service.loadApplication("hello.ta"+style[0], style)
self.assertEqual(service.IService(a1).name, "hello")
open("hello.tac", 'w').writelines([
"from twisted.application import service\n",
"application = service.Application('hello')\n",
])
a1 = service.loadApplication("hello.tac", 'python')
self.assertEqual(service.IService(a1).name, "hello")
class TestAppSupport(unittest.TestCase):
def testPassphrase(self):
self.assertEqual(app.getPassphrase(0), None)
def testLoadApplication(self):
a = service.Application("hello")
baseconfig = {'file': None, 'xml': None, 'source': None, 'python':None}
for style in 'source xml pickle'.split():
if style == 'xml' and not gotMicrodom:
continue
co |
benjamin9999/python-stix | stix/bindings/extensions/vulnerability/cvrf_1_1.py | Python | bsd-3-clause | 25,422 | 0.005035 | # Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Apr 11 15:07:59 2013 by generateDS.py version 2.9a.
#
import sys
import getopt
import re as re_
import stix.bindings.exploit_target as exploit_target_binding
import base64
from datetime import datetime, tzinfo, timedelta
XML_NS = "http://stix.mitre.org/extensions/Vulnerability#CVRF-1"
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser(huge_tree=True)
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(tzinfo):
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if isinstance(input_data, basestring):
return input_data
if input_data.microsecond == 0:
_svalue = input_data.strftime('%Y-%m-%d | T%H:%M:%S')
else:
_svalue = input_data.strftime('%Y-%m-%dT%H:%M:%S.%f')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_secon | ds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_parse_datetime(self, input_data, node, input_name=''):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
return dt.replace(tzinfo = tz)
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = input_data.strftime('%Y-%m-%d')
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
|
open-pli/enigma2 | lib/python/Screens/InfoBar.py | Python | gpl-2.0 | 22,960 | 0.029268 | from Tools.Profile import profile
from Tools.BoundFunction import boundFunction
from enigma import eServiceReference
# workaround for required config entry dependencies.
import Screens.MovieSelection
from Screen import Screen
from Screens.MessageBox import MessageBox
profile("LOAD:enigma")
import enigma
profile("LOAD:InfoBarGenerics")
from Screens.InfoBarGenerics import InfoBarShowHide, \
InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarRdsDecoder, \
InfoBarEPG, InfoBarSeek, InfoBarInstantRecord, InfoBarRedButton, InfoBarTimerButton, InfoBarVmodeButton, \
InfoBarAudioSelection, InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarUnhandledKey, \
InfoBarSubserviceSelection, InfoBarShowMovies, InfoBarTimeshift, \
InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport, InfoBarBuffer, \
InfoBarSummarySupport, InfoBarMoviePlayerSummarySupport, InfoBarTimeshiftState, InfoBarTeletextPlugin, InfoBarExtensions, \
InfoBarSubtitleSupport, InfoBarPiP, InfoBarPlugins, InfoBarServiceErrorPopupSupport, InfoBarJobman, InfoBarPowersaver, \
InfoBarHDMI, setResumePoint, delResumePoint
from Screens.Hotkey import InfoBarHotkey
profile("LOAD:InitBar_Components")
from Components.ActionMap import HelpableActionMap
from Components.config import config
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
profile("LOAD:HelpableScreen")
from Screens.HelpMenu import HelpableScreen
class InfoBar(InfoBarBase, InfoBarShowHide,
InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarEPG, InfoBarRdsDecoder,
InfoBarInstantRecord, InfoBarAudioSelection, InfoBarRedButton, InfoBarTimerButton, InfoBarVmodeButton,
HelpableScreen, InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarUnhandledKey,
InfoBarSubserviceSelection, InfoBarTimeshift, InfoBarSeek, InfoBarCueSheetSupport, InfoBarBuffer,
InfoBarSummarySupport, InfoBarTimeshiftState, InfoBarTeletextPlugin, InfoBarExtensions,
InfoBarPiP, InfoBarPlugins, InfoBarSubtitleSupport, InfoBarServiceErrorPopupSupport, InfoBarJobman, InfoBarPowersaver,
InfoBarHDMI, InfoBarHotkey, Screen):
ALLOW_SUSPEND = True
instance = None
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = HelpableActionMap(self, "InfobarActions",
{
"showMovies": (self.showMovies, _("Play recorded movies...")),
"showRadio": (self.showRadio, _("Show the radio player...")),
"showTv": (self.showTv, _("Show the tv player...")),
"showWWW": (self.showWWW, _("Open WebBrowser...")),
"toogleTvRadio": (self.toogleTvRadio, _("toggels betwenn tv and radio...")),
"openSleepTimer": (self.openSleepTimer, _("Show the Sleep Timer...")),
"showMediaPlayer": (self.showMediaPlayer, _("Show the media player...")),
"openTimerList": (self.openTimerList, _("Show the tv player...")),
"openIMDB": (self.openIMDB, _("Show the tv player...")),
"openBouquetList": (self.openBouquetList, _("open bouquetlist")),
"showSetup": (self.showSetup, _("Show setup...")),
'HarddiskSetup': (self.HarddiskSetup, _('Select HDD')),
"showPluginBrowser": (self.showPluginBrowser, _("Show the plugins...")),
"showPortal": (self.showPORTAL, _("Open MediaPortal...")),
}, prio=2)
self.allowPiP = True
self.radioTV = 0
for x in HelpableScreen, \
InfoBarBase, InfoBarShowHide, \
InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarEPG, InfoBarRdsDecoder, \
InfoBarInstantRecord, InfoBarAudioSelection, InfoBarRedButton, InfoBarTimerButton, InfoBarUnhandledKey, InfoBarVmodeButton,\
InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarSubserviceSelection, InfoBarBuffer, \
InfoBarTimeshift, InfoBarSeek, InfoBarCueSheetSupport, InfoBarSummarySupport, InfoBarTimeshiftState, \
InfoBarTeletextPlugin, InfoBarExtensions, InfoBarPiP, InfoBarSubtitleSupport, InfoBarJobman, InfoBarPowersaver, \
InfoBarPlugins, InfoBarServiceErrorPopupSupport, InfoBarHotkey:
x.__init__(self)
self.helpList.append((self["actions"], "InfobarActions", [("showMovies", _("Watch recordings..."))]))
self.helpList.append((self["actions"], "InfobarActions", [("showRadio", _("Listen to the radio..."))]))
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
enigma.iPlayableService.evUpdatedEventInfo: self.__eventInfoChanged
})
self.current_begin_time=0
assert InfoBar.instance is None, "class InfoBar is a singleton class and just one instance of this class is allowed!"
InfoBar.instance = self
def __onClose(self):
InfoBar.instance = None
def __eventInfoChanged(self):
if self.execing:
service = self.session.nav.getCurrentService()
old_begin_time = self.current_begin_time
info = service and service.info()
ptr = info and info.getEvent(0)
self.current_begin_time = ptr and ptr.getBeginTime() or 0
if config.usage.show_infobar_on_event_change.value:
if old_begin_time and old_begin_time != self.current_begin_time:
self.doShow()
def __checkServiceStarted(self):
self.__serviceStarted(True)
self.onExecBegin.remove(self.__checkServiceStarted)
def serviceStarted(self): #override from InfoBarShowHide
new = self.servicelist.newServicePlayed()
if self.execing:
InfoBarShowHide.serviceStarted(self)
self.current_begin_time=0
elif not self.__checkServiceStarted in self.onShown and new:
self.onShown.append(self.__checkServiceStarted)
def __checkServiceStarted(self):
self.serviceStarted()
self.onShown.remove(self.__checkServiceStarted)
def showTv(self):
self.showTvChannelList(True)
def showRadio(self):
if config.usage.e1like_radio_mode.value:
self.showRadioChannelList(True)
else:
self.rds_display.hide() # in InfoBarRdsDecoder
from Screens.ChannelSelection import ChannelSelectionRadio
self.session.openWithCallback(self.ChannelSelectionRadioClosed, ChannelSelectionRadio, self)
def toogleTvRadio(self):
if self.radioTV == 1:
self.radioTV = 0
self.showTv()
else:
self.radioTV = 1
self.showRadio()
def ChannelSelectionRadioClosed(self, *arg):
self.rds_display.show() # in InfoBarRdsDecoder
self.servicelist.correctChannelNumber()
def showMovies(self, defaultRef=None):
self.lastservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.session.openWithCallback(self.movieSelected, Screens.MovieSelection.MovieSelection, defaultRef or eServiceReference(config.usage.last_movie_played.value), timeshiftEnabled = self.timeshiftEnabled())
def movieSelected(self, service):
ref = self.lastservice
del self.lastservice
if service is None:
if ref and not self.session.nav.getCurrentlyPlayingServiceOrGroup():
self.session.nav.playService(ref)
else:
from Components.ParentalControl import parentalControl
if parentalControl.isServicePlayable(service, self.openMoviePlayer):
self.openMoviePlayer(service)
def openMoviePlayer(self, ref):
self.session.open(MoviePlayer, ref, slist=self.servicelist, lastservice=self.session.nav.getCurrentlyPlayingServiceOrGroup(), infobar=self)
def showWWW(self):
try:
from Plugins.Extensions.opera.plugin import *
self.session.open(OperaBrows | er)
no_plugin = False
except Exception, e:
self.session.open(MessageBox, _("The OperaBrowser plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def openSleepTimer(self):
from Screens.SleepTimerEdit import SleepTimerEdit
self.session.open(SleepTimerEdit)
def openTimerList(self):
fro | m Screens.TimerEdit import TimerEditList
self.session.open(TimerEditList)
def showMediaPlayer(self):
try:
from Plugins.Extensions.MediaPlayer.plugin import MediaPlayer
self.session.open(MediaPlayer)
no_plugin = False
except Exception, e:
self.session.open(MessageBox, _("The MediaPlayer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def openIMDB(self):
try:
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU ,PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("IMDb Details"):
self.runPlugin(plugin)
break
|
shianchin/mini_projects | stock_commision_fee_cal/stock.py | Python | apache-2.0 | 6,253 | 0.016152 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------
# Project : Stock commision fee calculator
#
# File name : stock.py
#
# Author : Cheang Shian Chin
#
# Date created : 20 Nov 2015
#
# Purpose : Calculate stock sell/buy fees and breakeven price.
#
#----------------------------------------------------------------------
def main():
action = str(input('{sell | buy | breakeven}: '))
price_per_share = float(input('Price per share in RM: '))
sha | re_lot = int(input('How many lots ( 1 lot = 100 shares): '))
gross_amount = price_per_share * share_lot * 100
print ("")
print (" Buy/Se | ll/Breakeven ", '{:>9}'.format(action.upper()))
print (" Quantity = ", '{:>9}'.format('{:,}'.format(share_lot * 100)))
print (" Price = RM ", '{:>9}'.format('{:,.6f}'.format(price_per_share)))
print (" Gross amount = RM ", '{:>9}'.format('{:,.2f}'.format(gross_amount)))
print ("")
if (action == 'buy'):
total_amount_due = buy(gross_amount, action)
print (" TOTAL AMOUNT DUE = RM ", '{:>9}'.format('{:,.2f}'.format(total_amount_due)))
elif (action == 'sell'):
total_amount_due = sell(gross_amount, action)
print (" TOTAL AMOUNT DUE = RM ", '{:>9}'.format('{:,.2f}'.format(total_amount_due)))
elif (action == 'breakeven'):
price_per_share = breakeven(gross_amount, price_per_share, share_lot, action)
print (" Breakeven price = RM ", '{:>9}'.format('{:,.6f}'.format(price_per_share)))
def buy(gross_amount, action):
total_fee = fee_calculate(gross_amount, action)
total_amount_due = gross_amount + total_fee
return total_amount_due
def sell(gross_amount, action):
total_fee = fee_calculate(gross_amount, action)
total_amount_due = gross_amount - total_fee
return total_amount_due
def breakeven(gross_amount, price_per_share, share_lot, action):
total_amount_due = buy(gross_amount, action)
#TODO: Might want to consolidate these constants with fee_calculate()
brokerage_rate = 0.42/100
clearing_fee_rate = 0.03/100
gst_rate = 6.0/100
contract_stamp = int(total_amount_due / 1000) + 1 # might have problem when gross amount is near boundary of thousand
if (contract_stamp > 200):
contract_stamp = 200 #Max stamp duty = RM200
if (total_amount_due < 2873.78): #For anything less than RM2873.78, min brokerage fee is RM12
brokerage_amount = 12.00
total_fee = (((brokerage_amount)*(clearing_fee_rate + 1)*(gst_rate + 1)
+ (contract_stamp)*((clearing_fee_rate)*(gst_rate + 1) + 1)
+ (total_amount_due)*(clearing_fee_rate)*(gst_rate + 1)) / (1 - (clearing_fee_rate)*(gst_rate + 1)))
#t_f = [b_a(1.060318) + c_s(1.000318) + t_a_d(0.000318)] / (0.999682)
else:
total_fee = ((total_amount_due * (brokerage_rate*(clearing_fee_rate + 1)*(gst_rate + 1)
+ (clearing_fee_rate)*(gst_rate + 1))
+ contract_stamp*((clearing_fee_rate)*(gst_rate + 1) + 1)) / ((1 - (clearing_fee_rate)*(gst_rate + 1))
- brokerage_rate*(clearing_fee_rate + 1)*(gst_rate + 1)))
#t_f = [t_a_d[(b_r)(1.060318) + 0.000318] + c_s(1.000318)] / [0.999682 - (b_r)(1.060318)]
new_gross_amount = total_amount_due + total_fee
price_per_share = new_gross_amount / (share_lot * 100)
#for debug
#print "total_fee from breakeven = ", total_fee
#print "gross_amount breakeven = ", new_gross_amount
#print "price_per_share", price_per_share
return price_per_share
def fee_calculate(gross_amount, action):
brokerage_rate = 0.42/100
clearing_fee_rate = 0.03/100
gst_rate = 6.0/100
brokerage_amount = gross_amount * brokerage_rate
if (brokerage_amount < 12.00):
brokerage_amount = 12.00
contract_stamp = int(gross_amount / 1000) + 1
if (contract_stamp > 200):
contract_stamp = 200 #Max stamp duty = RM200
clearing_fee = (gross_amount + brokerage_amount + contract_stamp) * clearing_fee_rate
if (clearing_fee > 1000):
clearing_fee = 1000 #Max clearing fee = RM1000
gst = (brokerage_amount + clearing_fee) * gst_rate
total_fee = brokerage_amount + contract_stamp + clearing_fee + gst
#for debug
#total_fee_2 = (brokerage_amount)*(clearing_fee_rate + 1)*(gst_rate + 1) + (gross_amount)*(clearing_fee_rate)*(gst_rate + 1) + (contract_stamp)*(clearing_fee_rate)*(gst_rate + 1) + contract_stamp
#print "total_fee_2 = ", total_fee_2
if (action == 'buy' or action == 'breakeven'):
total_excl_gst = gross_amount + total_fee - gst
elif (action == 'sell'):
total_excl_gst = gross_amount - total_fee + gst
if (action != 'breakeven'):
print ("Fees: ")
print (" Brokerage rate = ", '{:>9}'.format('{:.2%}'.format(brokerage_rate)))
print (" Brokerage amount = RM ", '{:>9}'.format('{:,.2f}'.format(brokerage_amount)))
print (" Contract stamp = RM ", '{:>9}'.format('{:,.2f}'.format(contract_stamp)))
print (" Clearing fees = RM ", '{:>9}'.format('{:,.2f}'.format(clearing_fee)))
print (" TOTAL (EXCL GST) = RM ", '{:>9}'.format('{:,.2f}'.format(total_excl_gst)))
print (" GST payable = RM ", '{:>9}'.format('{:,.2f}'.format(gst)))
print (" TOTAL FEE DUE = RM ", '{:>9}'.format('{:,.2f}'.format(total_fee)))
return total_fee
if __name__ == '__main__':
main()
#----------------------------------------------------------------------
# Revision History :
#
# Date Author Ref Revision
# 02-Aug-2016 shianchin 3 Update to Python 3.5
# 10-Jul-2016 shianchin 2 Prompt user input, set max stamp duty
# and clearing fee.
# 20-Nov-2015 shianchin 1 Initial creation.
#
#---------------------------------------------------------------------- |
alunduil/muniments | test_muniments/test_unit/test_scheduler/test_models/__init__.py | Python | mit | 208 | 0 | # Copyright (C) 2015 by Alex Brandt <alunduil@alunduil.com>
#
# muniments is freely distributable under the terms of an MIT-style license.
# See COPYING or ht | tp://www.opensource.org/licenses/mit-licen | se.php.
|
dennisobrien/bokeh | examples/plotting/file/bar_nested_colormapped.py | Python | bsd-3-clause | 1,229 | 0.00895 | from bokeh.io import show, output_file
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
output_file("bar_nested_colormapped.html")
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
years = ['2015', '2016', '2017']
data = {'fruits' : fruits,
'2015' : [2, 1, 4, 3, 2, 4],
'2016' : [5, 3, 3, 2, 4, 6],
'2017' : [3, 2, 4, 4, 5, 3]}
palette = ["#c9d9d3", "#718dbf", "#e84d60"]
# this creates [ | ("Apples", "2015"), ("Apples", "2016"), ("Apples", "2017"), ("Pears", "2015), | ... ]
x = [ (fruit, year) for fruit in fruits for year in years ]
counts = sum(zip(data['2015'], data['2016'], data['2017']), ()) # like an hstack
source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(x_range=FactorRange(*x), plot_height=350, title="Fruit Counts by Year",
toolbar_location=None, tools="")
p.vbar(x='x', top='counts', width=0.9, source=source, line_color="white",
fill_color=factor_cmap('x', palette=palette, factors=years, start=1, end=2))
p.y_range.start = 0
p.x_range.range_padding = 0.1
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
show(p)
|
ClaudiaSaxer/PlasoScaffolder | src/plasoscaffolder/dependencies.py | Python | apache-2.0 | 1,470 | 0 | # -*- coding: utf-8 -*-
# fyi: those methods are copied from plaso
"""Functionality to check for the availability and version of dependencies."""
from __future__ import print_function
# Dictionary that contains version tuples per module name.
#
# A version tuple consists of:
# (version_attribute_name, minimum_version, maximum_version, is_required)
#
# Where version_attribute_name is either a name of an attribute,
# property or method.
PYTHON_DEPENDENCIES = {
u'click': (u'__version__', u'6.7', u'6.7', True),
u'setuptools': (u'__version__', u'34.4.1', u'34.4.1', True),
u'Jinja2': (u'__version__', u'2.9.6', u'2.9.6', True),
u'colorama': (u'__version__', u'0.3.7', u'0.3.7', True),
u'yapf': (u'__version__', u'0.16.1', u'0.16.1', True), }
PYTHON_TEST_DEPENDENCIES = {}
# Maps Python module names to DPKG packages.
_DPKG_PACKAGE_NAMES = {
u'click': u'pyt | hon-click',
u'setuptools': u'python-setuptools',
u'Jinja2': u'python-jinja2',
u'colorama': u'python-colorama',
u'yapf': u'yapf3'}
# Maps Python module names to PyPI projects.
_PYPI_PROJECT_NAMES = {
u'click': u'click',
u'setuptools': u'setuptools ',
u'Jinja2': u'Jinja2',
u'colorama': u'colorama',
u'yapf': u'ya | pf'}
# Maps Python module names to RPM packages.
_RPM_PACKAGE_NAMES = {
u'click': u'python-click',
u'setuptools': u'python-setuptools',
u'Jinja2': u'python-Jinja2',
u'colorama': u'python-colorama',
u'yapf': u'yapf'}
|
soodoku/get-cricket-data | scripts/scraper.py | Python | mit | 1,947 | 0.010272 | '''
Download Cricket Data
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import csv
import sys
import time
import os
import unicodedata
from urlparse import urlparse
from BeautifulSoup import BeautifulSoup, SoupStrainer
BASE_URL = 'http://www.espncricinfo.com'
if not os.path.exists('./espncricinfo-fc'):
os.mkdir('./espncricinfo-fc')
for i in range(0, 6019):
#odi: soupy = BeautifulSoup(urllib2.urlopen('http://search.espncricinfo.com/ci/content/match/search.html?search=odi;all=1;page=' + str(i)).read())
#test: soupy = BeautifulSoup(urllib2.urlopen('http://search.espncricinfo.com/ci/content/match/search.html?search=test;all=1;page=' + str(i)).read())
#t20i: soupy = BeautifulSoup(urllib2.urlopen('http://search.espncricinfo.com/ci/content/match/search.html?search=t20i;all=1;page=' + str(i)).read())
#t20: soupy = BeautifulSoup(urllib2.urlopen('http://search.espncricinfo.com/ci/content/match/search.html?search=t20;all=1;page=' + str(i)).read())
#list a: soupy = BeautifulSoup(urllib2.urlopen('http://search.espncricinfo.com/ci/content/match/search.html?search=list%20a;all=1;page=' + | str(i)).read())
#fc:
soupy = BeautifulSoup(urllib2.urlopen('http://search.espncricinfo.com/ci/content/match/search.html?search=first%20class;all=1;page=' + str(i)).read())
time.sleep(1)
for new_host in soupy.findAll('a', {'class' : 'srchPlyrNmTxt'}):
try:
new_host = new_host['href']
except:
continue
odiurl = BASE_URL + urlparse(new_host).geturl()
new_host = unicodedata.normalize('NFKD', new_host).encode( | 'ascii','ignore')
print new_host
#print(type(str.split(new_host)[3]))
print str.split(new_host, "/")[4]
html = urllib2.urlopen(odiurl).read()
if html:
with open('espncricinfo-fc/{0!s}'.format(str.split(new_host, "/")[4]), "wb") as f:
f.write(html)
|
testmana2/test | PyUnit/__init__.py | Python | gpl-3.0 | 429 | 0 | # -*- coding | : utf-8 -*-
# Copyright (c) 2002 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Package implementing an interface to the pyunit unittest package.
The package consist of a single dialog, which may be called as a
standalone version using the eric6_unittest script or from within the eric6
IDE. If it is called from within eric6, it has the additional function to
open a source file that failed a tes | t.
"""
|
gooofy/voxforge | zamiaai/skills/sports/__init__.py | Python | lgpl-3.0 | 735 | 0.009524 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016, 2017, 2018 Guenter Bartsch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obta | in a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
DEPENDS = [ 'base', 'dialog' ]
import sports
def get_data(k):
sports.get_data(k)
|
novalis/BusTracker | mta_data/management/commands/mta_to_gtfs.py | Python | gpl-3.0 | 23,554 | 0.013204 | from django.contrib.gis.geos import LineString, Point
from django.core.management.base import BaseCommand
from mta_data_parser import parse_schedule_dir
from mta_data.models import *
from mta_data.utils import st_line_locate_point
from subway_stop_to_gid import stop_id_to_gid, end_of_line
from zipfile import ZipFile
import os
import re
import transitfeed
import zipfile
rename_location = {
'BROADWAY at 207 ST' : 'BROADWAY at W 207 ST',
'NARROWS ROAD S at FINGERBOARD ROAD' : 'NARROWS RD S at FINGERBOARD RD',
'NARROWS RD S at FINGERBOARD ROAD' : 'NARROWS RD S at FINGERBOARD RD',
'NARROWS ROAD S at FINGERBOARD RD' : 'NARROWS RD S at FINGERBOARD RD',
'AVE U at GERRITSEN AV' : 'AV U at GERRITSEN AV',
}
def google_time_from_centiminutes(centiminutes):
#the MTA's day is longer than 24 hours, but that needn't bother us
#so long as we are sure never to use absolute times any time
#subtraction might be required
hours = (centiminutes / 6000)
minutes = (centiminutes % 6000) / 100
seconds = ((centiminutes % 6000) - minutes * 100) * 60 / 100
return "%02d:%02d:%02d" % (hours, minutes, seconds)
class MTABusRoute(models.Model):
gid = models.IntegerField(primary_key=True)
rt_dir = models.CharField(max_length=1)
route = models.CharField(max_length=16)
path = models.CharField(max_length=2)
the_geom = models.GeometryField()
def dump_kml(self):
f = open("/tmp/%s.kml" % self.gid, "w")
coords = self.the_geom.coords
print >>f, """<?xml version="1.0" encoding="UTF-8"?>
< | kml xmlns="http://earth.google.com/kml/2.2">
<Document>
"""
for i, (x, y) in enumerate(coords):
print>>f, """
<Placemark>
<name>
%d
</name>
<Point>
<coordinates>%f, %f</coordinates>
</Point>
</Placemark>
""" % (i, x, y)
print >>f, """</Document>
</kml>
"""
f.close()
print "dumped %s" % self.gid
class MTASubwayRoute(models.Model):
gid = models.IntegerField | (primary_key=True)
id = models.FloatField()
line = models.CharField(max_length=20)
routes = models.CharField(max_length=9)
the_geom = models.GeometryField()
class MTASubwayStop(models.Model):
gid = models.IntegerField(primary_key=True)
routes = models.CharField(max_length=13)
line = models.CharField(max_length=16)
facility = models.CharField(max_length=40)
the_geom = models.GeometryField()
#from Bob Ippolito at
#http://bob.pythonmac.org/archives/2005/03/04/frozendict/
class frozendict(dict):
__slots__ = ('_hash',)
def __hash__(self):
rval = getattr(self, '_hash', None)
if rval is None:
rval = self._hash = hash(frozenset(self.iteritems()))
return rval
def freeze(obj):
if isinstance(obj, dict):
for k in obj:
obj[k] = freeze(obj[k])
return frozendict(obj)
elif isinstance(obj, list):
for i in range(len(obj)):
obj[i] = freeze(obj[i])
return tuple(obj)
else:
return obj
extra_names = {
'S89' : 'S59',
'S90' : 'S40',
'S92' : 'S62',
'S94' : 'S44',
'S96' : 'S46',
'S48' : 'S98',
'S98' : 'S48',
'X17' : 'X17J', #but what about all the other X17 routes?
'S61' : 'S91',
'S91' : 'S61',
'X68' : ['X68A', 'X68B', 'X68C'],
}
fix_direction = {
'BX14' : {'W' : 'S', 'E' : 'N'},
'S74' : {'E' : 'S', 'W' : 'N'}, #fixme: check this
'S54' : {'E' : 'S'}, #one bogus entry
'M31' : {'W' : 'S', 'E' :'N'},
'BX05' : {'E' : 'N', 'W' : 'S'},
'S74' : {'E' : 'N', 'W' : 'S'},
'S84' : {'E' : 'N', 'W' : 'S'},
}
fix_leading_zeros = {
'Q01' : 'Q1',
'Q02' : 'Q2',
'Q03' : 'Q3',
'Q04' : 'Q4',
'Q05' : 'Q5',
'M01' : 'M1',
'M02' : 'M2',
'M03' : 'M3',
'M04' : 'M4',
'M05' : 'M5',
'M06' : 'M6',
'M07' : 'M7',
'M08' : 'M8',
'M09' : 'M9',
'BX03' : 'BX3',
'BX04' : 'BX4',
'BX05' : 'BX5',
'BX06' : 'BX6',
'BX07' : 'BX7',
'BX08' : 'BX8',
'BX09' : 'BX9',
}
loop_routes = set(['B74'])
rename_routes = {
'S7484' : 'S84',
}
_shape_by_stops_cache = {}
def find_shape_by_stops(feed, candidate_routes, stops, table_name):
"""This is brutal -- it matches a set of route paths against a
known set of bus stops to choose the route path which falls
nearest to the trip."""
key = freeze([candidate_routes, stops, table_name])
if key in _shape_by_stops_cache:
return _shape_by_stops_cache[key]
best_route = None
best_dist = 100000000000000
#routes are sorted by length, because we want to use the shortest
#route that matches the points.
for route in sorted(candidate_routes, key=lambda route:route.the_geom.length):
total_dist = 0
for stop in stops:
total_dist += route.the_geom.distance(Point(stop.stop_lon, stop.stop_lat))
if total_dist < best_dist:
best_dist = total_dist
best_route = route
if candidate_routes[0].route == 'S55':
print "The MTA's route shape for S55 is from 2007. So we're skipping it."
return None
if candidate_routes[0].route == 'Q48':
#this is a total hack; the Q48 is in general a total hack
if len(stops) == 22:
for route in candidate_routes:
if route.gid == 10707:
best_route = route
#figure out if the set of stops is shorter than the best route
#(the bus stops or ends in the middle of the route) and if so,
#cut the route down.
start_location = st_line_locate_point(best_route.the_geom, (stops[0].stop_lon, stops[0].stop_lat))
end_location = st_line_locate_point(best_route.the_geom, (stops[-1].stop_lon, stops[-1].stop_lat))
if start_location > end_location:
print "Backwards route %s, Skipping." % route.gid
return None
if end_location - start_location < 0.98 and best_route.route not in loop_routes:
if end_location - start_location < 0.05:
print """"This is a very short route segment. Is it a
miscategorized loop? Route: %s, first and last: %s, %s""" % (
best_route.route, stops[0].location, stops[-1].location)
#create a new shape for the short route
i = 0
while 1:
new_gid = str(best_route.gid * 100 + 20000 + i)
i += 1
try:
feed.GetShape(new_gid)
except KeyError:
break
shape = transitfeed.Shape(new_gid)
#while a binary search for start and end would probably be
#faster, it assumes that the shapes are correctly plotted in
#ascending order, which they appear not to be.
distance = 0
for point in best_route.the_geom.coords:
last_distance = distance
distance = st_line_locate_point(best_route.the_geom, point)
if start_location <= distance - 0.001:
if distance <= end_location + 0.001:
shape.AddPoint(point[1], point[0])
else:
line_distance_span = distance - last_distance;
end_distance_span = end_location - last_distance;
interp_ratio = end_distance_span / line_distance_span
interp_x = last_point[1] * interp_ratio + point[1] * (1 - interp_ratio)
interp_y = last_point[0] * interp_ratio + point[0] * (1 - interp_ratio)
shape.AddPoint(interp_x, interp_y)
last_point = point
feed.AddShapeObject(shape)
else: #not a too-short route
try:
shape = feed.GetShape(str(best_route.gid))
except KeyError:
shape = transitfeed.Shape(str(best_route.gid))
for point in best_route.the_geom.coords:
shape.AddPoint(point[1], point[0])
feed.AddShapeObject(shape)
_shape_by_stops_cache[key] = shape
return shape
def route_for_trip(feed, trip_rec, headsign):
route_id = str(trip_rec['headsign_id'])
if route_id in feed.routes:
|
saurabh6790/omnisys-lib | webnotes/install_lib/install.py | Python | mit | 8,275 | 0.039517 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# called from wnf.py
# lib/wnf.py --install [rootpassword] [dbname] [source]
from __future__ import unicode_literals
import os, sys, json
import webnotes
import webnotes.db
import getpass
from webnotes.model.db_schema import DbManager
from webnotes.model.sync import sync_for
from webnotes.utils import cstr
class Installer:
def __init__(self, root_login, root_password=None, db_name=None, site=None, site_config=None):
make_conf(db_name, site=site, site_config=site_config)
self.site = site
if isinstance(root_password, list):
root_password = root_password[0]
self.make_connection(root_login, root_password)
webnotes.local.conn = self.conn
webnotes.local.session = webnotes._dict({'user':'Administrator'})
self.dbman = DbManager(self.conn)
def make_connection(self, root_login, root_password):
if root_login:
if not root_password:
root_password = webnotes.conf.get("root_password") or None
if not root_password:
root_password = getpass.getpass("MySQL root pass | word: ")
self.root_password = root_password
self.conn = webnotes.db.Database(user=root_login, password=root_password)
def install(self, db_name, source_sql=None, admin_password = 'admin', verbose=0,
force=0):
if force or (db_name not in self.dbman.get_database_list()):
# delete user (if exists)
self.dbman.delete_user(db_name)
else:
raise Exception("Database %s already exists" % (db_name,))
# create user and db
self.dbman.create_user(db_name, webnotes.conf. | db_password)
if verbose: print "Created user %s" % db_name
# create a database
self.dbman.create_database(db_name)
if verbose: print "Created database %s" % db_name
# grant privileges to user
self.dbman.grant_all_privileges(db_name, db_name)
if verbose: print "Granted privileges to user %s and database %s" % (db_name, db_name)
# flush user privileges
self.dbman.flush_privileges()
# close root connection
self.conn.close()
webnotes.connect(db_name=db_name, site=self.site)
self.dbman = DbManager(webnotes.conn)
# import in db_name
if verbose: print "Starting database import..."
# get the path of the sql file to import
if not source_sql:
source_sql = os.path.join(os.path.dirname(webnotes.__file__), "..",
'conf', 'Framework.sql')
self.dbman.restore_database(db_name, source_sql, db_name, webnotes.conf.db_password)
if verbose: print "Imported from database %s" % source_sql
self.create_auth_table()
# fresh app
if 'Framework.sql' in source_sql:
if verbose: print "Installing app..."
self.install_app(verbose=verbose)
# update admin password
self.update_admin_password(admin_password)
# create public folder
from webnotes.install_lib import setup_public_folder
setup_public_folder.make(site=self.site)
if not self.site:
from webnotes.build import bundle
bundle(False)
return db_name
def install_app(self, verbose=False):
sync_for("lib", force=True, sync_everything=True, verbose=verbose)
self.import_core_docs()
try:
from startup import install
except ImportError, e:
install = None
if os.path.exists("app"):
sync_for("app", force=True, sync_everything=True, verbose=verbose)
if os.path.exists(os.path.join("app", "startup", "install_fixtures")):
install_fixtures()
# build website sitemap
from website.doctype.website_sitemap_config.website_sitemap_config import build_website_sitemap_config
build_website_sitemap_config()
if verbose: print "Completing App Import..."
install and install.post_import()
if verbose: print "Updating patches..."
self.set_all_patches_as_completed()
self.assign_all_role_to_administrator()
def update_admin_password(self, password):
from webnotes.auth import _update_password
webnotes.conn.begin()
_update_password("Administrator", webnotes.conf.get("admin_password") or password)
webnotes.conn.commit()
def import_core_docs(self):
install_docs = [
# profiles
{'doctype':'Profile', 'name':'Administrator', 'first_name':'Administrator',
'email':'admin@localhost', 'enabled':1},
{'doctype':'Profile', 'name':'Guest', 'first_name':'Guest',
'email':'guest@localhost', 'enabled':1},
# userroles
{'doctype':'UserRole', 'parent': 'Administrator', 'role': 'Administrator',
'parenttype':'Profile', 'parentfield':'user_roles'},
{'doctype':'UserRole', 'parent': 'Guest', 'role': 'Guest',
'parenttype':'Profile', 'parentfield':'user_roles'},
{'doctype': "Role", "role_name": "Report Manager"}
]
webnotes.conn.begin()
for d in install_docs:
bean = webnotes.bean(d)
bean.insert()
webnotes.conn.commit()
def set_all_patches_as_completed(self):
try:
from patches.patch_list import patch_list
except ImportError, e:
print "No patches to update."
return
for patch in patch_list:
webnotes.doc({
"doctype": "Patch Log",
"patch": patch
}).insert()
webnotes.conn.commit()
def assign_all_role_to_administrator(self):
webnotes.bean("Profile", "Administrator").get_controller().add_roles(*webnotes.conn.sql_list("""
select name from tabRole"""))
webnotes.conn.commit()
def create_auth_table(self):
webnotes.conn.sql_ddl("""create table if not exists __Auth (
`user` VARCHAR(180) NOT NULL PRIMARY KEY,
`password` VARCHAR(180) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8""")
def make_conf(db_name=None, db_password=None, site=None, site_config=None):
try:
from werkzeug.exceptions import NotFound
import conf
try:
webnotes.init(site=site)
except NotFound:
pass
if not site and webnotes.conf.site:
site = webnotes.conf.site
if site:
# conf exists and site is specified, create site_config.json
make_site_config(site, db_name, db_password, site_config)
elif os.path.exists("conf.py"):
print "conf.py exists"
else:
# pyc file exists but py doesn't
raise ImportError
except ImportError:
if site:
raise Exception("conf.py does not exist")
else:
# create conf.py
with open(os.path.join("lib", "conf", "conf.py"), "r") as confsrc:
with open("conf.py", "w") as conftar:
conftar.write(confsrc.read() % get_conf_params(db_name, db_password))
webnotes.destroy()
webnotes.init(site=site)
def make_site_config(site, db_name=None, db_password=None, site_config=None):
import conf
if not getattr(conf, "sites_dir", None):
raise Exception("sites_dir missing in conf.py")
site_path = os.path.join(conf.sites_dir, site)
if not os.path.exists(site_path):
os.mkdir(site_path)
site_file = os.path.join(site_path, "site_config.json")
if not os.path.exists(site_file):
if not (site_config and isinstance(site_config, dict)):
site_config = get_conf_params(db_name, db_password)
with open(site_file, "w") as f:
f.write(json.dumps(site_config, indent=1, sort_keys=True))
def get_conf_params(db_name=None, db_password=None):
if not db_name:
db_name = raw_input("Database Name: ")
if not db_name:
raise Exception("Database Name Required")
if not db_password:
from webnotes.utils import random_string
db_password = random_string(16)
return {"db_name": db_name, "db_password": db_password}
def install_fixtures():
print "Importing install fixtures..."
for basepath, folders, files in os.walk(os.path.join("app", "startup", "install_fixtures")):
for f in files:
f = cstr(f)
if f.endswith(".json"):
print "Importing " + f
with open(os.path.join(basepath, f), "r") as infile:
webnotes.bean(json.loads(infile.read())).insert_or_update()
webnotes.conn.commit()
if f.endswith(".csv"):
from core.page.data_import_tool.data_import_tool import import_file_by_path
import_file_by_path(os.path.join(basepath, f), ignore_links = True, overwrite=True)
webnotes.conn.commit()
if os.path.exists(os.path.join("app", "startup", "install_fixtures", "files")):
if not os.path.exists(os.path.join("public", "files")):
os.makedirs(os.path.join("public", "files"))
os.system("cp -r %s %s" % (os.path.j |
tlseabra/dailyprogrammer | Python/intermediate/i375.py | Python | mit | 352 | 0 | # 15/02/2019
# influenced heavily by Gprime5's solution
def flip(cards):
| order = []
direction = True
for i, card in enumerate(cards):
if direction:
ord | er.insert(0, str(i))
else:
order.append(str(i))
direction ^= (card == '1')
return ' '.join(order) if not direction else 'no solution'
|
TEDICpy/write-it | nuntium/migrations/0007_auto__add_messagerecord.py | Python | gpl-3.0 | 5,320 | 0.007895 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MessageRecord'
db.create_table(u'nuntium_messagerecord', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('status', self.gf('django.db.models.fields.CharField')(max_length=255)),
('datetime', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 4, 24, 0, 0))),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal(u'nuntium', ['MessageRecord'])
def backwards(self, orm):
# Deleting model 'MessageRecord'
db.delete_table(u'nuntium_messagerecord')
models = {
u'contactos.contact': {
'Meta': {'object_name': 'Contact'},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.ContactType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.Person']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'contactos.contacttype': {
'Meta': {'object_name': 'ContactType'},
u'id': ('django.db.models.fields. | AutoField', [], {'primary_key': 'True'}),
'label_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_ty | pe'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'nuntium.message': {
'Meta': {'object_name': 'Message'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': "'4'"}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'writeitinstance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.WriteItInstance']"})
},
u'nuntium.messagerecord': {
'Meta': {'object_name': 'MessageRecord'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'datetime': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'nuntium.outboundmessage': {
'Meta': {'object_name': 'OutboundMessage'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contactos.Contact']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nuntium.Message']"})
},
u'nuntium.writeitinstance': {
'Meta': {'object_name': 'WriteItInstance'},
'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'popit.apiinstance': {
'Meta': {'object_name': 'ApiInstance'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('popit.fields.ApiInstanceURLField', [], {'unique': 'True', 'max_length': '200'})
},
u'popit.person': {
'Meta': {'object_name': 'Person'},
'api_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['popit.ApiInstance']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'popit_url': ('popit.fields.PopItURLField', [], {'default': "''", 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['nuntium'] |
alsrgv/tensorflow | tensorflow/python/distribute/saved_model_save_load_test.py | Python | apache-2.0 | 3,531 | 0.004248 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saving and loading using tf's saved_model APIs with DS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import saved_model_test_base as test_base
from tensorflow.python.eager import test
from tensorflow.python.saved_model import saved_model
class SavedModelSaveAndLoadTest(test_base.TestSavedModelBase):
def setUp(self):
self._root_dir = 'saved_model_save_load'
super(SavedModelSaveAndLoadTest, self).setUp()
def _save_model(self, model, saved_dir):
saved_model.save(model, saved_dir)
def _load_and_run_model(self, distribution, saved_dir, predict_dataset,
output_name):
return test_base.load_and_run_with_saved_model_api(distribution, saved_dir,
predict_dataset,
output_name)
@combinations.generate(test_base.simple_models_with_strategies())
def test_save_no_strategy_restore_strategy(self, model_and_input,
distribution):
self.run_test_save_no_strategy_restore_strategy(model_and_input,
| distribution)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategies(),
combinations.combin | e(save_in_scope=[True, False])))
def test_save_strategy_restore_no_strategy(self, model_and_input,
distribution, save_in_scope):
if save_in_scope:
self.skipTest(('Saving model within tf.distribute.Strategy scope is not ',
'supported.'))
self.run_test_save_strategy_restore_no_strategy(model_and_input,
distribution, save_in_scope)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategy_pairs(),
combinations.combine(save_in_scope=[True, False])))
def test_save_strategy_restore_strategy(self, model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope):
if save_in_scope:
self.skipTest(('Saving model within tf.distribute.Strategy scope is not ',
'supported.'))
self.run_test_save_strategy_restore_strategy(model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope)
if __name__ == '__main__':
test.main()
|
tonyseek/simple-rbac | examples/proxy.py | Python | mit | 3,965 | 0 | #!/usr/bin/env python
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from rbac.acl import Registry
from rbac.proxy import RegistryProxy
from rbac.context import IdentityContext, PermissionDenied
engine = create_engine('sqlite:///:memory:', echo=False)
Session = sessionmaker(bind=engine)
ModelBase = declarative_base()
class ResourceMixin(object):
def __eq__(self, other):
return hasattr(other, "id") and self.id == other.id
def __hash__(self):
return hash(self.id)
class User(ResourceMixin, ModelBase):
"""User Model"""
__tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String, unique=True, nullable=False)
roles = Column(String, nullable=False, default="")
def get_roles(self):
ret | urn self.roles.split(",")
def set_roles(self, roles):
self.roles = ",".join(roles)
class Message(ResourceMixin, ModelBase):
"""Message Model"""
__tablename__ = "post"
id = Column(Integer, primary_key=True)
content = Column(String, nullable=False)
owner_id = Column(ForeignKey(User.id), nullable=False)
owner = relationship(User, uselist=False, lazy="joined")
def main():
# current context user
current_user = None
# create a access | control list
acl = RegistryProxy(Registry())
identity = IdentityContext(acl, lambda: current_user.get_roles())
# registry roles and resources
acl.add_role("staff")
acl.add_role("admin")
acl.add_resource(Message)
def check(acl, role, operation, resource):
return db.query(Message).get(resource.id).owner is current_user
is_message_owner = check
acl.allow("staff", "create", Message)
acl.allow("staff", "edit", Message, assertion=is_message_owner)
acl.allow("admin", "edit", Message)
db = Session()
ModelBase.metadata.create_all(engine)
tonyseek = User(name="tonyseek")
tonyseek.set_roles(["staff"])
tom = User(name="tom")
tom.set_roles(["staff"])
admin = User(name="admin")
admin.set_roles(["admin"])
db.add_all([tonyseek, tom, admin])
db.commit()
@identity.check_permission("create", Message)
def create_message(content):
message = Message(content=content, owner=current_user)
db.add(message)
db.commit()
print("%s has craeted a message: '%s'." % (
current_user.name.capitalize(), content))
def edit_message(content, new_content):
message = db.query(Message).filter_by(content=content).one()
if not identity.check_permission("edit", message):
print("%s tried to edit the message '%s' but he will fail." % (
current_user.name.capitalize(), content))
else:
print("%s will edit the message '%s'." % (
current_user.name.capitalize(), content))
with identity.check_permission("edit", message):
message.content = new_content
db.commit()
print("The message '%s' has been edit by %s," % (
content, current_user.name.capitalize()))
print("the new content is '%s'" % new_content)
# tonyseek signed in and create a message
current_user = tonyseek
create_message("Please open the door.")
# tom signed in and edit tonyseek's message
current_user = tom
try:
edit_message("Please open the door.", "Please don't open the door.")
except PermissionDenied:
print("Oh, the operation has been denied.")
# tonyseek signed in and edit his message
current_user = tonyseek
edit_message("Please open the door.", "Please don't open the door.")
# admin signed in and edit tonyseek's message
current_user = admin
edit_message("Please don't open the door.", "Please open the window.")
if __name__ == "__main__":
main()
|
aevri/mel | mel/cmd/rotomapedit.py | Python | apache-2.0 | 21,197 | 0 | """Edit a 'rotomap' series of images.
In all modes:
Press 'q' to quit.
Press 'Q' to quit with exit code 1.
Press left for previous image, right for next image.
Press up for previous map, down for next map.
Ctrl-click on a point to zoom in on it.
Press 'z' or 'x' to adjust the zoom level.
Press space to restore original zoom.
Mode selection:
Press '1' for mole edit mode (the starting mode).
Press '2' for mask edit mode.
Press '3' for bounding area mode.
Press '4' for mole marking mode.
Press '0' for auto-mole debug mode.
In 'mole edit' mode:
Click on a point to add or move a mole there and save.
Shift-click on a point to delete it.
Shift-right-click on a point to randomize the uuid.
Alt-Shift-click on a point to copy it's uuid.
Also, press 'end' or '+' when over a point to copy it's uuid.
Alt-Shift-right-click over a point to make it canonical.
Alt-click on a point to paste the copied uuid.
Press 'o' to toggle follow mode.
Press 'm' to toggle move mode.
Press 'i' to 'rotomap identify' in the current image.
Press enter to toggle mole markers.
In 'mask edit' mode:
Click on a point to draw masking there.
Shift-click on a point to remove masking there.
Press '<' to decrease the size of the mask tool.
Press '>' to increase the size of the mask tool.
Press '.' to reset the size of the mask tool to the default.
In 'mole marking' mode:
Click on a point to add or move a mole there and save.
Shift-click on a point to delete it.
Press 'a' to accentuate marked moles, for considering removal.
"""
import argparse
import os.path
import numpy |
import mel.lib.common
import mel.lib.fullscreenui
import mel.lib.image
import mel.lib.math
import mel.lib.u | i
import mel.rotomap.display
import mel.rotomap.mask
import mel.rotomap.moles
import mel.rotomap.relate
# Radius within which we should look for moles, in later work perhaps we'll
# make this configurable by the user.
_MAGIC_MOLE_FINDER_RADIUS = 50
def setup_parser(parser):
parser.add_argument(
"ROTOMAP",
type=mel.rotomap.moles.make_argparse_rotomap_directory,
nargs="+",
help="A list of paths to rotomaps.",
)
parser.add_argument(
"--follow",
type=str,
default=None,
help="UUID of a mole to follow, try to jump to it in the first set.",
)
parser.add_argument(
"--copy-to-clipboard",
action="store_true",
help="Copy UUID to the clipboard, as well as printing. Mac OSX only.",
)
parser.add_argument(
"--advance-n-frames",
"--skip",
type=int,
metavar="N",
default=None,
help="Start with the image with the specified index, instead of 0.",
)
parser.add_argument(
"--visit-list-file",
type=argparse.FileType(),
metavar="PATH",
help=(
"Use keys to jump through this list of this form: "
"'path/to/jpg:hash:optional co-ords'."
),
)
class MoveController:
def __init__(self):
self.status = "Move mode"
def on_lbutton_down_noflags(self, editor, mouse_x, mouse_y):
editor.move_nearest_mole(mouse_x, mouse_y)
return True
def on_key(self, editor, key):
pass
class FollowController:
def __init__(self, editor, follow, mole_uuid_list):
self.mole_uuid_list = mole_uuid_list
if follow:
self.mole_uuid_list[0] = follow
editor.skip_to_mole(self.mole_uuid_list[0])
editor.follow(self.mole_uuid_list[0])
self.is_paste_mode = False
self.update_status()
def on_lbutton_down_noflags(self, editor, mouse_x, mouse_y):
editor.crud_mole(self.mole_uuid_list[0], mouse_x, mouse_y)
editor.follow(self.mole_uuid_list[0])
return True
def pre_key(self, editor, key):
self._prev_moles = editor.moledata.moles
def on_key(self, editor, key):
# Import pygame as late as possible, to avoid displaying its
# startup-text where it is not actually used.
import pygame
arrows = [
pygame.K_UP,
pygame.K_DOWN,
pygame.K_LEFT,
pygame.K_RIGHT,
]
if key in arrows:
update_follow(
editor,
self.mole_uuid_list[0],
self._prev_moles,
self.is_paste_mode,
)
elif key == pygame.K_p:
self.is_paste_mode = not self.is_paste_mode
self.update_status()
editor.set_status(self.status)
editor.show_current()
def update_status(self):
if self.is_paste_mode:
self.status = "follow paste mode"
else:
self.status = "follow mode"
class MoleEditController:
def __init__(self, editor, follow, copy_to_clipboard):
self.mole_uuid_list = [None]
self.follow_controller = FollowController(
editor, follow, self.mole_uuid_list
)
self.move_controller = MoveController()
self.sub_controller = None
self.mouse_x = 0
self.mouse_y = 0
self.copy_to_clipboard = copy_to_clipboard
def on_mouse_event(self, editor, event):
# Import pygame as late as possible, to avoid displaying its
# startup-text where it is not actually used.
import pygame
self.mouse_x, self.mouse_y = pygame.mouse.get_pos()
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
self.on_lbutton_down(editor, self.mouse_x, self.mouse_y)
elif event.button == 3:
self.on_rbutton_down(editor, self.mouse_x, self.mouse_y)
def on_lbutton_down(self, editor, mouse_x, mouse_y):
# Import pygame as late as possible, to avoid displaying its
# startup-text where it is not actually used.
import pygame
key_mods = pygame.key.get_mods()
if key_mods & pygame.KMOD_ALT:
if key_mods & pygame.KMOD_SHIFT:
self.mole_uuid_list[0] = editor.get_mole_uuid(mouse_x, mouse_y)
print(self.mole_uuid_list[0])
if self.copy_to_clipboard:
mel.lib.ui.set_clipboard_contents(self.mole_uuid_list[0])
else:
editor.set_mole_uuid(mouse_x, mouse_y, self.mole_uuid_list[0])
elif key_mods & pygame.KMOD_SHIFT:
editor.remove_mole(mouse_x, mouse_y)
else:
if self.sub_controller:
if self.sub_controller.on_lbutton_down_noflags(
editor, mouse_x, mouse_y
):
return
editor.add_mole(mouse_x, mouse_y)
def on_rbutton_down(self, editor, mouse_x, mouse_y):
# Import pygame as late as possible, to avoid displaying its
# startup-text where it is not actually used.
import pygame
key_mods = pygame.key.get_mods()
if key_mods & pygame.KMOD_ALT:
if key_mods & pygame.KMOD_SHIFT:
editor.confirm_mole(mouse_x, mouse_y)
elif key_mods & pygame.KMOD_SHIFT:
editor.set_mole_uuid(
mouse_x,
mouse_y,
mel.rotomap.moles.make_new_uuid(),
is_canonical=False,
)
def pre_key(self, editor, key):
if self.sub_controller:
try:
self.sub_controller.pre_key(editor, key)
except AttributeError:
pass
def on_key(self, editor, key):
# Import pygame as late as possible, to avoid displaying its
# startup-text where it is not actually used.
import pygame
if key == pygame.K_o:
is_follow = self.sub_controller is self.follow_controller
if not is_follow and self.mole_uuid_list[0]:
self.sub_controller = self.follow_controller
editor.set_status(self.sub_controller.status)
else:
self.sub_controller = None
editor.set_status(" |
sandeep048/python-saml | src/onelogin/saml2/auth.py | Python | bsd-3-clause | 16,929 | 0.002599 | # -*- coding: utf-8 -*-
""" OneLogin_Saml2_Auth class
Copyright (c) 2014, OneLogin, Inc.
All rights reserved.
Main class of OneLogin's Python Toolkit.
Initializes the SP SAML instance
"""
from base64 import b64encode
from urllib import quote_plus
import dm.xmlsec.binding as xmlsec
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from onelogin.saml2.response import OneLogin_Saml2_Response
from onelogin.saml2.errors import OneLogin_Saml2_Error
from onelogin.saml2.logout_response import OneLogin_Saml2_Logout_Response
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from onelogin.saml2.logout_request import OneLogin_Saml2_Logout_Request
from onelogin.saml2.authn_request import OneLogin_Saml2_Authn_Request
class OneLogin_Saml2_Auth(object):
"""
This class implements the SP SAML instance.
Defines the methods that you can invoke in your application in
order to add SAML support (initiates sso, initiates slo, processes a
SAML Response, a Logout Request or a Logout Response).
"""
def __init__(self, request_data, old_settings=None, custom_base_path=None):
"""
Initializes the SP SAML instance.
:param request_data: Request Data
:type request_data: dict
:param settings: Optional. SAML Toolkit Settings
:type settings: dict|object
:param custom_base_path: Optional. Path where are stored the settings file and the cert folder
:type custom_base_path: string
"""
self.__request_data = request_data
self.__settings = OneLogin_Saml2_Settings(old_settings, custom_base_path)
self.__attributes = []
self.__nameid = None
self.__session_index = None
self.__session_expiration = None
self.__authenticated = False
self.__errors = []
self.__error_reason = None
self.__last_request_id = None
def get_settings(self):
"""
Returns the settings info
:return: Setting info
:rtype: OneLogin_Saml2_Setting object
"""
return self.__settings
def set_strict(self, value):
"""
Set the strict mode active/disable
:param value:
:type value: bool
"""
assert isinstance(value, bool)
self.__settings.set_strict(value)
def process_response(self, request_id=None):
"""
Process the SAML Response sent by the IdP.
:param request_id: Is an optional argumen. Is the ID of the AuthNRequest sent by this SP to the IdP.
:type request_id: string
:raises: OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND, when a POST with a SAMLResponse is not found
"""
self.__errors = []
if 'post_data' in self.__request_data and 'SAMLResponse' in self.__request_data['post_data']:
# AuthnResponse -- HTTP_POST Binding
response = OneLogin_Saml2_Response(self.__settings, self.__request_data['post_data']['SAMLResponse'])
if response.is_valid(self.__request_data, request_id):
self.__attributes = response.get_attributes()
self.__nameid = response.get_nameid()
self.__session_index = response.get_session_index()
self.__session_expiration = response.get_session_not_on_or_after()
self.__authenticated = True
else:
self.__errors.append('invalid_response')
self.__error_reason = response.get_error()
else:
self.__errors.append('invalid_binding')
raise OneLogin_Saml2_Error(
'SAML Response not found, Only supported HTTP_POST Binding',
OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND
)
def process_slo(self, keep_local_session=False, request_id=None, delete_session_cb=None):
"""
Process the SAML Logout Response / Logout Request sent by the IdP.
:param keep_local_session: When false will destroy the local session, otherwise will destroy it
:type keep_local_session: bool
:param request_id: The ID of the LogoutRequest sent by this SP to the IdP
:type request_id: string
:returns: Redirection url
"""
self.__errors = []
if 'get_data' in self.__request_data and 'SAMLResponse' in self.__request_data['get_data']:
logout_response = OneLogin_Saml2_Logout_Response(self.__settings, self.__request_data['get_data']['SAMLResponse'])
if not logout_response.is_valid(self.__request_data, request_id):
self.__errors.append('invalid_logout_response')
self.__error_reason = log | out_response.get_error()
elif logout_response.get_status() != OneLogin_Saml2_Constants.STATUS_SUCCESS:
self.__errors.append('logout_not_success')
elif not keep_local_session:
OneLogin_Saml2_Utils.delete_local_session(delete_session_cb)
elif 'g | et_data' in self.__request_data and 'SAMLRequest' in self.__request_data['get_data']:
logout_request = OneLogin_Saml2_Logout_Request(self.__settings, self.__request_data['get_data']['SAMLRequest'])
if not logout_request.is_valid(self.__request_data):
self.__errors.append('invalid_logout_request')
self.__error_reason = logout_request.get_error()
else:
if not keep_local_session:
OneLogin_Saml2_Utils.delete_local_session(delete_session_cb)
in_response_to = logout_request.id
response_builder = OneLogin_Saml2_Logout_Response(self.__settings)
response_builder.build(in_response_to)
logout_response = response_builder.get_response()
parameters = {'SAMLResponse': logout_response}
if 'RelayState' in self.__request_data['get_data']:
parameters['RelayState'] = self.__request_data['get_data']['RelayState']
else:
parameters['RelayState'] = OneLogin_Saml2_Utils.get_self_url_no_query(self.__request_data)
security = self.__settings.get_security_data()
if 'logoutResponseSigned' in security and security['logoutResponseSigned']:
parameters['SigAlg'] = security['signatureAlgorithm']
parameters['Signature'] = self.build_response_signature(logout_response, parameters.get('RelayState', None), security['signatureAlgorithm'])
return self.redirect_to(self.get_slo_url(), parameters)
else:
self.__errors.append('invalid_binding')
raise OneLogin_Saml2_Error(
'SAML LogoutRequest/LogoutResponse not found. Only supported HTTP_REDIRECT Binding',
OneLogin_Saml2_Error.SAML_LOGOUTMESSAGE_NOT_FOUND
)
def redirect_to(self, url=None, parameters={}):
"""
Redirects the user to the url past by parameter or to the url that we defined in our SSO Request.
:param url: The target URL to redirect the user
:type url: string
:param parameters: Extra parameters to be passed as part of the url
:type parameters: dict
:returns: Redirection url
"""
if url is None and 'RelayState' in self.__request_data['get_data']:
url = self.__request_data['get_data']['RelayState']
return OneLogin_Saml2_Utils.redirect(url, parameters, request_data=self.__request_data)
def is_authenticated(self):
"""
Checks if the user is authenticated or not.
:returns: True if is authenticated, False if not
:rtype: bool
"""
return self.__authenticated
def get_attributes(self):
"""
Returns the set of SAML attributes.
:returns: SAML attributes
:rtype: dict
"""
return self.__attributes
def get_nameid(self):
"""
Returns the nameID.
:returns: NameID
:rtype: string
"""
return self.__nam |
fierval/KaggleMalware | Learning/SupervisedLearning.py | Python | mit | 5,930 | 0.005059 | import numpy as np
from sklearn.grid_search import GridSearchCV
import sklearn.metrics as metrics
from sklearn import preprocessing as prep
from tr_utils import merge_two_dicts, isEmpty
class SKSupervisedLearning (object):
"""
Thin wrapper around some learning methods
"""
def __init__(self, classifier, X_train, Y_train, X_test, Y_test):
"""
X_train, Y_train - training data: examples + corresponding class labels
X_test, Y_test - validation data: examples + corresponding class labels
"""
self.X_train = X_train
self.X_test = X_test
self.Y_train = Y_train
self.Y_test = Y_test
self.X_train_scaled = np.array([])
self.X_test_scaled = np.array([])
self._classifier = classifier
self._clf = None
self._proba_train = None
self._proba_test = None
self._train_params = None
self._estimation_params = None
self._scaler = None
# parameters for sklearn grid search
self._jobs = -1
self._cv = 10
self._verbose = 0
self._scoring = "log_loss"
@property
def scaler(self):
return self._scaler
@property
def clf(self):
if self._clf == None:
self._clf = self._classifier(**self.train_params) if self.train_params != None else self._classifier()
return self._clf
@property
def proba_train(self):
return self._proba_train
@property
def proba_test(self):
return self._proba_test
@property
def train_params(self):
"""
Training parameter dictionary specific to each learner
"""
return self._train_params
@train_params.setter
def train_params(self, val):
self._train_params = val
@property
def estimation_params(self):
"""
Dictionary of paramters to estimate, specific to each learner:
e.g.:
{'gamma': [0.001, 0.1, 1], 'C': [1, 10, 100]}
"""
return self._estimation_params
@estimation_params.setter
def estimation_params(self, val):
self._estimation_params = val
@property
def jobs(self):
return self._jobs
@jobs.setter
def jobs(self, val):
self._jobs = val
@property
def cv(self):
return self._cv
@cv.setter
def cv(self, val):
self._cv = val
@property
def scoring(self):
return self._scoring
@scoring.setter
def scoring(self, val):
self._scoring = val
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, val):
self._verbose = val
@property
def proba_train(self):
return self._proba_train
@property
def proba_test(self):
return self._proba_test
def _pick_examples(self):
'''
If we have scaled examples - pick them, else pick X_train, X_test
'''
return (self.X_train, self.X_test) \
if isEmpty(self.X_train_scaled) or isEmpty(self.X_test_scaled) \
else (self.X_train_scaled, self.X_test_scaled)
def remove_scaling(self):
self.X_test_scaled = None
self.X_train_scaled = None
def grid_search_classifier(self) :
"""
Grid search for the best classifier, given parameters.
Returns best score
Sets the classifier to the best classifier given training and estimation parameters
See sklearn GridSearchCV for details
"""
gs = False
if self.train_params == None and self.estimation_params == None:
raise AttributeError("Cannot have train_params and estimation_params both absent")
# first - grid-search for the best parameters
if self.estimation_params:
X_train, X_test = self._pick_examples()
Y_train = self.Y_train
clf = self._classifier(**self.train_params) if self.train_params != None else self._classifier()
gs = GridSearchCV(clf, self.estimation_params, scoring = self.scoring, cv = self.cv, n_jobs=self.jobs, verbose = self.verbose)
gs.fit(X_train, Y_train)
print gs.best_params_
print gs.best_score_
# if we have specified parameters of our own - we need to add those
if gs:
self.train_params = merge_two_dicts(gs.best_params_, self.train_params) if self.train_params != None else gs.best_params_
self._clf = self._classifier(**self.train_params)
return gs.best_score_
def _fit_scaler(self, scaler_class, X):
return scaler_class().fit(X)
# TODO: other scalers?
def fit_standard_scaler(self):
"""
Standard scaler scales samples 'vertically', (by feature), by removing the mean and reducing to unit std.
Computes a scaler and transforms both train and | validation sets based upon it
"""
self._scaler = self._fit_scaler(prep.StandardScaler, self.X_train)
self.X_train_scaled = self._scaler.transform(self.X_train)
self.X_test_scaled = self._scaler.transform(self.X_test)
def fit_and_validate(self):
'''
Returns training & testing log loss
'''
X_train, X_test = self._pick_examples()
# shorthand
Y_train = self.Y_train
| Y_test = self.Y_test
self.clf.fit(X_train, Y_train)
# get probabilities
self._proba_train = self.clf.predict_proba(X_train)
self._proba_test = self.clf.predict_proba(X_test)
return metrics.log_loss(Y_train, self.proba_train), np.array([]) if isEmpty(Y_test) else metrics.log_loss(Y_test, self.proba_test)
def predict_actual(self, X_actual_test):
'''
Return actual prediction on a set where we don't have labels
'''
return self.clf.predict_proba(X_actual_test) |
mrGeen/cython | Cython/Compiler/ExprNodes.py | Python | apache-2.0 | 498,361 | 0.00197 | #
# Parse tree nodes for expressions
#
from __future__ import absolute_import
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
bytearray_type=object, slice_type=object, _py_int_types=object,
IS_PYTHON3=cython.bint)
import sys
import copy
import os.path
import operator
from .Errors import error, warning, warn_once, InternalError, CompileError
from .Errors import hold_errors, release_errors, held_errors, report_error
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
from .Nodes import Node, utility_code_for_imports
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
from . import TypeSlots
from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
from . import Builtin
from . import Symtab
from .. import Utils
from .Annotate import AnnotationItem
from . import Future
from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
try:
from | __built | in__ import basestring
except ImportError:
# Python 3
basestring = str
any_string_type = (bytes, str)
else:
# Python 2
any_string_type = (bytes, unicode)
if sys.version_info[0] >= 3:
IS_PYTHON3 = True
_py_int_types = int
else:
IS_PYTHON3 = False
_py_int_types = (int, long)
class NotConstant(object):
_obj = None
def __new__(cls):
if NotConstant._obj is None:
NotConstant._obj = super(NotConstant, cls).__new__(cls)
return NotConstant._obj
def __repr__(self):
return "<NOT CONSTANT>"
not_a_constant = NotConstant()
constant_value_not_set = object()
# error messages when coercing from key[0] to key[1]
coercion_error_dict = {
# string related errors
(unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly."
" This is not portable and requires explicit encoding."),
(unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
(unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required",
(bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
(bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly."
" This is not portable to Py3."),
(bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
(bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."),
(basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
(str_type, unicode_type): ("str objects do not support coercion to unicode,"
" use a unicode string literal instead (u'')"),
(str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
(str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).",
(str_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"'str' objects do not support coercion to C types (use 'unicode'?)."),
(PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_char_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
(PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_uchar_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
}
def find_coercion_error(type_tuple, default, env):
err = coercion_error_dict.get(type_tuple)
if err is None:
return default
elif (env.directives['c_string_encoding'] and
any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type,
PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))):
if type_tuple[1].is_pyobject:
return default
elif env.directives['c_string_encoding'] in ('ascii', 'default'):
return default
else:
return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
else:
return err
def default_str_type(env):
return {
'bytes': bytes_type,
'bytearray': bytearray_type,
'str': str_type,
'unicode': unicode_type
}.get(env.directives['c_string_type'])
def check_negative_indices(*nodes):
"""
Raise a warning on nodes that are known to have negative numeric values.
Used to find (potential) bugs inside of "wraparound=False" sections.
"""
for node in nodes:
if node is None or (
not isinstance(node.constant_result, _py_int_types) and
not isinstance(node.constant_result, float)):
continue
if node.constant_result < 0:
warning(node.pos,
"the result of using negative indices inside of "
"code sections marked as 'wraparound=False' is "
"undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
if not seq_node.is_sequence_constructor:
if seq_type is None:
seq_type = seq_node.infer_type(env)
if seq_type is tuple_type:
# tuples are immutable => we can safely follow assignments
if seq_node.cf_state and len(seq_node.cf_state) == 1:
try:
seq_node = seq_node.cf_state[0].rhs
except AttributeError:
pass
if seq_node is not None and seq_node.is_sequence_constructor:
if index_node is not None and index_node.has_constant_result():
try:
item = seq_node.args[index_node.constant_result]
except (ValueError, TypeError, IndexError):
pass
else:
return item.infer_type(env)
# if we're lucky, all items have the same type
item_types = set([item.infer_type(env) for item in seq_node.args])
if len(item_types) == 1:
|
phievo/phievo | phievo/AnalysisTools/main_functions.py | Python | lgpl-3.0 | 7,449 | 0.015573 | import numpy as np
import shelve
import sys,os,glob,pickle,zipfile,re
from urllib.request import urlretrieve
from phievo.AnalysisTools import palette
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def read_network(filename,verbose=False):
"""Retrieve a whole network from a pickle object named filename
Args:
filename (str): the directory where the object is saved
Returns:
The stored network
"""
with open(filename,'rb') as my_file:
net = pickle.load(my_file)
if verbose:
print("Network retrieve from: {}".format(filename))
return net
def smoothing(array,param):
"""Smoothen an array by averaging over the neighbourhood
Args:
array (list): the to be smoothed array
param (int): the distance of the neighbourhood
Returns:
list of same size as array
"""
length = len(array)
return [np.mean(array[max(0,i-param):i+param+1]) for i in range(length)]
def load_generation_data(generations,restart_file):
"""
Searches in the restart file the the informations that has been backed up
up about the individuals at a given generations.
Args:
generations (list): index of the generations to load_generation_data
restart_file: path of the restart_file
Returns:
dictionary where each key contains the informations about one generation.
"""
gen_data = {}
with shelve.open(restart_file) as data:
restart_generations = sorted([int(xx) for xx in data.dict.keys()])
for gen in generations:
if gen not in restart_generations:
limit_print = 20
err_str = ""
err_str += "Generation {0} is not saved in the restart file.\n".format(gen)
err_str += "Please choose among the following generations:\n"
if len(restart_generations)<limit_print:
err_str+=", ".join([str(x) for x in restart_generations[:limit_print]])+"\n"
else:
err_str+=", ".join([str(x) for x in restart_generations[:limit_print]])+", etc.\n"
raise AssertionError(err_str)
dummy,gen_data[gen] = data[str(gen)]
return gen_data
def download_zip(dir_name,url):
"""
Download and extract zip file to dir_name.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
else:
print("The directory {} already exists, download_example_seed cannot overwrite it.".format(dir_name))
return 0
## Downloading zipfile
zip_path = os.path.join(dir_name,dir_name+".zip")
def dlProgress(count, blockSize, totalSize):
state = int(count * blockSize * 100 / totalSize)
if state%2==0:
print("{}: [".format(d | ir_name+".zip")+("#"*int(state/2))+(" "*(50-int(state/2)))+"] {}%".format(state),end="\r")
urlretrieve(url,zip_path,reporthook=dlProgress)
print("{}: [".format(dir_name+" | .zip")+("#"*50)+"] 100%",end="\n")
## unziping file
print("Extracting zip file...",end="\r")
zip_ref = zipfile.ZipFile(zip_path, 'r')
zip_ref.extractall(dir_name)
zip_ref.close()
print("Extracting zip file... done.",end="\n")
print("Deleting zip file...",end="\r")
os.remove(zip_path)
print("Deleting zip file... done.",end="\n")
return 1
def download_tools(run_evolution="run_evolution.py",AnalyseRun="AnalyseRun.ipynb",ProjectCreator="ProjectCreator.ipynb"):
url_runevo = "https://raw.githubusercontent.com/phievo/phievo/master/run_evolution.py"
url_jpnb = "https://github.com/phievo/phievo/raw/master/AnalyzeRun.ipynb"
url_confnb = "https://github.com/phievo/phievo/raw/master/ProjectCreator.ipynb"
urlretrieve(url_runevo,run_evolution)
print("run_evolution.py ... downloaded.")
urlretrieve(url_jpnb,AnalyseRun)
print("AnalyseRun.ipynb ... downloaded.")
urlretrieve(url_confnb,ProjectCreator)
print("ProjectCreator.ipynb ... downloaded.")
def download_example(example_name,directory=None):
"""
Download an example seed or project.
"""
#server_address = "http://www.physics.mcgill.ca/~henrya/seeds_phievo/{}"
server_examples = "https://github.com/phievo/phievo/blob/master/Examples/{}?raw=true"
existing_examples = {
"adaptation":"adaptation.zip",
"somite":"Somites.zip",
"hox":"StaticHox.zip",
"hox_pareto":"StaticHox_pareto.zip",
"lac_operon":"lac_operon.zip",
"immune":"immune.zip",
"minimal_project":"minimal_project.zip",
}
server_seed = "https://github.com/phievo/simulation_examples/blob/master/{}?raw=true"
existing_seeds = {
"seed_adaptation":"adaptation.zip",
"seed_adaptation_pruning":"adaptation_pruning.zip",
"seed_lacOperon":"lacOperon.zip",
"seed_lacOperon_pruning":"lacOperon_pruning.zip",
"seed_somite":"somite.zip",
"seed_somite_pruning":"somite_pruning.zip",
"seed_hox_pareto_light":"hox_pareto_light.zip",
}
with_seed = False
if "seed" in example_name:
with_seed = True
try:
zip_name = existing_seeds[example_name]
example_name = example_name[5:]
url = server_seed.format(zip_name)
except KeyError:
print("Example {} is not available.".format(example_name))
print("Only the following examples are available:\n\t- "+"\n\t- ".join(list(existing_examples.keys())+list(existing_seeds.keys())))
return None
else:
try:
zip_name = existing_examples[example_name]
url = server_examples.format(zip_name)
except KeyError:
print("Example {} is not available.".format(example_name))
print("Only the following examples are available:\n\t- "+"\n\t- ".join(list(existing_examples.keys())+list(existing_seeds.keys())))
return None
if not directory:
directory = "example_{}".format(example_name)
res = download_zip(directory,url)
if not res:
return None
if with_seed:
seed_name = os.path.join(directory,"Seed{}".format(example_name))
os.makedirs(seed_name)
files = glob.glob(os.path.join(directory,"*"))
files.remove(seed_name)
for filename in files :
try:
os.rename(filename, filename.replace(directory,seed_name))
except OSError:
import pdb;pdb.set_trace()
print("recovering log files...",end="\r")
for log_f in glob.glob(os.path.join(seed_name,"log_*")):
f_name = log_f.split(os.sep)[-1]
f_name = f_name.replace("log_","")
os.rename(log_f, os.path.join(directory,f_name))
with open(os.path.join(directory,"init_file.py"),"r") as init_file:
init_text = init_file.read()
init_text = re.sub("(cfile\[[\'\"](\w+)[\'\"]]\s*=\s*).+",r"\1'\2.c'",init_text)
init_text = re.sub("(pfile\[[\'\"](\w+)[\'\"]]\s*=\s*).+",r"\1'\2.py'",init_text)
with open(os.path.join(directory,"init_file.py"),"w") as init_file:
init_file.write(init_text)
print("recovering log files... done.",end="\n")
print("Project saved in {}.".format(directory))
|
rh-s/heat | heat/common/config.py | Python | apache-2.0 | 19,187 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Heat
"""
import logging as sys_logging
import os
from eventlet.green import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import wsgi
LOG = logging.getLogger(__name__)
paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [
cfg.StrOpt('flavor',
help=_("The flavor to use.")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use."))]
service_opts = [
cfg.IntOpt('periodic_interval',
default=60,
help=_('Seconds between running periodic tasks.')),
cfg.StrOpt('heat_metadata_server_url',
default="",
help=_('URL of the Heat metadata server.')),
cfg.StrOpt('heat_waitcondition_server_url',
help=_('URL of the Heat waitcondition server.')),
cfg.StrOpt('heat_watch_server_url',
default="",
help=_('URL of the Heat CloudWatch server.')),
cfg.StrOpt('instance_connection_is_secure',
default="0",
help=_('Instance connection to CFN/CW API via https.')),
cfg.StrOpt('instance_connection_https_validate_certificates',
default="1",
help=_('Instance connection to CFN/CW API validate certs if '
'SSL is used.')),
cfg.StrOpt('region_name_for_services',
help=_('Default region name used to get services endpoints.')),
cfg.StrOpt('heat_stack_user_role',
default="heat_stack_user",
help=_('Keystone role for heat template-defined users.')),
cfg.StrOpt('stack_user_domain_id',
deprecated_opts=[cfg.DeprecatedOpt('stack_user_domain',
group=None)],
help=_('Keystone domain ID which contains heat '
'template-defined users. If this option is set, '
'stack_user_domain_name option will be ignored.')),
cfg.StrOpt('stack_user_domain_name',
| help=_('Keystone domain name which contains heat '
'template-defined users. If `stack_user_domain_id` '
'option is set, this option is ignored.')),
cfg.StrOpt('stack_domain_admin',
help=_('Keystone username, a user with roles sufficient to '
'manage users and projects in the stack_user_domain.')),
cfg.StrOpt('stack_domain_admin_password',
secret=True, |
help=_('Keystone password for stack_domain_admin user.')),
cfg.IntOpt('max_template_size',
default=524288,
help=_('Maximum raw byte size of any template.')),
cfg.IntOpt('max_nested_stack_depth',
default=5,
help=_('Maximum depth allowed when using nested stacks.')),
cfg.IntOpt('num_engine_workers',
default=processutils.get_worker_count(),
help=_('Number of heat-engine processes to fork and run.'))]
engine_opts = [
cfg.StrOpt('instance_user',
default='',
help=_("The default user for new instances. This option "
"is deprecated and will be removed in the Juno release. "
"If it's empty, Heat will use the default user set up "
"with your cloud image (for OS::Nova::Server) or "
"'ec2-user' (for AWS::EC2::Instance).")),
cfg.ListOpt('plugin_dirs',
default=['/usr/lib64/heat', '/usr/lib/heat',
'/usr/local/lib/heat', '/usr/local/lib64/heat'],
help=_('List of directories to search for plug-ins.')),
cfg.StrOpt('environment_dir',
default='/etc/heat/environment.d',
help=_('The directory to search for environment files.')),
cfg.StrOpt('deferred_auth_method',
choices=['password', 'trusts'],
default='trusts',
help=_('Select deferred auth method, '
'stored password or trusts.')),
cfg.ListOpt('trusts_delegated_roles',
default=[],
help=_('Subset of trustor roles to be delegated to heat.'
' If left unset, all roles of a user will be'
' delegated to heat when creating a stack.')),
cfg.IntOpt('max_resources_per_stack',
default=1000,
help=_('Maximum resources allowed per top-level stack. '
'-1 stands for unlimited.')),
cfg.IntOpt('max_stacks_per_tenant',
default=100,
help=_('Maximum number of stacks any one tenant may have'
' active at one time.')),
cfg.IntOpt('action_retry_limit',
default=5,
help=_('Number of times to retry to bring a '
'resource to a non-error state. Set to 0 to disable '
'retries.')),
cfg.IntOpt('event_purge_batch_size',
default=10,
help=_("Controls how many events will be pruned whenever a "
"stack's events exceed max_events_per_stack. Set this "
"lower to keep more events at the expense of more "
"frequent purges.")),
cfg.IntOpt('max_events_per_stack',
default=1000,
help=_('Maximum events that will be available per stack. Older'
' events will be deleted when this is reached. Set to 0'
' for unlimited events per stack.')),
cfg.IntOpt('stack_action_timeout',
default=3600,
help=_('Timeout in seconds for stack action (ie. create or'
' update).')),
cfg.IntOpt('error_wait_time',
default=240,
help=_('Error wait time in seconds for stack action (ie. create'
' or update).')),
cfg.IntOpt('engine_life_check_timeout',
default=2,
help=_('RPC timeout for the engine liveness check that is used'
' for stack locking.')),
cfg.BoolOpt('enable_cloud_watch_lite',
default=False,
help=_('Enable the legacy OS::Heat::CWLiteAlarm resource.')),
cfg.BoolOpt('enable_stack_abandon',
default=False,
help=_('Enable the preview Stack Abandon feature.')),
cfg.BoolOpt('enable_stack_adopt',
default=False,
help=_('Enable the preview Stack Adopt feature.')),
cfg.BoolOpt('convergence_engine',
default=False,
help=_('Enables engine with convergence architecture. All '
'stacks with this option will be created using '
'convergence engine .')),
cfg.StrOpt('default_software_config_transport',
choices=['POLL_SERVER_CFN',
'POLL_SERVER_HEAT',
'POLL_TEMP_URL'],
default='POLL_SERVER_CFN',
help=_('Template default for how the server should receive the '
'metadata required for software configuration. '
'POLL_SERVER_CFN will allow calls to the cfn API action '
'DescribeStackResource authenticated with the provided '
|
puentesarrin/asyncflux | setup.py | Python | apache-2.0 | 2,548 | 0.000785 | # -*- coding: utf-8 *-*
import os
import subprocess
import sys
try:
from setuptools import setup
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup
from distutils.cmd import Command
with open('README.rst') as f:
readme_content = f.read()
class DocCommand(Command):
description = "generate or test documentation"
user_options = [("test", "t",
"run doctests instead of generating documentation")]
boolean_options = ["test"]
def initialize_options(self):
self.test = False
def finalize_options(self):
pass
def run(self):
if self.test:
path = "docs/_build/doctest"
mode = "doctest"
else:
path = "docs/_build/%s" % __version__
mode = "html"
try:
os.makedirs(path)
except:
pass
status = subprocess.call(["sphinx-build", "-E",
"-b", mode, "docs", path])
if status:
raise RuntimeError("documentation step '%s' failed" % (mode,))
sys.stdout.write("\nDocumentation step '%s' performed, results here:\n"
" %s/\n" % (mode, path))
setup(
name='asyncflux',
version='0.0+',
url='https://github.com/puentesarrin/asyncflux',
description='Asynchronous client for InfluxDB and Tornado.',
long_description=readme_content,
author='Jorge Puente-Sarrín',
author_email='puentesarrin@gmail.com',
packages=['asyncflux'],
keywords=['asyncflux', 'tornado', 'influxdb', 'influx', 'async'],
install_requires=['tornado>=3.0'],
license='Apache License, Version 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Lic | ense :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
| 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'],
test_suite='tests.runtests',
cmdclass={"doc": DocCommand}
)
|
plotly/python-api | packages/python/plotly/plotly/validators/scatter3d/marker/_line.py | Python | mit | 5,005 | 0.0002 | import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="line", parent_name="scatter3d.marker", **kwargs):
super(LineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Line"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if in `marker.line.color`is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has
an effect only if in `marker.line.color`is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if in
`marker.line.color`is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets themarker.linecolor. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array.
The colorscale must be an a | rray containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To contro | l the bounds of the colorscale in
color space, use`marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.line.color`is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
""",
),
**kwargs
)
|
tbeckham/DeploymentManager | resource_manager/server.py | Python | apache-2.0 | 658 | 0.00152 | #!/usr/bin/python
from eve import Eve
from eve.auth import BasicAuth
from flask_bootstrap import Bootstrap
from eve_docs import eve_docs
|
class ResourceManagerBasicAuth(BasicAuth):
def check_auth(self, username, password, allowed_roles, resource, me | thod):
if method == 'GET':
# Allow read-only access without a password
return True
else:
# all the other resources are secured
return username == 'admin' and password == 'admin'
app = Eve(settings="./api_config.py", auth=ResourceManagerBasicAuth)
Bootstrap(app)
app.register_blueprint(eve_docs, url_prefix='/docs')
app.run(host='0.0.0.0')
|
fbraem/mqweb | samples/python/channel_start.py | Python | mit | 1,189 | 0.023549 | '''
Starts a channel on a queuemanager
MQWeb runs on localhost and is listening on port 8081.
'''
import json
import httplib
import socket
import argparse
parser = argparse.ArgumentParser(
description='MQWeb - Python sample - Start Channel',
epilog="For more information: http://www.mqweb.org"
)
parser.add_argument('-m', '--queuemanager', help='Name of the queuemanager', required=True)
parser.add_argument('-c', '--channel', help='Name of the ch | annel', required=True)
args = parser.parse_args()
url = "/api/channel/start/" + args.queuemanager + '/' + args.channel;
try:
conn = httplib.HTTPConnection('localhost', 8081)
conn.request('GET', url)
res = conn.getresponse()
result = json.loads(res.read())
if 'error' in result:
print ('Received a WebSphere MQ error: ' +
str(result['error']['reason']['code'])
)
else:
print "Start command succesfully send."
print "Note: this doesn't m | ean the channel is running! Use chstatus to get the status of the channel."
except httplib.HTTPException as e:
print ('An HTTP error occurred while starting a channel: ' +
e.errno + e.strerror
)
except socket.error as e:
print e.strerror
print 'Is the MQWeb daemon running?'
|
ratoaq2/Flexget | flexget/ui/plugins/schedule/__init__.py | Python | mit | 91 | 0 | fro | m __future__ import unicode_literals, division, absolute_import
f | rom .schedule import *
|
Frumple/mrt-file-server | instance_template/production/config.py | Python | mit | 510 | 0.005882 | # Add settings here that are specific for live production environments
# These settings will override those set in default_config.py
# SECRET_KEY must be set to a random and unique phrase before running the application.
# See http://flask.pocoo.org/docs/0.12/quickstart/ on how to gene | rate a secret key.
# SECRET_KEY = ""
# Enable basic authentication for production
BASIC_AUTH_FORCE = True
# Uncomment and set your basic authentication credentials
# BASIC_AUTH_USERNAME = "files"
# BASIC_AUTH_PAS | SWORD = "" |
alxgu/ansible | lib/ansible/modules/cloud/vmware/vmware_host_acceptance.py | Python | gpl-3.0 | 6,958 | 0.002731 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_acceptance
short_description: Manage the host acceptance level of an ESXi host
description:
- This module can be used to manage the host acceptance level of an ESXi host.
- The host acceptance level controls the acceptance level of each VIB on a ESXi host.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Acceptance level of all ESXi host system in the given cluster will be managed.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Acceptance level of this ESXi host system will be managed.
- If C(cluster_name) is not given, this parameter is required.
state:
description:
- Set or list acceptance level of the given ESXi host.
- 'If set to C(list), then will return current acceptance level of given host system/s.'
- If set to C(present), then will set given acceptance level.
choices: [ list, present ]
required: False
default: 'list'
acceptance_level:
description:
- Name of acceptance level.
- If set to C(partner), then accept only partner and VMware signed and certified VIBs.
- If set to C(vmware_certified), then accept only VIBs that are signed and certified by VMware.
- If set to C(vmware_accepted), then accept VIBs that have been accepted by VMware.
- If set to C(community), then accept all VIBs, even those that are not signed.
choices: [ community, partner, vmware_accepted, vmware_certified ]
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set acceptance level to community for all ESXi Host in given Cluster
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
acceptance_level: 'community'
state: present
delegate_to: localhost
register: cluster_acceptance_level
- name: Set acceptance level to vmware_accepted for the given ESXi Host
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
acceptance_level: 'vmware_accepted'
state: present
delegate_to: localhost
register: host_acceptance_level
- name: Get acceptance level from the given ESXi Host
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: list
delegate_to: localhost
register: host_acceptance_level
'''
RETURN = r'''
facts:
description:
- dict with hostname as key and dict with acceptance level facts, error as value
returned: facts
type: dict
sample: { "facts": { "localhost.localdomain": { "error": "NA", "level": "vmware_certified" }}}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VMwareAccpetanceManager(PyVmomi):
    """Gather and update the software acceptance level of ESXi hosts.

    NOTE(review): the class name keeps the historical misspelling
    ("Accpetance") because main() refers to it by this exact name.
    """
    def __init__(self, module):
        super(VMwareAccpetanceManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # Target hosts: every host in the cluster, or the single named host.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.desired_state = self.params.get('state')
        # Per-host facts: {hostname: {'level': <acceptance level>, 'error': <msg or 'NA'>}}
        self.hosts_facts = {}
        self.acceptance_level = self.params.get('acceptance_level')

    def gather_acceptance_facts(self):
        """Populate self.hosts_facts with each host's current acceptance level.

        Failures on one host are recorded in that host's 'error' fact
        instead of aborting the whole run.
        """
        for host in self.hosts:
            self.hosts_facts[host.name] = dict(level='', error='NA')
            host_image_config_mgr = host.configManager.imageConfigManager
            if host_image_config_mgr:
                try:
                    self.hosts_facts[host.name]['level'] = host_image_config_mgr.HostImageConfigGetAcceptance()
                except vim.fault.HostConfigFault as e:
                    self.hosts_facts[host.name]['error'] = to_native(e.msg)

    def set_acceptance_level(self):
        """Apply the desired acceptance level to every host that differs.

        Exits the module with changed=True if any host was (or, in check
        mode, would have been) updated.
        """
        change = []
        for host in self.hosts:
            host_changed = False
            if self.hosts_facts[host.name]['level'] != self.acceptance_level:
                host_image_config_mgr = host.configManager.imageConfigManager
                if host_image_config_mgr:
                    try:
                        if self.module.check_mode:
                            # Pretend the update happened so the returned
                            # facts reflect the requested level.
                            self.hosts_facts[host.name]['level'] = self.acceptance_level
                        else:
                            host_image_config_mgr.UpdateHostImageAcceptanceLevel(newAcceptanceLevel=self.acceptance_level)
                            # Re-read the level so facts show what is live.
                            self.hosts_facts[host.name]['level'] = host_image_config_mgr.HostImageConfigGetAcceptance()
                        host_changed = True
                    except vim.fault.HostConfigFault as e:
                        self.hosts_facts[host.name]['error'] = to_native(e.msg)
            change.append(host_changed)
        self.module.exit_json(changed=any(change), facts=self.hosts_facts)

    def check_acceptance_state(self):
        """Entry point: list current levels, or enforce the requested one."""
        self.gather_acceptance_facts()
        if self.desired_state == 'list':
            self.module.exit_json(changed=False, facts=self.hosts_facts)
        self.set_acceptance_level()
def main():
    """Module entry point: build the argument spec and run the manager."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        acceptance_level=dict(
            type='str',
            choices=['community', 'partner', 'vmware_accepted', 'vmware_certified'],
        ),
        state=dict(
            type='str',
            choices=['list', 'present'],
            default='list',
        ),
    )
    ansible_module = AnsibleModule(
        argument_spec=spec,
        # A target is mandatory: either a whole cluster or a single host.
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        # The desired level only matters when we are going to set it.
        required_if=[
            ['state', 'present', ['acceptance_level']],
        ],
        supports_check_mode=True,
    )
    manager = VMwareAccpetanceManager(ansible_module)
    manager.check_acceptance_state()


if __name__ == "__main__":
    main()
|
josepedro/acidentes_em_rodovias_refatoracao | acidentes_em_rodovias/app/models/ocorrencia_basica.py | Python | gpl-3.0 | 1,059 | 0.009506 | # -*- coding: utf-8 -*-
#
# Universidade de Brasilia - FGA
# Técnicas de Programação, 1/2014
#
# Acidentes em Rodovias, 2013-2014
# GitHub: https://github.com/josepedro/acidentes_em_rodovias_refatoracao
#
"""@package Municipios
Declaração das classes para ocorrencias.
Este modulo contem declação da classe de modelo
para Ocorrencias basicas
"""
from app.models.municipio import Municipio
class OcorrenciaBasica(Municipio):
    """Model for a basic road-accident occurrence record.

    Each attribute mirrors one field of the occurrence query result and is
    initialised to an empty string until it is filled from the database.
    """
    def __init__(self):
        ## Occurrence id (ocoid).
        self.ocoid = ''
        ## Date on which the occurrence happened (ocodataocorrencia).
        self.ocodataocorrencia = ''
        ## Date on which the occurrence was registered (ocodataregistro).
        self.ocodataregistro = ''
        ## Denomination description (tcodescricao).
        self.tcodescricao = ''
        ## Accident type description (ttadescricao).
        self.ttadescricao = ''
        ## Accident cause description (tcadescricao).
        self.tcadescricao = ''
        ## Federal highway (BR) where it happened (lbrbr).
        self.lbrbr = ''
        ## Vehicle brand description (tmvdescricao).
        self.tmvdescricao = ''
        ## Vehicle description (tvvdescricao).
        self.tvvdescricao = ''
raymondxyang/tensorflow | tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py | Python | apache-2.0 | 78,772 | 0.003504 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceQueueingStateSaver and wrappers.
Please see the reading data how-to for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_restore_sparse = sparse_ops._take_many_sparse_from_tensors_map
_store_sparse = sparse_ops._add_many_sparse_to_tensors_map
# pylint: enable=protected-access
class _SequenceInputWrapper(object):
  """A wrapper object for storing sequence-related input.

  Bundles the four pieces of one sequence example:
    length: A scalar int containing the length of the input sequence.
    key: A scalar string containing the unique key of the input sequence.
    sequences: A dict mapping labels, like `input`, to tensors
      whose initial index dimension is at least size `length`.
    context: A dict mapping labels, like `global_target`, to tensors
      that represent data across the entire example.
  """

  def __init__(self, length, key, sequences, context):
    def _validate_keys(mapping, label):
      # Keys are embedded in tensor names, so they must be strings and may
      # not contain the ':' separator used internally.
      for k in mapping.keys():
        if not isinstance(k, six.string_types):
          raise TypeError("%s key must be string: %s" % (label, k))
        if ":" in k:
          raise ValueError("%s key may not have a colon: '%s'" % (label, k))

    length = ops.convert_to_tensor(length, name="length")
    key = ops.convert_to_tensor(key, name="key")
    if not isinstance(sequences, dict):
      raise TypeError("sequences must be a dict")
    if not isinstance(context, dict):
      raise TypeError("context must be a dict")
    if not sequences:
      raise ValueError("must have at least one sequence tensor")
    _validate_keys(sequences, "sequence")
    _validate_keys(context, "context")
    self._length = length
    self._key = key
    self._sequences = {
        k: ops.convert_to_tensor(v, name="sequence_%s" % k)
        for k, v in sequences.items()
    }
    self._context = {
        k: ops.convert_to_tensor(v, name="context_%s" % k)
        for k, v in context.items()
    }

  @property
  def length(self):
    return self._length

  @property
  def key(self):
    return self._key

  @property
  def sequences(self):
    return self._sequences

  @property
  def context(self):
    return self._context
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.

  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.

  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  # The Assert op only runs when something that depends on `new_value` is
  # evaluated, hence the control dependency around the identity below.
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.equal(math_ops.mod(value, multiple_of), 0),
              math_ops.not_equal(value, 0)), [
                  string_ops.string_join([
                      "Tensor %s should be a multiple of: " % value.name,
                      string_ops.as_string(multiple_of), ", but saw value: ",
                      string_ops.as_string(value),
                      ". Consider setting pad=True."
                  ])
              ])
  ]):
    new_value = array_ops.identity(value, name="multiple_of_checked")
  return new_value
def _check_rank(value, expected_rank):
  """Check the rank of Tensor `value`, via shape inference and assertions.

  Args:
    value: A Tensor, possibly with shape associated shape information.
    expected_rank: int32 scalar (optionally a `Tensor`).

  Returns:
    new_value: A Tensor matching `value`. Accessing this tensor tests
      assertions on its rank. If expected_rank is not a `Tensor`, then
      new_value's shape's rank has been set.

  Raises:
    ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
      is known and is not equal to `expected_rank`.
  """
  assert isinstance(value, ops.Tensor)
  # Runtime check: the Assert fires only when `new_value` is evaluated.
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.equal(expected_rank, array_ops.rank(value)), [
              string_ops.string_join([
                  "Rank of tensor %s should be: " % value.name,
                  string_ops.as_string(expected_rank), ", shape received:"
              ]), array_ops.shape(value)
          ])
  ]):
    new_value = array_ops.identity(value, name="rank_checked")
  # If the expected rank is a Tensor with a statically known value, unwrap
  # it so the static shape-inference path below can run too.
  if isinstance(expected_rank, ops.Tensor):
    expected_rank_value = tensor_util.constant_value(expected_rank)
    if expected_rank_value is not None:
      expected_rank = int(expected_rank_value)
  if not isinstance(expected_rank, ops.Tensor):
    try:
      new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
    except ValueError as e:
      raise ValueError("Rank check failed for %s: %s" % (value.name, str(e)))
  return new_value
def _check_shape(value, expected_shape):
"""Check the shape of Tensor `value`, via shape inference and assertions.
Args:
value: A Tensor, possibly with shape associated shape information.
expected_shape: a `TensorShape`, list of `int32`, or a vector `Tensor`.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
assertions on its shape. If expected_shape is not a `Tensor`, then
      new_value's shape has been set.

  Raises:
    ValueError: if `expected_shape` is not a `Tensor` and the shape of `value`
      is known and is not equal to `expected_shape`.
"""
assert isinstance(value, ops.Tensor)
if isinstance(expected_shape, tensor_shape.TensorShape):
expected_shape = expected_shape.as_list()
if isinstance(expected_shape, ops.Tensor):
expected_shape_value = tensor_util.constant_value(expected_shape)
if expected_shape_value is not None:
expected_shape = [int(d) for d in expected_shape_value]
if isinstance(expected_shape, ops.Tensor):
value = _check_rank(value, array_ops.size(expected_shape))
else:
value = _check_rank(value, len(expected_shape))
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.equal(expected_shape, array_ops.shape(value))), [
string_ops.string_join([
"Shape of tensor %s should be: " % value.name,
string_ops.as_string(expected_shape),
", shape received: ",
string_ops.as_string(array_ops.shape(value))
])
])
]):
new_value = array_ops.identity(value, name="shap |
cedricbonhomme/k-means-clustering | recommend.py | Python | mit | 2,926 | 0.005126 | #! /usr/bin/python
#-*- coding:utf-8 -*
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.3 $"
__date__ = "$Date: 2015/08/31$"
__revision__ = "$Date: 2015/10/26 $"
__copyright__ = ""
__license__ = ""
import sys
import requests
import json
import pickle
import clusters
import list_clusters
def recommend(user_id, recommender_nickname, recommender_password,
              service_url):
    """Build the list of talks/booths/posters to recommend for a user.

    Fetches the user's programme from the web service, then returns every
    talk belonging to a pre-computed cluster that intersects it.  Returns
    an empty list (after printing the HTTP reason) on any non-200 answer.
    """
    picks = []
    resp = requests.get(service_url + "/api/v1.0/profiles.json/" + user_id,
                        auth=(recommender_nickname, recommender_password))
    if resp.status_code == 200:
        program = json.loads(resp.text)["program"]
        program_ids = set(talk["id"] for talk in program)
        # Clusters were pickled by the k-means step; rows map back to talks.
        with open("clusters", 'rb') as f:
            kclust = pickle.load(f)
        rownames, colnames, data = clusters.readfile("vectors.txt")
        for cluster in kclust:
            members = [rownames[r] for r in cluster]
            # Recommend the whole cluster when the user attends any of it.
            if program_ids.intersection(members):
                picks.extend(members)
    else:
        print(resp.reason)
    return picks
def update_profile_with_recommendations(user_id, recommender_nickname,
                                        recommender_password,
                                        recommended_talks):
    """Update the profile of the user with the previously calculated
    recommendations.

    Sends a JSON-Patch "add" request setting /recommended_talks on the
    profile identified by *user_id*.

    NOTE(review): relies on the module-level ``service_url`` set in
    ``__main__`` rather than taking it as a parameter -- confirm intended.
    """
    headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
    data = [{"id": talk_id} for talk_id in recommended_talks]
    payload = {"op": "add", "path": "/recommended_talks", "value": data}
    r = requests.patch(service_url + "/api/v1.0/profiles.json/" + user_id,
                       auth=(recommender_nickname, recommender_password),
                       headers=headers, data=json.dumps(payload))
    if r.status_code == 201:
        print("Profile updated.")
    else:
        # BUG FIX: these were Python 2 `print` statements in a file whose
        # shebang requests python3 (SyntaxError there); the function form
        # is valid under both interpreters.
        print(r.reason)
        print(r.text)
if __name__ == "__main__":
    # Point of entry in execution mode
    # Usage: recommend.py <user_id> <nickname> <password> [service_url]
    user_id = sys.argv[1]
    recommender_nickname = sys.argv[2]
    recommender_password = sys.argv[3]
    try:
        # The service URL is optional; default to the empty string
        # (relative URLs) when it is not supplied on the command line.
        service_url = sys.argv[4]
    except Exception as e:
        service_url = ""
    recommended_talks = recommend(user_id, recommender_nickname,
                                  recommender_password, service_url)
    if len(recommended_talks) != 0:
        print("Talks to recommend:")
        print(recommended_talks)
        print("Updating profile...")
        # NOTE: update_profile_with_recommendations reads the module-level
        # `service_url` assigned above.
        update_profile_with_recommendations(user_id, recommender_nickname,
                                            recommender_password,
                                            recommended_talks)
    else:
        print("Nothing to recommend.")
antoinecarme/pyaf | tests/model_control/detailed/transf_None/model_control_one_enabled_None_Lag1Trend_Seasonal_DayOfWeek_AR.py | Python | bsd-3-clause | 155 | 0.051613 | import tests.model_control.test_o | zone_custom_models_enabled as testmod
testmod.build_model( ['None'] , | ['Lag1Trend'] , ['Seasonal_DayOfWeek'] , ['AR'] ); |
py-in-the-sky/challenges | intermediate_words_search_python/shortest_path_search.py | Python | mit | 1,749 | 0.004574 | # imports from solution
from utils import underestimate_edit_distance_fn_factory
# 3rd-party imports
from Queue import Queue, PriorityQueue
def shortest_path_bfs(word1, word2, neighbors_fn, queue=None):
    """Breadth-first search for a shortest word path from word1 to word2.

    neighbors_fn(word) must return the words reachable in one step from
    `word`.  Returns the path as a tuple of words, or None when word2 is
    unreachable.  A custom FIFO queue may be supplied via `queue`.
    """
    frontier = Queue() if queue is None else queue
    frontier.put((word1,))
    seen = set()
    while not frontier.empty():
        path = frontier.get()
        current = path[-1]
        if current == word2:
            return path
        if current in seen:
            continue
        seen.add(current)
        for nxt in neighbors_fn(current):
            if nxt not in seen:
                frontier.put(path + (nxt,))
def shortest_path_A_star(word1, word2, neighbors_fn, queue=None):
    """A* search for a shortest word path from word1 to word2.

    Each queued path is prioritised by len(path) plus an admissible
    underestimate of the remaining edit distance to word2, so the first
    popped path ending in word2 is a shortest one.  Returns None when
    word2 is unreachable.  A custom priority queue may be supplied.
    """
    heuristic = underestimate_edit_distance_fn_factory(word2)
    frontier = PriorityQueue() if queue is None else queue
    frontier.put((1 + heuristic(word1), (word1,)))
    seen = set()
    while not frontier.empty():
        _, path = frontier.get()
        current = path[-1]
        if current == word2:
            return path
        if current in seen:
            continue
        seen.add(current)
        for nxt in neighbors_fn(current):
            if nxt in seen:
                continue
            extended = path + (nxt,)
            # Priority = optimistic total path length through this word.
            priority = heuristic(nxt) + len(extended)
            frontier.put((priority, extended))
manhhomienbienthuy/scikit-learn | sklearn/impute/__init__.py | Python | bsd-3-clause | 438 | 0 | """Transformers for missing value imputation"""
import typing
f | rom ._base import MissingIndicator, SimpleImputer
from ._knn import KNNImputer
if typing.TYPE_CHECKING:
# Avoid errors in type checkers (e.g. mypy) for experimental estimators.
# TODO: remove this check once the estimator is no longer experimental.
from ._iterative import IterativeImputer # noqa
__all__ = ["MissingIndicator", "SimpleImputer | ", "KNNImputer"]
|
lissyx/build-mozharness | mozharness/mozilla/testing/testbase.py | Python | mpl-2.0 | 34,334 | 0.003291 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import copy
import os
import platform
import pprint
import re
import urllib2
import json
from mozharness.base.config import ReadOnlyDict, parse_config_file
from mozharness.base.errors import BaseErrorList
from mozharness.base.log import FATAL, WARNING
from mozharness.base.python import (
ResourceMonitoringMixin,
VirtualenvMixin,
virtualenv_config_options,
)
from mozharness.mozilla.buildbot import BuildbotMixin, TBPL_WARNING
from mozharness.mozilla.proxxy import Proxxy
from mozharness.mozilla.structuredlog import StructuredOutputParser
from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
from mozharness.mozilla.testing.try_tools import TryToolsMixin
from mozharness.mozilla.tooltool import TooltoolMixin
from mozharness.lib.python.authentication import get_credentials
INSTALLER_SUFFIXES = ('.tar.bz2', '.zip', '.dmg', '.exe', '.apk', '.tar.gz')
testing_config_options = [
[["--installer-url"],
{"action": "store",
"dest": "installer_url",
"default": None,
"help": "URL to the installer to install",
}],
[["--installer-path"],
{"action": "store",
"dest": "installer_path",
"default": None,
"help": "Path to the installer to install. This is set automatically if run with --download-and-extract.",
}],
[["--binary-path"],
{"action": "store",
"dest": "binary_path",
"default": None,
"help": "Path to installed binary. This is set automatically if run with --install.",
}],
[["--exe-suffix"],
{"action": "store",
"dest": "exe_suffix",
"default": None,
"help": "Executable suffix for binaries on this platform",
}],
[["--test-url"],
{"action": "store",
"dest": "test_url",
"default": None,
"help": "URL to the zip file containing the actual tests",
}],
[["--test-packages-url"],
{"action": "store",
"dest": "test_packages_url",
"default": None,
"help": "URL to a json file describing which tests archives to download",
}],
[["--jsshell-url"],
{"action": "store",
"dest": "jsshell_url",
"default": None,
"help": "URL to the jsshell to install",
}],
[["--download-symbols"],
{"action": "store",
"dest": "download_symbols",
"type": "choice",
"choices": ['ondemand', 'true'],
"help": "Download and extract crash reporter symbols.",
}],
] + copy.deepcopy(virtualenv_config_options)
# TestingMixin {{{1
class TestingMixin(VirtualenvMixin, BuildbotMixin, ResourceMonitoringMixin, TooltoolMixin,
TryToolsMixin):
"""
The steps to identify + download the proper bits for [browser] unit
tests and Talos.
"""
installer_url = None
installer_path = None
binary_path = None
test_url = None
test_packages_url = None
test_zip_path = None
tree_config = ReadOnlyDict({})
symbols_url = None
symbols_path = None
jsshell_url = None
minidump_stackwalk_path = None
default_tools_repo = 'https://hg.mozilla.org/build/tools'
proxxy = None
    def _query_proxxy(self):
        """Return the shared Proxxy instance, creating it on first use."""
        if not self.proxxy:
            self.proxxy = Proxxy(self.config, self.log_obj)
        return self.proxxy
def download_proxied_file(self, url, file_name=None, parent_dir=None,
create_parent_dir=True, error_level=FATAL,
exit_code=3):
proxxy = self._query_proxxy()
return proxxy.download_proxied_file(url=url, file_name=file_name,
parent_dir=parent_dir,
create_parent_dir=create_parent_dir,
error_level=error_level,
exit_code=exit_code)
def download_file(self, *args, **kwargs):
'''
This function helps not to use download of proxied files
since it does not support authenticated downloads.
This could be re-factored and fixed in bug 1087664.
'''
if self.config.get("developer_mode"):
return super(TestingMixin, self).download_file(*args, **kwargs)
else:
return self.download_proxied_file(*args, **kwargs)
def query_value(self, key):
"""
This function allows us to check for a value
in the self.tree_config first and then on self.config
"""
return self.tree_config.get(key, self.config.get(key))
def query_build_dir_url(self, file_name):
"""
Resolve a file name to a potential url in the build upload directory where
that file can be found.
"""
if self.test_packages_url:
reference_url = self.test_packages_url
elif self.installer_url:
reference_url = self.installer_url
else:
self.fatal("Can't figure out build directory urls without an installer_url "
"or test_packages_url!")
last_slash = reference_url.rfind('/')
base_url = reference_url[:last_slash]
return '%s/%s' % (base_url, file_name)
    def query_symbols_url(self):
        """Return (and cache) the crashreporter-symbols URL derived from
        the installer URL's known suffix."""
        if self.symbols_url:
            return self.symbols_url
        if not self.installer_url:
            self.fatal("Can't figure out symbols_url without an installer_url!")
        for suffix in INSTALLER_SUFFIXES:
            if self.installer_url.endswith(suffix):
                self.symbols_url = self.installer_url[:-len(suffix)] + '.crashreporter-symbols.zip'
                return self.symbols_url
        else:
            # for/else: only reached when no known suffix matched above.
            self.fatal("Can't figure out symbols_url from installer_url %s!" % self.installer_url)
    def _pre_config_lock(self, rw_config):
        # Before the config is frozen, apply developer-mode tweaks when any
        # loaded config file is a developer_config.  (The index and parsed
        # dict are unused; only the file name matters here.)
        for i, (target_file, target_dict) in enumerate(rw_config.all_cfg_files_and_dicts):
            if 'developer_config' in target_file:
                self._developer_mode_changes(rw_config)
def _developer_mode_changes(self, rw_config):
""" This function is called when you append the config called
developer_config.py. This allows you to run a job
outside of the Release Engineering infrastructure.
What this functions accomplishes is:
* read-buildbot-config is removed from the list of actions
* --installer-url is set
* --test-url is set if needed
* every url is substituted by another external to the
Release Engineering network
"""
c = self.config
orig_config = copy.deepcopy(c)
self.warning("When you use developer_config.py, we drop " \
"'read-buildbot-config' from the list of actions.")
if "read-buildbot-config" in rw_config.actions:
rw_config.actions.remove("read-buildbot-config")
self.actions = tuple(rw_config.actions)
def _replace_url(url, changes):
for from_, to_ in changes:
if url.startswith(from_):
new_url = url.replace(from_, to_)
self.info("Replacing url %s -> %s" % (url, new_url))
return new_url
return url
assert c["installer_url"], "You must use --installer-url with developer_config.py"
if c.get("require_test_zip"):
if not c.get('test_url') and not c.get('test_packages_url'):
raise AssertionError("You must use --test-url or --test-packages-url with developer_config.py")
c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"])
if c.get("test_url"):
c["test_url"] = _replace_url(c["test_url"], c["replace_urls"])
if c.get("test_packages_url"):
c["test_packages_url"] = _replace_url(c["test_packages_url"], c["replace_urls"])
for key, value in |
DirectXMan12/nova-hacking | nova/scheduler/chance.py | Python | apache-2.0 | 4,443 | 0.002476 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Chance (Random) Scheduler implementation
"""
import random
from oslo.config import cfg
from nova import exception
from nova.scheduler import driver
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
class ChanceScheduler(driver.Scheduler):
    """Implements Scheduler as a random node selector."""

    def _filter_hosts(self, request_spec, hosts, filter_properties):
        """Filter a list of hosts based on request_spec."""
        # Only the 'ignore_hosts' hint is honoured by the chance scheduler.
        ignore_hosts = filter_properties.get('ignore_hosts', [])
        hosts = [host for host in hosts if host not in ignore_hosts]
        return hosts

    def _schedule(self, context, topic, request_spec, filter_properties):
        """Picks a host that is up at random."""
        elevated = context.elevated()
        hosts = self.hosts_up(elevated, topic)
        if not hosts:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)
        hosts = self._filter_hosts(request_spec, hosts, filter_properties)
        if not hosts:
            msg = _("Could not find another compute")
            raise exception.NoValidHost(reason=msg)
        return random.choice(hosts)

    def select_hosts(self, context, request_spec, filter_properties):
        """Selects a set of random hosts, one per requested instance."""
        hosts = [self._schedule(context, CONF.compute_topic,
                                request_spec, filter_properties)
                 for instance_uuid in request_spec.get('instance_uuids', [])]
        if not hosts:
            raise exception.NoValidHost(reason="")
        return hosts

    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties):
        """Create and run an instance or instances."""
        instance_uuids = request_spec.get('instance_uuids')
        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num
            try:
                host = self._schedule(context, CONF.compute_topic,
                                      request_spec, filter_properties)
                updated_instance = driver.instance_update_db(context,
                                                             instance_uuid)
                self.compute_rpcapi.run_instance(context,
                        instance=updated_instance, host=host,
                        requested_networks=requested_networks,
                        injected_files=injected_files,
                        admin_password=admin_password,
                        is_first_time=is_first_time,
                        request_spec=request_spec,
                        filter_properties=filter_properties)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)

    def schedule_prep_resize(self, context, image, request_spec,
                             filter_properties, instance, instance_type,
                             reservations):
        """Select a target for resize."""
        host = self._schedule(context, CONF.compute_topic, request_spec,
                              filter_properties)
        self.compute_rpcapi.prep_resize(context, image, instance,
                                        instance_type, host, reservations)
|
izrik/tudor | tests/logic_t/layer/LogicLayer/test_do_authorize_user_for_task_by_email.py | Python | gpl-2.0 | 7,024 | 0 | #!/usr/bin/env python
import unittest
from werkzeug.exceptions import Forbidden, NotFound
from tests.logic_t.layer.LogicLayer.util import generate_ll
class AuthorizeUserForTaskByEmailTest(unittest.TestCase):
    """Tests for LogicLayer.do_authorize_user_for_task_by_email.

    Each test builds fixtures through the persistence layer (self.pl) and
    exercises the logic layer (self.ll).
    """
    def setUp(self):
        # Fresh logic layer (and its persistence layer) for every test.
        self.ll = generate_ll()
        self.pl = self.ll.pl
    def test_authorizes_user(self):
        """An admin can authorize another user for a task by email."""
        # given a task
        task = self.pl.create_task('task')
        self.pl.add(task)
        # and a user to authorize
        user = self.pl.create_user('user@example.com')
        self.pl.add(user)
        # and an admin user to attempt the authorization
        admin = self.pl.create_user('admin@example.com', is_admin=True)
        self.pl.add(admin)
        self.pl.commit()
        # precondition: the user is not authorized for the task
        self.assertNotIn(user, task.users)
        self.assertNotIn(task, user.tasks)
        # when
        result = self.ll.do_authorize_user_for_task_by_email(task.id,
                                                             user.email, admin)
        # then the user is now authorized for the task
        self.assertIn(user, task.users)
        self.assertIn(task, user.tasks)
        # and the task is returned
        self.assertIs(result, task)
def test_task_id_none_raises(self):
# given a user to try to authorize
user = self.pl.create_user('user@example.com')
self.pl.add(user)
# and an admin user to attempt the authorization
admin = self.pl.create_user('admin@example.com', is_admin=True)
self.pl.add(admin)
self.pl.commit()
# precondition: there are no tasks
self.assertEqual(0, self.pl.count_tasks())
# and the user is not authorized for anything
self.assertEqual(0, len(user.tasks))
# expect
self.assertRaises(
ValueError,
self.ll.do_authorize_user_for_task_by_email,
None, user.email, admin)
# and the user was not authorized for anything
self.assertEqual(0, len(user.tasks))
def test_task_not_found_raises(self):
# given a user to try to authorize
user = self.pl.create_user('user@example.com')
self.pl.add(user)
# and an admin user to attempt the authorization
admin = self.pl.create_user('admin@example.com', is_admin=True)
self.pl.add(admin)
self.pl.commit()
# precondition: there are no tasks
self.assertEqual(0, self.pl.count_tasks())
# and the user is not authorized for anything
self.assertEqual(0, len(user.tasks))
# expect
self.assertRaises(
NotFound,
self.ll.do_authorize_user_for_task_by_email,
1, user.email, admin)
# and the user was not authorized for anything
self.assertEqual(0, len(user.tasks))
def test_email_none_raises(self):
# given a task
task = self.pl.create_task('task')
self.pl.add(task)
# and an admin user to attempt the authorization
admin = self.pl.create_user('admin@example.com', is_admin=True)
self.pl.add(admin)
self.pl.commit()
# precondition no users are authorized for the task
self.assertEqual(0, len(task.users))
# expect
self.assertRaises(
ValueError,
self.ll.do_authorize_user_for_task_by_email,
task.id, None, admin)
# and no users are authorized for the task
self.assertEqual(0, len(task.users))
def test_email_empty_raises(self):
# given a task
task = self.pl.create_task('task')
self.pl.add(task)
# and an admin user to attempt the authorization
admin = self.pl.create_user('admin@example.com', is_admin=True)
self.pl.add(admin)
self.pl.commit()
# precondition no users are authorized for the task
self.assertEqual(0, len(task.users))
# expect
self.assertRaises(
ValueError,
self.ll.do_authorize_user_for_task_by_email,
task.id, '', admin)
# and no users are authorized for the task
self.assertEqual(0, len(task.users))
def test_email_not_found_raises(self):
# given a task
task = self.pl.create_task('task')
self.pl.add(task)
# and an admin user to attempt the authorization
admin = self.pl.create_user('admin@example.com', is_admin=True)
self.pl.add(admin)
self.pl.commit()
        # precondition: there are no users with that email address
self.assertEqual(0, self.pl.count_users(email_in=['user@example.com']))
# and no users are authorized for the task
self.assertEqual(0, len(task.users))
# expect
self.assertRaises(
NotFound,
self.ll.do_authorize_user_for_task_by_email,
task.id, 'user@example.com', admin)
| # and no users are authorized for the task
self.assertEqual(0, len(task.users))
def test_current_user_not_allowed_raises(self):
# given a task
task = self.pl.create_task('task')
self.pl.add(task)
# and a user to authorize
user = self.pl.create_user('user@example.com')
self.pl.add(user)
# and a non-admin user to attempt the authorization
non_admin = self.pl.create_user('user2@example.com', is_admin=False)
self.pl.add(non_admin)
self.pl.commit()
# precondition: the current_user is not authorized or admin
self.assertNotIn(non_admin, task.users)
self.assertNotIn(task, non_admin.tasks)
self.assertFalse(non_admin.is_admin)
# when
self.assertRaises(
Forbidden,
self.ll.do_authorize_user_for_task_by_email,
task.id, user.email, non_admin)
# and no users are authorized for the task
self.assertEqual(0, len(task.users))
def test_current_user_is_authorized_non_admin_then_authorizes_user(self):
# given a task
task = self.pl.create_task('task')
self.pl.add(task)
# and a user to authorize
user = self.pl.create_user('user@example.com')
self.pl.add(user)
# and a non-admin user to attempt the authorization
non_admin = self.pl.create_user('user2@example.com', is_admin=False)
self.pl.add(non_admin)
task.users.add(non_admin)
self.pl.commit()
# precondition: the current_user is authorized for the task
self.assertIn(non_admin, task.users)
self.assertIn(task, non_admin.tasks)
# and the current_user is not an admin
self.assertFalse(non_admin.is_admin)
# when
result = self.ll.do_authorize_user_for_task_by_email(
task.id, user.email, non_admin)
# then the user is now authorized for the task
self.assertIn(user, task.users)
self.assertIn(task, user.tasks)
# and the task is returned
self.assertIs(result, task)
|
SPARC-Auburn/Lab-Assistant | assistant/tests/gttstest.py | Python | apache-2.0 | 1,149 | 0.008703 | """
A testing program that utilizes Googles Text To Speech (GTTS) engine and pyglet to speak text.
Ensure that AVbin is installed for pyglet to run properly: https://avbin.github.io/AVbin/Downloa | d.html
"""
from gtts import gTTS
import time
import os
from sys import platform
if platform == "linux" or platform == "linux2":
import pygame
else:
import pyglet
def speak(whattosay):
print (whattosay)
audio_file = "response.mp3"
tts = gTTS(text=str(whattosay), lang="en")
tts.s | ave(audio_file)
playsound(audio_file)
os.remove(audio_file)
def playsound(audio_file):
if platform == "linux" or platform == "linux2":
pygame.mixer.pre_init(22050,-16,1,2048)
pygame.init()
pygame.mixer.init()
pygame.mixer.music.load(audio_file)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
pygame.time.Clock().tick(10)
elif platform == "darwin" or platform == "win32":
sound = pyglet.media.load(audio_file, streaming=False)
sound.play()
time.sleep(sound.duration)
speak("One")
speak("Two")
speak("Three")
speak("It Works!")
|
M2IS/vendendo | userapp/apps.py | Python | gpl-2.0 | 124 | 0 | from __future__ im | port unicode_literals
from django.apps import AppConfig
class UserConfig(AppConfig):
na | me = 'user'
|
RobWouters/omnikassa | tests/test_omnikassa.py | Python | mit | 1,432 | 0.000698 | import unittest
from omnikassa import Omnikassa
from omnikassa.exceptions import (
InvalidSeal,
InvalidResponseCode,
)
class OmnikassaTests(unittest.TestCase):
def setUp(self):
self.settings = {
'normalReturnUrl': 'https://example.com/omnikassa',
'automaticResponseUrl': 'https://example.com/callback',
}
self.omnikassa = Omnikassa(self.settings)
def test_seal_fail(self):
data = 'test data'
with self.assertRaises(InvalidSeal):
self.omnikassa.verify_callback(
{'Seal': 'invalid seal', 'Data': data}
)
def test_check_seal(self):
data = 'test string'
seal = 'd9d4737039e6066ff5576e75aee5bd02f592e5d49470cc293cd33e7470e46ef9'
self.assertTrue(self.omnikassa._check | _seal(seal, data))
data = 'test string 1'
self.assertFalse(self.omnikassa._check_seal(seal, data))
def test_response_fail(self):
data = 'responseCode=99'
seal = self.omnikassa._generat | e_seal(data)
with self.assertRaises(InvalidResponseCode):
self.omnikassa.verify_callback({'Seal': seal, 'Data': data})
def test_response_success(self):
data = 'responseCode=00'
seal = self.omnikassa._generate_seal(data)
data = self.omnikassa.verify_callback({'Seal': seal, 'Data': data})
self.assertDictEqual({'responseCode': '00'}, data)
|
michel-slm/bodhi | bodhi/services/comments.py | Python | gpl-2.0 | 6,317 | 0.002691 | # This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import math
from cornice import Service
from pyramid.httpexceptions import HTTPBadRequest
from sqlalchemy.sql import or_
from bodhi import log
from bodhi.models import Comment, Build, Bug, CVE, Package, Update
import bodhi.captcha
import bodhi.schemas
import bodhi.security
from bodhi.validators import (
validate_packages,
validate_update,
validate_updates,
validate_update_owner,
validate_comment_id,
validate_username,
validate_bug_feedback,
validate_testcase_feedback,
validate_captcha,
)
comment = Service(name='comment', path='/comments/{id}',
validators=(validate_comment_id,),
description='Comment submission service',
cors_origins=bodhi.security.cors_origins_ro)
comments = Service(name='comments', path='/comments/',
description='Comment submission service',
# Note, this 'rw' is not a typo. the @comments service has
# a ``post`` section at the bottom.
cors_origins=bodhi.security.cors_origins_rw)
@comment.get(accept=('application/json', 'text/json'), renderer='json')
@comment.get(accept=('application/javascript'), renderer='jsonp')
@comment.get(accept=('application/atom+xml'), renderer='rss')
@comment.get(accept="text/html", renderer="comment.html")
def get_comment(request):
""" Return a single comment from an id """
return dict(comment=request.validated['comment'])
@comments.get(schema=bodhi.schemas.ListCommentSchema,
accept=('application/json', 'text/json'), renderer='json',
validators=(
validate_username,
validate_update_owner,
validate_updates,
validate_packages,
))
@comments.get(schema=bodhi.schemas.ListCommentSchema,
accept=('application/javascript'), renderer='jsonp',
validators=(
validate_username,
validate_update_owner,
validate_updates,
validate_packages,
))
@comments.get(schema=bodhi.schemas.ListCommentSchema,
accept=('application/atom+xml'), renderer='rss',
validators=(
validate_username,
validate_update_owner,
validate_updates,
validate_packages,
))
@comments.get(schema=bodhi.schemas.ListCommentSchema,
accept=('text/html'), renderer='comments.html',
validators=(
validate_username,
validate_update_owner,
validate_updates,
validate_packages,
))
def query_comments(request):
db = request.db
| data = request.validated
query = db.query(Comment)
anonymous = data.get('anonymous')
if anonymous is not None:
query = query.filter_by(anonymous=anonymous)
like = data.get('like')
if like is not None:
query = query.filter(or_(*[
Comment.text.like('%%%s%%' % like | )
]))
packages = data.get('packages')
if packages is not None:
query = query\
.join(Comment.update)\
.join(Update.builds)\
.join(Build.package)
query = query.filter(or_(*[Build.package==pkg for pkg in packages]))
since = data.get('since')
if since is not None:
query = query.filter(Comment.timestamp >= since)
updates = data.get('updates')
if updates is not None:
query = query.filter(or_(*[Comment.update==u for u in updates]))
update_owner = data.get('update_owner')
if update_owner is not None:
query = query.join(Comment.update)
query = query.filter(Update.user==update_owner)
user = data.get('user')
if user is not None:
query = query.filter(Comment.user==user)
query = query.order_by(Comment.timestamp.desc())
total = query.count()
page = data.get('page')
rows_per_page = data.get('rows_per_page')
pages = int(math.ceil(total / float(rows_per_page)))
query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)
return dict(
comments=query.all(),
page=page,
pages=pages,
rows_per_page=rows_per_page,
total=total,
chrome=data.get('chrome'),
)
@comments.post(schema=bodhi.schemas.SaveCommentSchema,
#permission='create', # We need an ACL for this to work...
renderer='json',
validators=(
validate_update,
validate_bug_feedback,
validate_testcase_feedback,
validate_captcha,
))
def new_comment(request):
""" Add a new comment to an update. """
settings = request.registry.settings
data = request.validated
# This has already been validated at this point, but we need to ditch
# it since the models don't care about a csrf argument.
data.pop('csrf_token')
update = data.pop('update')
email = data.pop('email', None)
author = email or (request.user and request.user.name)
anonymous = bool(email) or not author
if not author:
request.errors.add('body', 'email', 'You must provide an author')
request.errors.status = HTTPBadRequest.code
return
try:
com = update.comment(author=author, anonymous=anonymous, **data)
except Exception as e:
log.exception(e)
request.errors.add('body', 'comment', 'Unable to create comment')
return
return dict(comment=com)
|
hackoregon/urbandev-backend | wsgi.py | Python | mit | 1,231 | 0.019496 | import os, sys
# Web app imports
from flask import Flask, stream_with_context, request, Response
# Web app config
app = Flask(__name__)
app.config['DEBUG'] = True
def genMarkdownResult(filename):
hdr = '''<!DOCTYPE html>
<html>
<title>{}</title>
<xmp theme="united" style="display:none;">'''
ftr = '''</xmp>
<script src="/static/assets/strapdown/strapdown.js"></script>
</html>'''
try:
d = open(filename, 'r').readlines()
except IOError as e:
return 'Exception: ' + str(e)
else:
if len(d) > 1:
| return hdr.format(d[0]) + '\n'.join(d[1:]) + ftr
else:
return 'Error: Need more that one line in .md file'
@app.route('/')
def index():
return genMarkdownResult("doc/services.md")
@app.route('/permits/')
def permits():
return 'This is the permits service'
@app.rou | te('/neighborhoods/')
def neighborhoods():
return 'This is the neighborhoods service'
if __name__ == '__main__':
app.run()
#def application(environ, start_response):
# start_response('200 OK', [('Content-Type', 'text/html')])
# return [b'<html><body><h1 style="color:blue">Hello from wsgi</h1></body></html>']
|
szpotona/cloudify-openstack-plugin | system_tests/openstack_handler.py | Python | apache-2.0 | 17,759 | 0.000282 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import random
import logging
import os
import time
import copy
from contextlib import contextmanager
from cinderclient.v1 import client as cinderclient
import novaclient.v2.client as nvclient
import neutronclient.v2_0.client as neclient
from retrying import retry
from cosmo_tester.framework.handlers import (
BaseHandler,
BaseCloudifyInputsConfigReader)
from cosmo_tester.framework.util import get_actual_keypath
logging.getLogger('neutronclient.client').setLevel(logging.INFO)
logging.getLogger('novaclient.client').setLevel(logging.INFO)
class OpenstackCleanupContext(BaseHandler.CleanupContext):
def __init__(self, context_name, env):
super(OpenstackCleanupContext, self).__init__(context_name, env)
self.before_run = self.env.handler.openstack_infra_state()
def cleanup(self):
super(OpenstackCleanupContext, self).cleanup()
resources_to_teardown = self.get_resources_to_teardown(
self.env, resources_to_keep=self.before_run)
if self.skip_cleanup:
self.logger.warn('[{0}] SKIPPING cleanup: of the resources: {1}'
.format(self.context_name, resources_to_teardown))
return
self.logger.info('[{0}] Performing cleanup: will try removing these '
'resources: {1}'
.format(self.context_name, resources_to_teardown))
failed_to_remove = self.env.handler.remove_openstack_resources(
resources_to_teardown)
if failed_to_remove:
trimmed_dict = {key: value for key, value in
failed_to_remove.iteritems()
if value}
if len(trimmed_dict) > 0:
msg = '[{0}] failed to remove some resources during ' \
'cleanup: {1}'\
.format(self.context_name, failed_to_remove)
self.logger.error(msg)
raise RuntimeError(msg)
@classmethod
def clean_all(cls, env):
super(OpenstackCleanupContext, cls).clean_all(env)
resources_to_teardown = cls.get_resources_to_teardown(env)
cls.logger.info('Openstack handler performing clean_all: will try '
'removing these resources: {0}'
.format(resources_to_teardown))
failed_to_remove = env.handler.remove_openstack_resources(
resources_to_teardown)
if failed_to_remove:
trimmed_dict = {key: value for key, value in
failed_to_remove.iteritems()
if value}
if len(trimmed_dict) > 0:
msg = 'Openstack handler failed to remove some resources' \
' during clean_all: {0}'.format(trimmed_dict)
cls.logger.error(msg)
raise RuntimeError(msg)
@classmethod
def get_resources_to_teardown(cls, env, resources_to_keep=None):
all_existing_resources = env.handler.openstack_infra_state()
if resources_to_keep:
return env.handler.openstack_infra_state_delta(
before=resources_to_keep, after=all_existing_resources)
else:
return all_existing_resources
def update_server_id(self, server_name):
# retrieve the id of the new server
nova, _, _ = self.env.handler.openstack_clients()
servers = nova.servers.list(
search_opts={'name': server_name})
if len(servers) > 1:
raise RuntimeError(
'Expected 1 server with name {0}, but found {1}'
.format(server_name, len(servers)))
new_server_id = servers[0].id
# retrieve the id of the old server
old_server_id = None
servers = self.before_run['servers']
for server_id, name in servers.iteritems():
if server_name == name:
old_server_id = server_id
break
if old_server_id is None:
raise RuntimeError(
'Could not find a server with name {0} '
'in the internal cleanup context state'
.format(server_name))
# replace the id in the internal state
servers[new_server_id] = servers.pop(old_server_id)
class CloudifyOpenstackInputsConfigReader(BaseCloudifyInputsConfigReader):
def __init__(self, cloudify_config, manager_blueprint_path, **kwargs):
super(CloudifyOpenstackInputsConfigReader, self).__init__(
cloudify_config, manager_blueprint_path=manager_blueprint_path,
**kwargs)
@property
def region(self):
return self.config['region']
@property
def management_server_name(self):
return self.config['manager_server_name']
@property
def agent_key_path(self):
return self.config['agent_private_key_path']
@property
def management_user_name(self):
return self.config['manager_server_user']
@property
def management_key_path(self):
return self.config['manager_private_key_path']
| @property
def age | nt_keypair_name(self):
return self.config['agent_public_key_name']
@property
def management_keypair_name(self):
return self.config['manager_public_key_name']
@property
def use_existing_agent_keypair(self):
return self.config['use_existing_agent_keypair']
@property
def use_existing_manager_keypair(self):
return self.config['use_existing_manager_keypair']
@property
def external_network_name(self):
return self.config['external_network_name']
@property
def keystone_username(self):
return self.config['keystone_username']
@property
def keystone_password(self):
return self.config['keystone_password']
@property
def keystone_tenant_name(self):
return self.config['keystone_tenant_name']
@property
def keystone_url(self):
return self.config['keystone_url']
@property
def neutron_url(self):
return self.config.get('neutron_url', None)
@property
def management_network_name(self):
return self.config['management_network_name']
@property
def management_subnet_name(self):
return self.config['management_subnet_name']
@property
def management_router_name(self):
return self.config['management_router']
@property
def agents_security_group(self):
return self.config['agents_security_group_name']
@property
def management_security_group(self):
return self.config['manager_security_group_name']
class OpenstackHandler(BaseHandler):
CleanupContext = OpenstackCleanupContext
CloudifyConfigReader = CloudifyOpenstackInputsConfigReader
def before_bootstrap(self):
super(OpenstackHandler, self).before_bootstrap()
with self.update_cloudify_config() as patch:
suffix = '-%06x' % random.randrange(16 ** 6)
server_name_prop_path = 'manager_server_name'
patch.append_value(server_name_prop_path, suffix)
def after_bootstrap(self, provider_context):
super(OpenstackHandler, self).after_bootstrap(provider_context)
resources = provider_context['resources']
agent_keypair = resources['agents_keypair']
management_keypair = resources['management_keypair']
self.remove_agent_keypair = agent_keypair['external_resource'] is False
self.remove_management_keypair = \ |
deathmetalland/IkaLog | tools/create_mask.py | Python | apache-2.0 | 1,712 | 0.001752 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o | r implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a developtment tool to create a filtering mask from multiple images.
# Usage:
# ./tools/create_mask.p | y --input img1.png img2.png --output mask.png
#
# TODO:
# Support other filters in addition to MM_DARK.
#
import argparse
import cv2
import os.path
import sys
# Append the Ikalog root dir to sys.path to import IkaUtils.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from ikalog.utils import *
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, nargs='*', required=True)
parser.add_argument('--output', type=str, required=True)
return vars(parser.parse_args())
if __name__ == '__main__':
args = get_args()
filenames = args['input']
img_masks = []
for filename in filenames:
img = cv2.imread(filename, 1)
img_mask = matcher.MM_DARK()(img)
img_masks.append(img_mask)
result = img_masks[0]
for mask in img_masks[1:]:
result = cv2.bitwise_and(result, mask)
cv2.imwrite(args['output'], result)
|
pycket/pycket | pycket/values_struct.py | Python | mit | 55,181 | 0.002773 | import itertools, sys
from pycket import config
from pycket import values
from pycket import vector as values_vector
from pycket.arity import Arity
from pycket.base import SingleResultMixin, UnhashableType
from pycket.cont import continuation, label
from pycket.error import SchemeException
from pycket.prims.expose import default, make_call_method
from pycket.small_list import inline_small_list
from pycket.util import strip_immutable_field_name
from pycket.values_parameter import W_Parameter
from rpython.rlib import jit
from rpython.rlib.objectmodel import import_from_mixin, not_rpython
from rpython.rlib.unroll import unrolling_iterable
w_prefab_symbol = values.W_Symbol.make("prefab")
class W_StructInspector(values.W_Object):
errorname = "struct-inspector"
_immutable_fields_ = ["w_super"]
_attrs_ = ["w_super"]
@staticmethod
def make(w_inspector, issibling = False):
assert isinstance(w_inspector, W_StructInspector)
w_super = w_inspector
if issibling:
w_super = w_inspector.w_super if w_inspector is not None else None
return W_StructInspector(w_super)
def __init__(self, w_super):
self.w_super = w_super
@jit.elidable
def has_control(self, struct_type):
w_inspector = struct_type.w_inspector
if not isinstance(w_inspector, W_StructInspector):
return True
else:
w_inspector = w_inspector.w_super
while isinstance(w_inspector, W_StructInspector):
if w_inspector is self:
return True
w_inspector = w_inspector.w_super
return False
current_inspector = W_StructInspector(None)
current_inspector_param = W_Parameter(current_inspector)
class W_StructType(values.W_Object):
errorname = "struct-type-descriptor"
_immutable_fields_ = [
"name", "constructor_name", "w_super",
"init_field_count", "auto_field_count", "total_field_count",
"total_auto_field_count", "total_init_field_count",
"w_auto_value", "properties", "w_inspector", "immutables[*]",
"immutable_fields[*]", "w_guard", "auto_values_w[*]", "offsets[*]",
"constructor", "predicate", "accessor", "mutator", "prop_procedure",
"constructor_arity", "procedure_source", "isprefab", "isopaque",
"prop_sealed"]
_attrs_ = map(strip_immutable_field_name, _immutable_fields_)
unbound_prefab_types = {}
@jit.unroll_safe
def __init__(self, w_name, w_super_type, init_field_count, auto_field_count,
w_auto_value, w_inspector, w_proc_spec, immutables, w_guard,
w_constructor_name):
self.name = | w_name
self.constructor_name = w_constructor_name
self.w_super = w_super_type
self.init_field_count = init_f | ield_count
self.total_init_field_count = init_field_count
self.auto_field_count = auto_field_count
self.total_auto_field_count = auto_field_count
self.total_field_count = init_field_count + auto_field_count
if isinstance(w_super_type, W_StructType):
self.total_field_count += w_super_type.total_field_count
self.total_init_field_count += w_super_type.total_init_field_count
self.total_auto_field_count += w_super_type.total_auto_field_count
self.w_auto_value = w_auto_value
self.properties = []
self.prop_procedure = None
self.prop_sealed = False
self.procedure_source = None
self.w_inspector = w_inspector
if isinstance(w_proc_spec, values.W_Fixnum):
immutables = [w_proc_spec.value] + immutables
self.immutables = immutables
self.w_guard = w_guard
self.auto_values_w = [self.w_auto_value] * self.auto_field_count
self.setup_prefab()
self._calculate_offsets()
self._generate_methods()
def setup_prefab(self):
self.isprefab = self.w_inspector is w_prefab_symbol
if self.isprefab:
self.isopaque = False
else:
self.isopaque = self.w_inspector is not values.w_false
@jit.unroll_safe
def _generate_methods(self):
""" Generate constructor, predicate, mutator, and accessor """
count = self.total_init_field_count
self.constructor_arity = Arity([count], -1)
self.constructor = W_StructConstructor(self)
self.predicate = W_StructPredicate(self)
self.accessor = W_StructAccessor(self)
self.mutator = W_StructMutator(self)
@jit.unroll_safe
def _calculate_offsets(self):
offsets = []
immutable_fields = [] # absolut indices
w_struct_type = self
while isinstance(w_struct_type, W_StructType):
offset = (w_struct_type.total_field_count -
w_struct_type.init_field_count -
w_struct_type.auto_field_count)
offsets.append((w_struct_type, offset))
for immutable_field in w_struct_type.immutables:
immutable_fields.append(immutable_field + offset)
w_struct_type = w_struct_type.w_super
self.offsets = offsets[:]
self.immutable_fields = immutable_fields[:]
@staticmethod
def make(w_name, w_super_type, init_field_count, auto_field_count,
w_auto_value=values.w_false, w_properties=values.w_null,
w_inspector=values.w_false, w_proc_spec=values.w_false,
immutables=[], w_guard=values.w_false,
w_constructor_name=values.w_false, env=None, cont=None):
"""
This method returns five instances:
W_StructType
W_StructConstructor
W_StructPredicate
W_StructAccessor
W_StructMutator
"""
w_struct_type = W_StructType.make_simple(
w_name=w_name,
w_super_type=w_super_type,
init_field_count=init_field_count,
auto_field_count=auto_field_count,
w_auto_value=w_auto_value,
w_inspector=w_inspector,
w_proc_spec=w_proc_spec,
immutables=immutables,
w_guard=w_guard,
w_constructor_name=w_constructor_name)
return w_struct_type.initialize_properties(w_properties, w_proc_spec, env, cont)
@staticmethod
@jit.elidable
def make_prefab(prefab_key):
if prefab_key in W_StructType.unbound_prefab_types:
return W_StructType.unbound_prefab_types[prefab_key]
if prefab_key.super_key:
w_super_type = W_StructType.make_prefab(prefab_key.super_key)
else:
w_super_type = values.w_false
immutables = [i for i in range(prefab_key.init_field_count) \
if i not in prefab_key.mutables]
w_struct_type = W_StructType(
w_name=prefab_key.w_name,
w_super_type=w_super_type,
init_field_count=prefab_key.init_field_count,
auto_field_count=prefab_key.auto_field_count,
w_auto_value=prefab_key.w_auto_value,
w_inspector=w_prefab_symbol,
w_proc_spec=values.w_false,
immutables=immutables,
w_guard=values.w_false,
w_constructor_name=values.w_false)
W_StructType.unbound_prefab_types[prefab_key] = w_struct_type
return w_struct_type
@staticmethod
def make_simple(w_name, w_super_type, init_field_count,
auto_field_count, w_auto_value=values.w_false,
w_inspector=values.w_false,
w_proc_spec=values.w_false, immutables=[],
w_guard=values.w_false, w_constructor_name=values.w_false):
"""
This method returns an instance of W_StructType only.
It does not support properties.
"""
w_struct_type = W_StructType(
w_name=w_name,
w_super_type=w_super_type,
init_field_count=init_field_count,
auto_field_count=auto_field_count,
w_auto_value=w_auto_value,
w_inspector=w_insp |
odoousers2014/LibrERP | account_invoice_merge/invoice.py | Python | agpl-3.0 | 4,433 | 0.009023 | # -*- coding: utf-8 -*-
#################################################################################
# Autor: Mikel Martin (mikel@zhenit.com)
# Copyright (C) 2012 ZhenIT Software (<http://ZhenIT.com>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
class account_invoice(osv.osv):
_inherit = "account.invoice"
def merge_invoice(self, cr, uid, invoices, merge_lines, context=None):
""" Merge draft invoices. Work only with same partner.
You can merge invoices and refund invoices with echa other.
Moves all lines on the first invoice.
"""
if len(invoices) <= 1:
return False
parent = self.pool.get('account.invoice').browse(cr,uid,context['active_id'])
for inv in invoices :
if parent.partner_id != inv.partner_id :
raise osv.except_osv(_("Partners don't match!"),_("Can not merge invoice(s) on different partners or states !. %s different from %s") % parent.partner_id.name, inv.partner_ | id.name )
| if inv.state != 'draft' :
raise osv.except_osv(_("Invalid action !"),_("You can merge only invoices in draft state."))
# Merge invoices that are in draft state
inv_line_obj = self.pool.get('account.invoice.line')
name = parent.name or ''
comment = parent.comment
origin = parent.origin
for inv in invoices:
if inv.id == parent.id:
continue
# check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head.
if inv.name:
name += ', %s' % inv.name
if inv.comment:
comment += ', %s' % inv.comment
if inv.origin:
origin += ', %s' % inv.origin
line_ids = inv_line_obj.search(cr, uid, [('invoice_id','=',inv.id)])
for inv_lin in inv_line_obj.browse(cr, uid, line_ids):
mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id','=',parent.id),('product_id','=',inv_lin.product_id.id)])
if merge_lines and len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity
inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)})
inv_line_obj.unlink(cr, uid, [inv_lin.id])
elif inv.type == parent.type:
inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id})
else:
inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id,'quantity':-inv_lin.quantity})
self.write(cr, uid, parent.id,{'origin':origin,'name':name,'comment':comment})
self.unlink(cr, uid, [inv.id])
self.button_reset_taxes(cr, uid, [parent.id])
return parent.id
account_invoice()
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
def _can_merge_quantity(self, cr, uid, id1, id2, context=None):
qty = False
invl1 = self.browse(cr, uid, id1)
invl2 = self.browse(cr, uid, id2)
if invl1.product_id.id == invl2.product_id.id \
and invl1.price_unit == invl2.price_unit \
and invl1.uos_id.id == invl2.uos_id.id \
and invl1.account_id.id == invl2.account_id.id \
and invl1.discount == invl2.discount:
qty = invl1.quantity + invl2.quantity
return qty
account_invoice_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dbarbier/privot | python/test/t_TruncatedNormalFactory_std.py | Python | lgpl-3.0 | 1,645 | 0.004255 | #! /usr/bin/env python
from openturns import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
mu = NumericalPoint(4, 0.0)
sigma = NumericalPoint(4, 1.0)
a = NumericalPoint(4)
b = NumericalPoi | nt(4)
a[0] | = -4.0
b[0] = 4.0
a[1] = -1.0
b[1] = 4.0
a[2] = 1.0
b[2] = 2.0
a[3] = 3.0
b[3] = 6.0
PlatformInfo.SetNumericalPrecision(4)
for i in range(4) :
distribution = TruncatedNormal(mu[i], sigma[i], a[i], b[i])
size = 10000
sample = distribution.getSample(size)
factory = TruncatedNormalFactory()
estimatedDistribution = factory.build(sample)
print "distribution=", repr(distribution)
print "Estimated distribution=", repr(estimatedDistribution)
estimatedDistribution = factory.build()
print "Default distribution=", estimatedDistribution
estimatedDistribution = factory.build(distribution.getParametersCollection())
print "Distribution from parameters=", estimatedDistribution
estimatedTruncatedNormal = factory.buildAsTruncatedNormal(sample)
print "TruncatedNormal =", distribution
print "Estimated TruncatedNormal=", estimatedTruncatedNormal
estimatedTruncatedNormal = factory.buildAsTruncatedNormal()
print "Default TruncatedNormal=", estimatedTruncatedNormal
estimatedTruncatedNormal = factory.buildAsTruncatedNormal(distribution.getParametersCollection())
print "TruncatedNormal from parameters=", estimatedTruncatedNormal
except :
import sys
print "t_TruncatedNormalFactory_std.py", sys.exc_type, sys.exc_value
|
saffroncoin/csfrblockd | lib/blockchain/addrindex.py | Python | mit | 8,675 | 0.003919 | """
This module replaces blockexplorer functions with RPC calls to addrindex-enabled saffroncoind.
Module configuration:
blockchain-service-name=addrindex
blockchain-service-connect=http://RPC_USER:RPC_PASSWORD@RPC_HOST:RPC_PORT
"""
import decimal
import json
import re
import time
from lib import config, util, util_bitcoin
from geventhttpclient import HTTPClient
from geventhttpclient.url import URL
JSONRPC_API_REQUEST_TIMEOUT = 30
def rpc(method, params=None, abort_on_error=False):
endpoint = config.BLOCKCHAIN_SERVICE_CONNECT
auth = None
m = re.search('(.*?//)(.*?):(.*?)@(.*)', endpoint)
if m:
endpoint = m.group(1) + m.group(4)
auth = (m.group(2), m.group(3))
if not params:
params = []
payload = {
"id": 0,
"jsonrpc": "2.0",
"method": method,
"params": params,
}
headers = {
'Content-Type': 'application/json',
'Connection':'close', #no keepalive
}
if auth:
#auth should be a (username, password) tuple, if specified
headers['Authorization'] = util.http_basic_auth_str(auth[0], auth[1])
try:
u = URL(endpoint)
client = HTTPClient.from_url(u, connection_timeout=JSONRPC_API_REQUEST_TIMEOUT,
network_timeout=JSONRPC_API_REQUEST_TIMEOUT)
r = client.post(u.request_uri, body=json.dumps(payload), headers=headers)
except Exception, e:
raise Exception("Got call_jsonrpc_api request error: %s" % e)
else:
if r.status_code != 200 and abort_on_error:
raise Exception("Bad status code returned from csfrd: '%s'. result body: '%s'." % (r.status_code, r.read()))
result = json.loads(r.read(), parse_float=decimal.Decimal)
finally:
client.close()
if abort_on_error and 'error' in result:
raise Exception("Got back error from server: %s" % result['error'])
return result['result']
def check():
pass
def getinfo():
return {'info': rpc('getinfo', None)}
def getmempool():
rawtxlist = rpc('getrawmempool', None)
txlist = []
for rawtx in rawtxlist:
try:
| txlist.append(rpc('getrawtransaction', [rawtx]))
except Exception:
pass
rv = [rpc('decoderawtransaction', [tx]) for tx in txlist]
for tx in rv:
tx['confirmations'] = 0
return rv
def searchrawtx(address):
rv = []
idx = 0
while True:
chunk = rpc('searchrawtransactions', [address, 1, idx])
if not chunk:
break
| rv += [t for t in chunk if 'confirmations' in t and t['confirmations']]
idx += 100
return rv
def ismine(vout, address, allow_multisig=False):
return 'scriptPubKey' in vout and \
(allow_multisig or vout['scriptPubKey']['type'] != 'multisig') and \
'addresses' in vout['scriptPubKey'] and \
address in vout['scriptPubKey']['addresses']
def has_my_vout(tx, address):
for vout in tx['vout']:
if ismine(vout, address):
return True
return False
def has_my_vin(tx, vout_txs, address):
for vin in tx['vin']:
if vin['txid'] in vout_txs:
for vout in vout_txs[vin['txid']]:
if vout['n'] == vin['vout'] and ismine(vout, address):
return True
return False
def locate_vout(vouts, n):
    """Return the output with index ``n`` from ``vouts``, or None if absent."""
    return next((out for out in vouts if out['n'] == n), None)
def listunspent(address):
    """Return unspent outputs paying ``address`` as insight-style dicts.

    Combines mempool transactions with confirmed ones from the address
    index, prunes outputs already spent by another known transaction, and
    keeps only outputs that pay ``address``.
    """
    # TODO p2sh
    with decimal.localcontext(decimal.DefaultContext):
        txraw = getmempool() + searchrawtx(address)
        txs = {tx['txid']: tx for tx in txraw}
        # Prune vouts that are spent by a vin of another known transaction.
        # NOTE(review): vin['txid'] would KeyError on a coinbase input;
        # assumes none reach this code path — confirm.
        for txid in txs:
            for vin in txs[txid]['vin']:
                if vin['txid'] in txs:
                    txs[vin['txid']]['vout'] = [v for v in txs[vin['txid']]['vout'] if v['n'] != vin['vout']]
        # Keep only outputs that pay the requested address.
        for txid in txs:
            txs[txid]['vout'] = [v for v in txs[txid]['vout'] if ismine(v, address)]
        rv = []
        for txid in txs:
            for vout in txs[txid]['vout']:
                rv.append({'address': address,
                           'txid': txid,
                           'vout': vout['n'],
                           # Mempool txs carry no 'time'; fall back to "now".
                           'ts': txs[txid]['time'] if 'time' in txs[txid] else int(time.time()),
                           'scriptPubKey': vout['scriptPubKey']['hex'],
                           'amount': vout['value'],
                           'confirmations': txs[txid]['confirmations']})
        return rv
def getaddressinfo(address):
    """Summarise activity for ``address`` in an insight-style info dict.

    Merges confirmed transactions from the address index with filtered
    mempool transactions that either pay the address ("incoming") or spend
    one of its outputs ("outgoing"), then tallies received / sent /
    unconfirmed totals and appearance counts.
    """
    with decimal.localcontext(decimal.DefaultContext):
        totalReceived = decimal.Decimal(0.0)
        totalSent = decimal.Decimal(0.0)
        unconfirmedBalance = decimal.Decimal(0.0)
        unconfirmedTxApperances = 0
        txApperances = 0
        mempool = getmempool()
        mptxs = {tx['txid']: tx for tx in mempool}
        txraw = searchrawtx(address)
        txs = {tx['txid']: tx for tx in txraw}
        # collect mempool incoming
        mptxs_own_vouts = {mptx: mptxs[mptx] for mptx in mptxs if mptx not in txs and has_my_vout(mptxs[mptx], address)}
        # collect mempool outgoing
        mptxs_own_vins = {}
        for mptx in mptxs:
            if mptx in txs:
                continue
            for vin in mptxs[mptx]['vin']:
                if vin['txid'] in mptxs_own_vouts:
                    # BUGFIX: locate_vout() expects the list of outputs, not
                    # the whole transaction dict (iterating the dict yields
                    # its string keys and crashes on vout['n']).  Also guard
                    # against a None result, consistent with the loop below.
                    vout = locate_vout(mptxs_own_vouts[vin['txid']]['vout'], vin['vout'])
                    if vout and ismine(vout, address):
                        mptxs_own_vins[mptx] = mptxs[mptx]
                        break
                elif vin['txid'] in txs:
                    for vout in txs[vin['txid']]['vout']:
                        if vout['n'] == vin['vout'] and ismine(vout, address):
                            mptxs_own_vins[mptx] = mptxs[mptx]
                            break
                    else:
                        # NOTE(review): this for/else 'break' aborts the vin
                        # scan when no matching vout was found — looks like it
                        # should be 'continue'; preserved as-is pending review.
                        break
        # combine filtered mempool and addrindex records
        txs = dict(list(mptxs_own_vouts.items()) + list(mptxs_own_vins.items()) + list(txs.items()))
        for txid in txs:
            tx = txs[txid]
            # Credit every output of ours; unconfirmed amounts tallied apart.
            vouts = [vout for vout in tx['vout'] if ismine(vout, address)]
            for vout in vouts:
                if tx['confirmations']:
                    totalReceived += vout['value']
                else:
                    unconfirmedBalance += vout['value']
            # Debit inputs that spend one of our known outputs.
            for vin in tx['vin']:
                if 'txid' not in vin or vin['txid'] not in txs:
                    continue
                vout = locate_vout(txs[vin['txid']]['vout'], vin['vout'])
                if vout and ismine(vout, address):
                    if tx['confirmations']:
                        totalSent += vout['value']
                    else:
                        unconfirmedBalance -= vout['value']
            if tx['confirmations']:
                txApperances += 1
            else:
                unconfirmedTxApperances += 1
        balance = totalReceived - totalSent
        return {'addrStr': address,
                'balance': float(balance),
                'balanceSat': int(balance * 100000000),
                'totalReceived': float(totalReceived),
                'totalReceivedSat': int(totalReceived * 100000000),
                'totalSent': float(totalSent),
                'totalSentSat': int(totalSent * 100000000),
                'unconfirmedBalance': float(unconfirmedBalance),
                'unconfirmedBalanceSat': int(unconfirmedBalance * 100000000),
                'unconfirmedTxApperances': unconfirmedTxApperances,
                'txApperances': txApperances,
                'transactions': sorted(txs.keys(), key=lambda k: txs[k]['confirmations'])}
# Unlike blockexplorers, does not provide 'spent' information on spent vouts.
# This information is not used in csfrblockd/csfrd anyway.
def gettransaction(tx_hash):
    """Return the verbose-decoded transaction for ``tx_hash``.

    RPC verbosity 1 makes the daemon return a JSON object instead of raw
    hex.  The Decimal context mirrors listunspent()/getaddressinfo() —
    presumably for consistent parsing of monetary values; TODO confirm.
    """
    with decimal.localcontext(decimal.DefaultContext):
        return rpc('getrawtransaction', [tx_hash, 1])
def get_pubkey_for_address(address):
#first, get a list of transactions for the address
address_info = getaddressinfo(address)
#if no transactions, we can't get the pubkey
if not address_info['transactio |
stvstnfrd/edx-platform | common/djangoapps/student/management/tests/test_bulk_unenroll.py | Python | agpl-3.0 | 5,578 | 0.002331 | """Tests for Bulk Un-enroll Management command"""
from tempfile import NamedTemporaryFile
import six
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from testfixtures import LogCapture
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.models import BulkUnenrollConfiguration, CourseEnrollment
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
LOGGER_NAME = 'common.djangoapps.student.management.commands.bulk_unenroll'
class BulkUnenrollTests(SharedModuleStoreTestCase):
    """Test Bulk un-enroll command works fine for all test cases.

    This rewrite also repairs corrupted characters that had been spliced
    into the config-model and logging tests.
    """

    def setUp(self):
        """Create one course and three users enrolled in audit mode."""
        super(BulkUnenrollTests, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
        self.course = CourseFactory.create()
        self.audit_mode = CourseModeFactory.create(
            course_id=self.course.id,
            mode_slug='audit',
            mode_display_name='Audit',
        )
        self.user_info = [
            ('amy', 'amy@pond.com', 'password'),
            ('rory', 'rory@theroman.com', 'password'),
            ('river', 'river@song.com', 'password')
        ]
        self.enrollments = []
        self.users = []
        for username, email, password in self.user_info:
            user = UserFactory.create(
                username=username, email=email, password=password
            )
            self.users.append(user)
            self.enrollments.append(
                CourseEnrollment.enroll(user, self.course.id, mode='audit')
            )

    def _write_test_csv(self, csv, lines):
        """Write a test csv file with the lines provided"""
        csv.write(b"username,course_id\n")
        for line in lines:
            csv.write(six.b(line))
        csv.seek(0)
        return csv

    def test_invalid_course_key(self):
        """Verify in case of invalid course key warning is logged."""
        with NamedTemporaryFile() as csv:
            csv = self._write_test_csv(csv, lines=["amy,test_course\n"])
            with LogCapture(LOGGER_NAME) as log:
                call_command("bulk_unenroll", "--csv_path={}".format(csv.name), "--commit")
                # The message below must match the command's actual log line
                # (including its 'un-enrollement' spelling).
                expected_message = 'Invalid course id {}, skipping un-enrollement.'.\
                    format('test_course')
                log.check_present(
                    (LOGGER_NAME, 'WARNING', expected_message)
                )

    def test_bulk_un_enroll(self):
        """Verify users are unenrolled using the command."""
        lines = [
            enrollment.user.username + "," +
            str(enrollment.course.id) + "\n"
            for enrollment in self.enrollments
        ]
        with NamedTemporaryFile() as csv:
            csv = self._write_test_csv(csv, lines=lines)
            call_command("bulk_unenroll", "--csv_path={}".format(csv.name), "--commit")
            for enrollment in CourseEnrollment.objects.all():
                assert enrollment.is_active is False

    def test_bulk_un_enroll_without_commit(self):
        """
        Verify the ability to dry-run the command.
        """
        lines = [
            enrollment.user.username + "," +
            str(enrollment.course.id) + "\n"
            for enrollment in self.enrollments
        ]
        with NamedTemporaryFile() as csv:
            csv = self._write_test_csv(csv, lines=lines)
            call_command("bulk_unenroll", "--csv_path={}".format(csv.name))
            for enrollment in CourseEnrollment.objects.all():
                assert enrollment.is_active is True

    def test_bulk_unenroll_from_config_model(self):
        """Verify users are unenrolled using the csv from the config model."""
        lines = "user_id,username,email,course_id\n"
        for enrollment in self.enrollments:
            lines += str(enrollment.user.id) + "," + enrollment.user.username + "," + \
                enrollment.user.email + "," + str(enrollment.course.id) + "\n"
        csv_file = SimpleUploadedFile(name='test.csv', content=lines.encode('utf-8'), content_type='text/csv')
        BulkUnenrollConfiguration.objects.create(enabled=True, csv_file=csv_file)
        call_command("bulk_unenroll", "--commit")
        for enrollment in CourseEnrollment.objects.all():
            assert enrollment.is_active is False

    def test_users_unenroll_successfully_logged(self):
        """Verify users unenrolled are logged."""
        lines = "username,course_id\n"
        lines += self.enrollments[0].username + "," + str(self.enrollments[0].course.id) + "\n"
        csv_file = SimpleUploadedFile(name='test.csv', content=lines.encode('utf-8'), content_type='text/csv')
        BulkUnenrollConfiguration.objects.create(enabled=True, csv_file=csv_file)
        course_id = self.enrollments[0].course.id
        with LogCapture(LOGGER_NAME) as log:
            call_command("bulk_unenroll", "--commit")
            log.check(
                (
                    LOGGER_NAME,
                    'INFO',
                    'Processing [{}] with [1] enrollments.'.format(course_id),
                ),
                (
                    LOGGER_NAME,
                    'INFO',
                    'User [{}] have been successfully unenrolled from the course: {}'.format(
                        self.enrollments[0].username, self.enrollments[0].course.id
                    )
                ),
            )
|
pmghalvorsen/gramps_branch | gramps/gen/plug/menu/_placelist.py | Python | gpl-2.0 | 1,794 | 0.003344 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a list of places.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import Option
#-------------------------------------------------------------------------
#
# PlaceListOption class
#
#-------------------------------------------------------------------------
class PlaceListOption(Option):
    """
    This class describes a widget that allows multiple places from the
    database to be selected.
    """
    def __init__(self, label):
        """
        :param label: A label to be applied to this option.
            Example: "Places"
        :type label: string
        :return: nothing

        The option value is a space-separated string of place GIDs
        (e.g. "111 222 333 444"); it is initialised to the empty string.
        """
        Option.__init__(self, label, "")
|
clede/Radiotrack | radiotracking/migrations/0003_program_day_string.py | Python | apache-2.0 | 472 | 0 | # -*- coding: utf-8 -*-
# | Generated by Django 1.10.1 on 2016-09-29 05:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a free-form ``day_string`` field to the ``Program`` model.

    Repairs a corrupted character spliced into the ``model_name`` argument.
    """

    dependencies = [
        ('radiotracking', '0002_auto_20160918_0244'),
    ]

    operations = [
        migrations.AddField(
            model_name='program',
            name='day_string',
            field=models.CharField(default='', max_length=30),
        ),
    ]
|
colour-science/colour-hdri | colour_hdri/generation/tests/test_radiance.py | Python | bsd-3-clause | 2,680 | 0.000373 | # !/usr/bin/env python
"""Defines the unit tests for the :mod:`colour_hdri.generation.radiance` module."""
from __future__ import annotations
import numpy as np
import os
import unittest
from colour import RGB_COLOURSPACES
from colour.hints import List
from colour_hdri import TESTS_RESOURCES_DIRECTORY
from colour_hdri.generation import image_stack_to_radiance_image
from colour_hdri.calibration import camera_response_functions_Debevec1997
from colour_hdri.utilities import ImageStack, filter_files
# Package metadata dunders; the e-mail address had corrupted characters
# spliced into the string literal.
__author__ = "Colour Developers"
__copyright__ = "Copyright 2015 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"FROBISHER_001_DIRECTORY",
"GENERATION_DIRECTORY",
"JPG_IMAGES",
"TestRadianceImage",
]
FROBISHER_001_DIRECTORY: str = os.path.join(
TESTS_RESOURCES_DIRECTORY, "frobisher_001"
)
GENERATION_DIRECTORY: str = os.path.join(
TESTS_RESOURCES_DIRECTORY, "colour_hdri", "generation"
)
JPG_IMAGES: List[str] = filter_files(FROBISHER_001_DIRECTORY, ("jpg",))
class TestRadianceImage(unittest.TestCase):
    """
    Define :func:`colour_hdri.generation.radiance.\
image_stack_to_radiance_image` definition unit tests methods.
    """
    def test_radiance_image(self):
        """
        Test :func:`colour_hdri.generation.\
radiance.image_stack_to_radiance_image` definition.
        """
        # Decode the sRGB-encoded JPEG stack to linear light first.
        image_stack = ImageStack.from_files(JPG_IMAGES)
        image_stack.data = RGB_COLOURSPACES["sRGB"].cctf_decoding(
            image_stack.data
        )
        # Lower precision for unit tests under *travis-ci*.
        np.testing.assert_allclose(
            image_stack_to_radiance_image(image_stack),
            np.load(
                os.path.join(
                    GENERATION_DIRECTORY, "test_radiance_image_linear.npy"
                )
            ),
            rtol=0.0001,
            atol=0.0001,
        )
        # Second pass: recover camera response functions from the (encoded)
        # stack and merge with them instead of assuming linear data.
        # Lower precision for unit tests under *travis-ci*.
        image_stack = ImageStack.from_files(JPG_IMAGES)
        np.testing.assert_allclose(
            image_stack_to_radiance_image(
                image_stack,
                camera_response_functions=(
                    camera_response_functions_Debevec1997(image_stack)
                ),
            ),
            np.load(
                os.path.join(
                    GENERATION_DIRECTORY, "test_radiance_image_crfs.npy"
                )
            ),
            rtol=0.0001,
            atol=0.0001,
        )
if __name__ == "__main__":
unittest.main()
|
melkamar/webstore-manager | tests/test_util.py | Python | mit | 2,782 | 0 | import requests
import pytest
import json
import os
import zipfile
from flexmock import flexmock
import webstore_manager.util as util
from webstore_manager.util import pushd, temp_dir
def test_check_requests_response_status_ok():
    """A response whose raise_for_status() succeeds is accepted silently."""
    response = flexmock()
    response.should_receive('raise_for_status').and_return(True)
    util.handle_requests_response_status(response)
def test_check_requests_response_status_fail():
    """An HTTPError raised by raise_for_status() propagates to the caller.

    Repairs a corrupted character spliced into the mock expectation line.
    """
    response = flexmock()
    response.should_receive('raise_for_status').and_raise(requests.HTTPError)
    response.should_receive('json').and_return(json.dumps({"foo": "bar"}))
    with pytest.raises(requests.HTTPError):
        util.handle_requests_response_status(response)
def test_read_json_key_ok():
    """read_json_key returns the value stored under an existing key."""
    assert util.read_json_key({"foo": "bar"}, "foo") == "bar"
def test_read_json_key_fail():
    """read_json_key raises KeyError for a missing key.

    Repairs a corrupted character spliced into the call expression.
    """
    with pytest.raises(KeyError):
        util.read_json_key({"foo": "bar"}, "invalid")
def test_makezip_nodest():
    """make_zip without dest_dir writes the archive into the current dir."""
    zip_path = os.path.join(os.getcwd(), 'tests/files/sample_folder')
    temp_path = 'tests/files/temp_test_makezip'
    zip_name = 'testzip.zip'
    with temp_dir(temp_path):
        with pushd(temp_path):
            assert not os.path.exists(zip_name)
            util.make_zip(zip_name, zip_path)
            assert os.path.exists(zip_name)
            archive = zipfile.ZipFile(zip_name, 'r')
            txt = archive.read('hello').decode("utf-8")
            assert txt.startswith('Sample bare content')
            archive.close()
            os.remove(zip_name)
            assert not os.path.exists(zip_name)
def test_makezip_dest():
    """make_zip with dest_dir writes the archive into that directory."""
    zip_path = os.path.join(os.getcwd(), 'tests/files/sample_folder')
    temp_path = 'tests/files/temp_test_makezip'
    zip_name = 'testzip.zip'
    with temp_dir(temp_path):
        assert not os.path.exists(zip_name)
        assert not os.path.exists(os.path.join(temp_path, zip_name))
        util.make_zip(zip_name, zip_path, dest_dir=temp_path)
        assert os.path.exists(os.path.join(temp_path, zip_name))
        archive = zipfile.ZipFile(os.path.join(temp_path, zip_name), 'r')
        txt = archive.read('hello').decode("utf-8")
        assert txt.startswith('Sample bare content')
        archive.close()
        os.remove(os.path.join(temp_path, zip_name))
        assert not os.path.exists(os.path.join(temp_path, zip_name))
def test_unzip():
    """unzip extracts an archive's members into the target directory."""
    unzip_path = 'tests/files/temp_test_unzip'
    with temp_dir(unzip_path):
        assert not os.path.exists(os.path.join(unzip_path, 'hello'))
        util.unzip('tests/files/sample_zip.zip', unzip_path)
        assert os.path.exists(os.path.join(unzip_path, 'hello'))
        with open(os.path.join(unzip_path, 'hello')) as f:
            txt = f.read()
            assert txt.startswith('Sample content of zip')
|
google/deepvariant | deepvariant/multisample_make_examples.py | Python | bsd-3-clause | 9,287 | 0.004092 | # Copyright 2021 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
r"""Experimental multi-sample make_examples for DeepVariant.
This is a prototype for experimentation with multiple samples in DeepVariant, a
proof of concept enabled by a refactoring to join together DeepVariant and
DeepTrio, generalizing the functionality of make_examples to work with multiple
samples.
The output of this script is not compatible with any of DeepVariant's public
models, and the DeepVariant team does not intend to provide support for users
of this script.
Example usage:
multisample_make_examples \
--mode calling \
--ref "reference.fa" \
--reads "sample1.bam;sample2.bam;sample3.bam;sample4.bam;sample5.bam" \
--sample_names "sample1;sample2;sample3;sample4;sample5" \
--examples "examples.tfrecord.gz" \
--pileup_image_heights "20;20;20;20;20" \ # optional
--downsample_fractions "0.5;0.5;0.5;0.5;0.5" # optional
"""
import os
from absl import app
from absl import flags
from deepvariant import logging_level
from deepvariant import make_examples_core
from deepvariant import make_examples_options
from deepvariant.protos import deepvariant_pb2
from third_party.nucleus.io.python import hts_verbose
from third_party.nucleus.util import errors
from third_party.nucleus.util import proto_utils
MAIN_SAMPLE_INDEX = 0 # This is the first sample listed in --reads.
FLAGS = flags.FLAGS
# Adopt more general flags from make_examples_options.
flags.adopt_module_key_flags(make_examples_options)
# Define flags specific to multi-sample make_examples.
flags.DEFINE_string(
'reads', None, 'Required. A list of BAM/CRAM files, with different '
'samples separated by semi-colons. '
'At least one aligned, sorted, indexed BAM/CRAM file is required for '
'each sample. '
'All must be aligned to the same reference genome compatible with --ref. '
'Can provide multiple BAMs (comma-separated) for each sample. '
'Format is, for example: sample1;sample2_BAM1,sample2_BAM2;sample3 ')
flags.DEFINE_string(
'sample_names', 'DEFAULT',
'Sample names corresponding to the samples (must match order and length of '
'samples in --reads). Separate names for each sample with semi-colons, '
'e.g. "sample1;sample2;sample3". '
'If not specified, (i.e. "sample1;;sample3" or even ";;") '
'any sample without a sample name from this flag the will be inferred from '
'the header information from --reads.')
flags.DEFINE_string(
'downsample_fractions', 'DEFAULT',
'If not empty string ("") must be a value between 0.0 and 1.0. '
'Reads will be kept (randomly) with a probability of downsample_fraction '
'from the input sample BAMs. This argument makes it easy to create '
'examples as though the input BAM had less coverage. '
'Similar to --reads and --sample_name, supply different '
'values for each sample by separating them with semi-colons, '
'where the order of samples is the same as in --reads.')
flags.DEFINE_string(
'pileup_image_heights', 'DEFAULT',
'Height for the part of the pileup image showing reads from each sample. '
'By default, use a height of 100 for all samples. '
'Similar to --reads and --sample_name, supply different '
'values for each sample by separating them with semi-colons, '
'where the order of samples is the same as in --reads.')
def n_samples_from_flags(add_flags=True, flags_obj=None):
  """Collects sample-related options into a list of samples.

  Repairs corrupted characters spliced into the flag-name list and the
  flag-splitting assignment.

  Args:
    add_flags: bool. If True, per-sample flag values (reads, downsample
      fractions, pileup heights) are pushed into the SampleOptions protos.
    flags_obj: object. Source of flags; each per-sample flag is a
      semicolon-separated list aligned with --reads.

  Returns:
    (samples_in_order, sample_role_to_train) where samples_in_order is a
    list of deepvariant_pb2.SampleOptions and sample_role_to_train is the
    role string of the sample to train on.

  Raises:
    ValueError: if a per-sample flag does not have as many entries as
      --reads has samples.
  """
  n_reads = flags_obj.reads.split(';')
  num_samples = len(n_reads)
  flags_organized = {}
  for flag_name in [
      'reads', 'sample_names', 'downsample_fractions', 'pileup_image_heights'
  ]:
    if flags_obj[flag_name].value != 'DEFAULT':
      flags_organized[flag_name] = flags_obj[flag_name].value.split(';')
      if len(flags_organized[flag_name]) != num_samples:
        raise ValueError(f'--{flag_name} has {len(flags_organized[flag_name])} '
                         f'samples, but it should be matching the number of '
                         f'samples in --reads, which was {num_samples}.')
    else:
      # Empty string means "use the default for every sample".
      flags_organized[flag_name] = [''] * num_samples
  n_sample_options = []
  for i in range(num_samples):
    sample_name = make_examples_core.assign_sample_name(
        sample_name_flag=flags_organized['sample_names'][i],
        reads_filenames=flags_organized['reads'][i])
    n_sample_options.append(
        deepvariant_pb2.SampleOptions(
            role=str(i),
            name=sample_name,
            variant_caller_options=make_examples_core.make_vc_options(
                sample_name=sample_name, flags_obj=flags_obj),
            order=range(num_samples),
            pileup_height=100))
  if add_flags:
    for i in range(num_samples):
      n_sample_options[i].reads_filenames.extend(
          flags_organized['reads'][i].split(','))
      if flags_organized['downsample_fractions'][i]:
        n_sample_options[i].downsample_fraction = float(
            flags_organized['downsample_fractions'][i])
      if flags_organized['pileup_image_heights'][i]:
        n_sample_options[i].pileup_height = int(
            flags_organized['pileup_image_heights'][i])
  # Ordering here determines the default order of samples, and when a sample
  # above has a custom .order, then this is the list those indices refer to.
  samples_in_order = n_sample_options
  sample_role_to_train = '0'
  return samples_in_order, sample_role_to_train
def default_options(add_flags=True, flags_obj=None):
  """Creates a MakeExamplesOptions proto populated with reasonable defaults.

  Args:
    add_flags: bool. defaults to True. If True, we will push the value of
      certain FLAGS into our options. If False, those option fields are left
      uninitialized.
    flags_obj: object. If not None, use as the source of flags, else use global
      FLAGS.

  Returns:
    deepvariant_pb2.MakeExamplesOptions protobuf.

  Raises:
    ValueError: If we observe invalid flag values.
  """
  if not flags_obj:
    flags_obj = FLAGS
  samples_in_order, sample_role_to_train = n_samples_from_flags(
      add_flags=add_flags, flags_obj=flags_obj)
  options = make_examples_options.shared_flags_to_options(
      add_flags=add_flags,
      flags_obj=flags_obj,
      samples_in_order=samples_in_order,
      sample_role_to_train=sample_role_to_train,
      main_sample_index=MAIN_SAMPLE_INDEX)
  if add_flags:
    # Record a '|'-joined list of per-sample BAM basenames for bookkeeping.
    options.bam_fname = '|'.join(
        [os.path.basename(x) for x in flags_obj.reads.split(';')])
  return options
def check_options_are_valid(options):
"""Checks that all the options chosen make sense together."""
# Check for general flags (shared for DeepVariant and DeepTrio).
make_examples_optio |
0xbaadf00d/phial | phial/jinja_tools/ext/__init__.py | Python | mit | 194 | 0 | # -*- coding: utf-8 -*-
from .jinja2htmlcompress import SelectiveHTMLCompress
fro | m .jinja2datetime import DatetimeExtension
__all__ = [
'DatetimeExtension',
'SelectiveHTMLC | ompress',
]
|
mapzen/vector-datasource | integration-test/1622-hide-early-university.py | Python | mit | 3,553 | 0 | # -*- encoding: utf-8 -*-
from . import FixtureTest
class HideEarlyUniversityTest(FixtureTest):
    """Check min_zoom of university/college POIs by campus area.

    Repairs corrupted characters spliced into the first fixture's
    'source' tag.
    """

    def test_large_university(self):
        import dsl
        z, x, y = (10, 165, 399)
        self.generate_fixtures(
            # https://www.openstreetmap.org/relation/309918
            dsl.way(309918, dsl.box_area(z, x, y, 14614122), {
                'amenity': 'university',
                'name': 'California State University Monterey Bay',
                'source': 'openstreetmap.org',
                'type': 'multipolygon',
                'website': 'https://csumb.edu/',
                'wikidata': 'Q624686',
                'wikipedia': 'en:California State University, Monterey Bay',
            }),
        )
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 309918,
                'kind': 'university',
                'min_zoom': 10,
            })

    def test_medium_university(self):
        import dsl
        z, x, y = (11, 328, 794)
        self.generate_fixtures(
            # https://www.openstreetmap.org/way/29268613
            dsl.way(29268613, dsl.box_area(z, x, y, 7165443), {
                'amenity': 'university',
                'ele': '22',
                'gnis:county_id': '085',
                'gnis:created': '01/19/1981',
                'gnis:edited': '01/04/2008',
                'gnis:feature_id': '235365',
                'gnis:state_id': '06',
                'name': 'Stanford University',
                'official_name': 'Leland Stanford Junior University',
                'official_name:en': 'Leland Stanford Junior University',
                'source': 'openstreetmap.org',
                'wikidata': 'Q41506',
                'wikipedia': 'en:Stanford University',
            }),
        )
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 29268613,
                'kind': 'university',
                'min_zoom': 11,
            })

    def test_college_1(self):
        import dsl
        z, x, y = (12, 658, 1585)
        self.generate_fixtures(
            # https://www.openstreetmap.org/way/124842735
            dsl.way(124842735, dsl.box_area(z, x, y, 600142), {
                'addr:street': '25555 Hesperian Blvd',
                'amenity': 'college',
                'ele': '13',
                'gnis:county_id': '001',
                'gnis:created': '01/19/1981',
                'gnis:edited': '01/04/2008',
                'gnis:feature_id': '220866',
                'gnis:state_id': '06',
                'name': 'Chabot College',
                'source': 'openstreetmap.org',
                'wikidata': 'Q5066011',
            }),
        )
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 124842735,
                'kind': 'college',
                'min_zoom': 12,
            })

    def test_college_2(self):
        import dsl
        z, x, y = (12, 658, 1582)
        self.generate_fixtures(
            # https://www.openstreetmap.org/way/31486857
            dsl.way(31486857, dsl.box_area(z, x, y, 859573), {
                'amenity': 'college',
                'name': "St. Mary's College of California",
                'source': 'openstreetmap.org',
                'wikipedia': "en:Saint Mary's College of California",
            }),
        )
        self.assert_has_feature(
            z, x, y, 'pois', {
                'id': 31486857,
                'kind': 'college',
                'min_zoom': 12,
            })
|
def extract夢見る世界(item):
	"""
	Parser for '夢見る世界'.

	Repairs corrupted characters spliced into the preview check and the
	'Light Beyond Road's End' tag name.
	"""
	if 'Otome Games' in item['tags']:
		return None
	if 'Drama CDs' in item['tags']:
		return None
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	if not (chp or vol) or 'preview' in item['title'].lower():
		return None
	# (feed tag, series name, translation type)
	tagmap = [
		('Miss Appraiser and Gallery Demon', 'Miss Appraiser and Gallery Demon', 'translated'),
		('Light Beyond Road\'s End', 'Light Beyond (LN)', 'translated'),
	]
	for tagname, name, tl_type in tagmap:
		if tagname in item['tags']:
			return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
	return False
byt3bl33d3r/CrackMapExec | cme/modules/zerologon.py | Python | bsd-2-clause | 3,386 | 0.006202 | # everything is comming from https://github.com/dirkjanm/CVE-2020-1472
# credit to @dirkjanm
# module by : @mpgn_x64
import hmac, hashlib, struct, sys, socket, time
import logging
import sys
from binascii import hexlify, unhexlify
from subprocess import check_call

from impacket import crypto
from impacket.dcerpc.v5 import nrpc, epm
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.dtypes import NULL
# Give up brute-forcing after this many attempts. If vulnerable, 256 attempts are expected to be neccessary on average.
MAX_ATTEMPTS = 2000 # False negative chance: 0.04%
class CMEModule:
    """CrackMapExec module: probe a DC for Zerologon (CVE-2020-1472)."""
    # Standard CME module metadata attributes.
    name = 'zerologon'
    description = "Module to check if the DC is vulnerable to Zerologon aka CVE-2020-1472"
    supported_protocols = ['smb']
    opsec_safe = True
    multiple_hosts = False
    def options(self, context, module_options):
        '''
        NOP No options
        '''
    def on_login(self, context, connection):
        """Run the Zerologon probe against the connected host and report."""
        if perform_attack('\\\\' + connection.hostname, connection.host, connection.hostname):
            context.log.highlight("VULNERABLE")
            context.log.highlight("Next step: https://github.com/dirkjanm/CVE-2020-1472")
def fail(msg):
    """Print *msg* and a hint to stderr, then abort with exit status 2.

    BUGFIX: the original called ``logging.debug(msg, file=sys.stderr)`` —
    ``file=`` is a ``print()`` keyword, not a logging one, so every failure
    path raised TypeError instead of reporting the error.
    """
    print(msg, file=sys.stderr)
    print('This might have been caused by invalid arguments or network issues.', file=sys.stderr)
    sys.exit(2)
def try_zero_authenticate(rpc_con, dc_handle, dc_ip, target_computer):
    """Attempt one Netlogon authentication with an all-zero credential.

    Returns True when the DC accepted the zero credential (vulnerable),
    None when the DC answered STATUS_ACCESS_DENIED (expected failure —
    caller should retry), and aborts the process via fail() otherwise.
    """
    # Connect to the DC's Netlogon service.
    # Use an all-zero challenge and credential.
    plaintext = b'\x00' * 8
    ciphertext = b'\x00' * 8
    # Standard flags observed from a Windows 10 client (including AES), with only the sign/seal flag disabled.
    flags = 0x212fffff
    # Send challenge and authentication request.
    nrpc.hNetrServerReqChallenge(rpc_con, dc_handle + '\x00', target_computer + '\x00', plaintext)
    try:
        server_auth = nrpc.hNetrServerAuthenticate3(
            rpc_con, dc_handle + '\x00', target_computer + '$\x00', nrpc.NETLOGON_SECURE_CHANNEL_TYPE.ServerSecureChannel,
            target_computer + '\x00', ciphertext, flags
        )
        # It worked!
        assert server_auth['ErrorCode'] == 0
        return True
    except nrpc.DCERPCSessionError as ex:
        # Failure should be due to a STATUS_ACCESS_DENIED error. Otherwise, the attack is probably not working.
        if ex.get_error_code() == 0xc0000022:
            return None
        else:
            fail(f'Unexpected error code from DC: {ex.get_error_code()}.')
    except BaseException as ex:
        # NOTE(review): BaseException also catches KeyboardInterrupt — confirm intended.
        fail(f'Unexpected error: {ex}.')
def perform_attack(dc_handle, dc_ip, target_computer):
    """Repeatedly try the Zerologon handshake against ``target_computer``.

    A vulnerable DC accepts the all-zero credential once every ~256
    attempts on average; MAX_ATTEMPTS bounds the brute force.  Returns
    True when vulnerable, None otherwise.

    BUGFIX: the original progress call ``logging.debug('=', end='',
    flush=True)`` passed print()-style keywords that logging.debug() does
    not accept, raising TypeError on every failed attempt.
    """
    # Keep authenticating until succesfull. Expected average number of attempts needed: 256.
    logging.debug('Performing authentication attempts...')
    result = None
    binding = epm.hept_map(dc_ip, nrpc.MSRPC_UUID_NRPC, protocol='ncacn_ip_tcp')
    rpc_con = transport.DCERPCTransportFactory(binding).get_dce_rpc()
    rpc_con.connect()
    rpc_con.bind(nrpc.MSRPC_UUID_NRPC)
    for attempt in range(0, MAX_ATTEMPTS):
        result = try_zero_authenticate(rpc_con, dc_handle, dc_ip, target_computer)
        if result is None:
            logging.debug('=')  # progress marker per failed attempt
        else:
            break
    if result:
        return True
    else:
        logging.debug('\nAttack failed. Target is probably patched.')
|
sgiavasis/nipype | nipype/interfaces/camino/__init__.py | Python | bsd-3-clause | 848 | 0 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Camino top l | evel namespace
"""
from .conne | ctivity import Conmat
from .convert import (Image2Voxel, FSL2Scheme, VtkStreamlines, ProcStreamlines,
TractShredder, DT2NIfTI, NIfTIDT2Camino, AnalyzeHeader,
Shredder)
from .dti import (DTIFit, ModelFit, DTLUTGen, PicoPDFs, Track, TrackPICo,
TrackBayesDirac, TrackDT, TrackBallStick, TrackBootstrap,
TrackBedpostxDeter, TrackBedpostxProba,
ComputeFractionalAnisotropy, ComputeMeanDiffusivity,
ComputeTensorTrace, ComputeEigensystem, DTMetric)
from .calib import (SFPICOCalibData, SFLUTGen)
from .odf import (QBallMX, LinRecon, SFPeaks, MESD)
from .utils import ImageStats
|
dirtycold/git-cola | cola/version.py | Python | gpl-2.0 | 2,260 | 0 | # Copyright (c) David Aguilar
"""Provide git-cola's version number"""
from __future__ import division, absolute_import, unicode_literals
import os
import sys
if __name__ == '__main__':
srcdir = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(1, srcdir)
from .git import git
from .git import STDOUT
from .decorators import memoize
from ._version import VERSION
# minimum version requirements
_versions = {
# git diff learned --patience in 1.6.2
# git mergetool learned --no-prompt in 1.6.2
# git difftool moved out of contrib in git 1.6.3
'git': '1.6.3',
'python': '2.6',
# git diff --submodule was introduced in 1.6.6
'diff-submodule': '1.6.6',
# git check-ignore was introduced in 1.8.2, but did not follow the same
# rules as git add and git status until 1.8.5
'check-ignore': '1.8.5',
}
def get(key):
    """Return the known minimum version recorded for *key*, or None."""
    try:
        return _versions[key]
    except KeyError:
        return None
def version():
    """Returns the current git-cola version string (from cola._version.VERSION)"""
    return VERSION
@memoize
def check_version(min_ver, ver):
    """Check whether ver is greater or equal to min_ver
    """
    # Component-wise list comparison: ints compare numerically, so e.g.
    # '1.10' correctly sorts after '1.9'.
    min_ver_list = version_to_list(min_ver)
    ver_list = version_to_list(ver)
    return min_ver_list <= ver_list
@memoize
def check(key, ver):
    """Check whether `ver` satisfies the known minimum version for feature `key`"""
    return check_version(get(key), ver)
def version_to_list(version):
    """Convert a dotted version string to a list of numbers or strings.

    Numeric components become ints so comparisons are numeric (e.g.
    '1.10' > '1.9'); non-numeric components (e.g. 'rc1') stay strings.
    Repairs a corrupted character spliced into the ValueError handler.
    """
    ver_list = []
    for p in version.split('.'):
        try:
            n = int(p)
        except ValueError:
            n = p
        ver_list.append(n)
    return ver_list
|
@memoize
def git_version_str():
    """Returns the full `git version ...` output string, stripped"""
    return git.version()[STDOUT].strip()
@memoize
def git_version():
    """Return the version number extracted from ``git version`` output.

    Expected output looks like ``git version 1.8.5 ...``; the third token
    is the version number.  Falls back to the minimum supported version
    when the output cannot be parsed.
    """
    parts = git_version_str().split()
    # len(parts) >= 3 already implies a non-empty list, so the original
    # "parts and" truthiness test was redundant.
    if len(parts) >= 3:
        return parts[2]
    # Unparseable output: assume the minimum supported version
    return '1.6.3'
def print_version(brief=False):
    """Write the version to stdout; *brief* omits the 'cola version ' prefix."""
    if brief:
        sys.stdout.write('%s\n' % version())
    else:
        sys.stdout.write('cola version %s\n' % version())
if __name__ == '__main__':
print_version(brief=True)
|
nortxort/tinybot-rtc | apis/other.py | Python | mit | 3,454 | 0.002316 | """ Contains functions to fetch info from different simple online APIs."""
import util.web
def urbandictionary_search(search):
    """
    Searches urbandictionary's API for a given search term.

    :param search: The search term str to search for.
    :return: defenition str or None on no match or error.
    """
    # Blank or whitespace-only input: nothing to look up.
    if not str(search).strip():
        return None
    url = 'http://api.urbandictionary.com/v0/define?term=%s' % search
    response = util.web.http_get(url=url, json=True)
    if response['json'] is None:
        return None
    try:
        return response['json']['list'][0]['definition'].encode('ascii', 'ignore')
    except (KeyError, IndexError):
        # No definitions in the payload, or unexpected response shape.
        return None
def weather_search(city):
    """
    Searches worldweatheronline's API for weather data for a given city.

    You must have a working API key to be able to use this function.
    :param city: The city str to search for.
    :return: weather data str or None on no match or error.
    """
    # Blank or whitespace-only input: nothing to look up.
    if not str(city).strip():
        return None
    api_key = ''
    if not api_key:
        return 'Missing api key.'
    weather_api_url = 'http://api.worldweatheronline.com/premium/v1/weather.ashx?key=%s&q=%s&format=json' % \
                      (api_key, city)
    response = util.web.http_get(url=weather_api_url, json=True)
    if response['json'] is None:
        return None
    try:
        data = response['json']['data']
        current = data['current_condition'][0]
        query = data['request'][0]['query'].encode('ascii', 'ignore')
        return '%s. Temperature: %sC (%sF) Pressure: %s millibars' % \
            (query, current['temp_C'], current['temp_F'], current['pressure'])
    except (IndexError, KeyError):
        # Unknown city or unexpected response shape.
        return None
def whois(ip):
    """
    Searches ip-api for information about a given IP.

    :param ip: The ip str to search for.
    :return: information str or None on error.
    """
    # Blank or whitespace-only input: nothing to look up.
    if not str(ip).strip():
        return None
    response = util.web.http_get(url='http://ip-api.com/json/%s' % ip, json=True)
    if response['json'] is None:
        return None
    try:
        data = response['json']
        # Assemble the same comma-separated summary the caller expects.
        return data['country'] + ', ' + data['city'] + ', ' + data['regionName'] + \
            ', Zipcode: ' + data['zip'] + ' Isp: ' + data['isp'] + '/' + data['org']
    except KeyError:
        return None
def chuck_norris():
"""
Finds a random Chuck Norris joke/quote.
:return: joke str or None on failure.
"""
url = 'http://api.icndb.com/jokes/random/?escape=javascript'
response = util.web.http_get(url=url, json=True)
if resp | onse['json'] is not None:
if response['json']['type'] == 'success':
joke | = response['json']['value']['joke']
return joke
return None
|
benqiu2016/nifi-minifi-cpp | thirdparty/librdkafka-0.11.1/packaging/nuget/release.py | Python | apache-2.0 | 2,393 | 0.002507 | #!/usr/bin/env python
#
| #
# NuGet release packaging tool.
# Creates a NuGet package from CI artifacts on S3.
#
import sys
import argparse
import packaging
dry_run = False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--no-s3", help="Don't collect from S3", action="store_true")
parser.add_argument("--dry-run",
help="Locate artif | acts but don't actually download or do anything",
action="store_true")
parser.add_argument("--directory", help="Download directory (default: dl-<tag>)", default=None)
parser.add_argument("--no-cleanup", help="Don't clean up temporary folders", action="store_true")
parser.add_argument("--sha", help="Also match on this git sha1", default=None)
parser.add_argument("tag", help="Git tag to collect")
args = parser.parse_args()
dry_run = args.dry_run
if not args.directory:
args.directory = 'dl-%s' % args.tag
match = {'tag': args.tag}
if args.sha is not None:
match['sha'] = args.sha
arts = packaging.Artifacts(match, args.directory)
# Collect common local artifacts, such as support files.
arts.collect_local('common', req_tag=False)
if not args.no_s3:
arts.collect_s3()
else:
arts.collect_local(arts.dlpath)
if len(arts.artifacts) == 0:
raise ValueError('No artifacts found for %s' % match)
print('Collected artifacts:')
for a in arts.artifacts:
print(' %s' % a.lpath)
print('')
package_for = [packaging.NugetPackage]
packages = list()
print('Packaging classes: %s' % package_for)
for pcl in package_for:
p = pcl(match['tag'], arts)
packages.append(p)
print('')
if dry_run:
sys.exit(0)
# Build packages
print('Building packages:')
pkgfiles = []
for p in packages:
paths = p.build(buildtype='release')
for path in paths:
# Verify package files
if p.verify(path):
pkgfiles.append(path)
if not args.no_cleanup:
p.cleanup()
else:
print(' --no-cleanup: leaving %s' % p.stpath)
print('')
if len(pkgfiles) > 0:
print('Created packages:')
for pkg in pkgfiles:
print(pkg)
else:
print('No packages created')
sys.exit(1)
|
fnp/wolnelektury | src/sponsors/__init__.py | Python | agpl-3.0 | 236 | 0 | # | This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
__author__ = 'Marek Stępniowski, <marek@stepniowski.com>'
__version__ = '0 | .1'
|
joedeller/pymine | forloop.py | Python | mit | 1,535 | 0.000651 | #! /usr/bin/pyt | hon
# Joe Deller 2014
# Using for loops
# Level : Beginner
# Uses : Libraries, variables, operators, loops
# Loops are a very important part of programming
# The for loop is a very common loop
# It counts from a starting number to a finishing number
# It normally counts up in ones, but you can count up
# in any number you want, or count downwards
#
# The wool block in minecraft can be any one of 16 different colours
# from 0, a white block, to 15, a black block
# T | his program uses a for loop to draw wool blocks
# of all 16 different colours
# It also uses the for loop to set where the block is drawn
# so we can see all 16 colours
import mcpi.minecraft as minecraft
import mcpi.block as block
import time
# Setup the connection and clear a space
# set us down in the middle of the world
mc = minecraft.Minecraft.create()
x, y, z = mc.player.getPos()
mc.setBlocks(x - 20, y, z - 20, x + 20, y + 20, z + 20, block.AIR)
mc.setBlocks(z - 20, y - 1, z - 20, y, z + 20, block.GRASS.id)
for colour in range(0, 15):
# draw upwards
mc.setBlock(x + 15, y + 2 + colour, z + 2, block.WOOL.id, colour)
# draw across
mc.setBlock(x + colour, y + 2, z + 2, block.WOOL.id, colour)
time.sleep(.5)
# Counting backwards, using a negative number to say how quickly to count backwards
# Try changing this to -2 and see what happens
for colour in range(15, 0, -1):
mc.setBlock(x, 1 + colour, z + 2, block.WOOL.id, colour)
mc.setBlock(colour, y + 16, z + 2, block.WOOL.id, colour)
time.sleep(.1)
|
sandz-in/twilio_trello | twilio_sms_handler/apps.py | Python | mit | 174 | 0 | # | -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
clas | s TwilioSmsHandlerConfig(AppConfig):
name = 'twilio_sms_handler'
|
github-borat/cinder | cinder/tests/test_netapp_nfs.py | Python | apache-2.0 | 47,799 | 0.000063 |
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
    """Return a mox-mocked volume Configuration preset for NFS tests."""
    configuration = mox.MockObject(conf.Configuration)
    # Accept the driver registering any option group during setup.
    configuration.append_config_values(mox.IgnoreArg())
    configuration.nfs_mount_point_base = '/mnt/test'
    configuration.nfs_mount_options = None
    return configuration
class FakeVolume(object):
    """Minimal stand-in for a cinder volume with dict-style attribute access."""

    def __init__(self, size=0):
        # Default the name; the id mimics a unique identifier per instance.
        self.name = None
        self.id = hash(self)
        self.size = size

    def __getitem__(self, item):
        """Allow volume['attr'] reads, mirroring dict-like driver access."""
        return self.__dict__[item]

    def __setitem__(self, item, value):
        """Allow volume['attr'] = value writes."""
        self.__dict__[item] = value
class FakeSnapshot(object):
    """Minimal stand-in for a cinder snapshot with read-only dict access."""

    def __init__(self, volume_size=0):
        # All bookkeeping fields start unset; only the size is configurable.
        self.volume_size = volume_size
        self.volume_name = None
        self.volume_id = None
        self.name = None
        self.user_id = None
        self.status = None

    def __getitem__(self, item):
        """Allow snapshot['attr'] reads, mirroring dict-like driver access."""
        return self.__dict__[item]
class FakeResponse(object):
    """Minimal stand-in for an API response carrying a pass/fail status."""

    def __init__(self, status):
        """Initialize FakeResponse.

        :param status: Either 'failed' or 'passed'
        """
        self.Status = status
        if status == 'failed':
            # Only failed responses carry an error reason.
            self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
def setUp(self):
super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
    def test_create_snapshot(self):
        """Test snapshot can be created and deleted."""
        mox = self.mox
        drv = self._driver
        # Record phase: creating a snapshot must delegate to the driver's
        # file-clone helper exactly once, with any arguments.
        mox.StubOutWithMock(drv, '_clone_volume')
        drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
        mox.ReplayAll()
        drv.create_snapshot(FakeSnapshot())
        # VerifyAll() fails if the recorded expectation was not met.
        mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn(' | /mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
| drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
mox.VerifyAll()
    def _prepare_delete_snapshot_mock(self, snapshot_exists):
        """Record the driver calls expected while deleting a snapshot.

        :param snapshot_exists: when True, also expect the backing file to be
            located and removed; when False, deletion is a lookup-only no-op.
        :return: the replayed mox instance, for a later VerifyAll().
        """
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_get_provider_location')
        mox.StubOutWithMock(drv, '_volume_not_present')
        mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
        if snapshot_exists:
            mox.StubOutWithMock(drv, '_execute')
            mox.StubOutWithMock(drv, '_get_volume_path')
        # Expectation order matters under mox replay: the location is read
        # twice before the presence check decides the deletion path.
        drv._get_provider_location(IgnoreArg())
        drv._get_provider_location(IgnoreArg())
        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
            .AndReturn(not snapshot_exists)
        if snapshot_exists:
            drv._get_volume_path(IgnoreArg(), IgnoreArg())
            drv._execute('rm', None, run_as_root=True)
        drv._post_prov_deprov_in_ssc(IgnoreArg())
        mox.ReplayAll()
        return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
setattr(drv, 'ssc_enabled', False)
mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
netapp_nfs.NetAppDirectNfsDriver._check_flags()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_get_if_info_by_ip')
mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
mox.StubOutWithMock(drv, '_clone_file')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
drv._get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
drv._clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._post_prov_deprov_in_ssc(IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address |
mozillazg/lark | lark/music/models.py | Python | mit | 1,119 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.db import models
from django.utils.encoding import python_2 | _unicode_compatible
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
@python_2_unicode_compatible
class Music(TimeStampedModel):
title = models.CharField(_('title'), max_length=100)
author = models.CharField(_('author'), max_length=50)
cover = models.URLField(_('album cover'))
mp3 = models.URLField(_('mp3 file url'))
ogg = models.URLFiel | d(_('ogg file url'))
douban = models.URLField(_('douban page'), blank=True)
sid = models.CharField(_('SID'), max_length=10, blank=True)
class Meta:
verbose_name = _('music')
verbose_name_plural = _('music')
def __str__(self):
return '"{0}" by {1}'.format(self.title, self.author)
    def save(self, *args, **kwargs):
        """Save the instance, then default sid to the primary key if unset."""
        super(Music, self).save(*args, **kwargs)
        if not self.sid:
            # The first save assigned the pk; persist it as the sid with a
            # second, minimal UPDATE.  The guard above prevents infinite
            # recursion because sid is truthy on the nested call.
            self.sid = self.pk
            self.save(update_fields=('sid',))
|
yhtang/MUI | wrappers/Python/mui4py/common.py | Python | apache-2.0 | 4,086 | 0.007097 | import collections
import mui4py.mui4py_mod as mui4py_mod
from mui4py.config import get_default_config
from mui4py.types import map_type, get_float_type_str, get_int_type_str, get_io_type_str
import re
import numpy as np
class CppClass(object):
def __init__(self, config=None, args=(), kwargs={}):
self._cpp_class_name = None
self._cpp_point_class_name = None
self.ra | w_point = None
self.raw = N | one
self.io_data_type = None
# Convertir args en Args()
self.args = tuple([Arg(a) if not issubclass(a.__class__, Arg) else a for a in args])
self.namespace = ""
# Filter None-valued entries to gake C++ default values.
self.kwargs = {k: v for k, v in kwargs.items() if v is not None}
self.configured = False
self._ALLOWED_IO_TYPES = None
if config is None:
self.config = get_default_config()
else:
self.config = config
self.signature = self._signature()
def _signature(self):
sig = self._split_class_name()
args_str = [str(a) for a in self.get_plain_args()]
kwargs_str = ["{}={}".format(k,v) for k,v in self.kwargs.items()]
if args_str:
sig += "_ARGS_" + "_".join(args_str)
if kwargs_str:
sig += "_KWARGS_" + "_".join(kwargs_str)
return sig
def _split_class_name(self, title=True):
tokens = re.findall('[A-Z][^A-Z]*', self.__class__.__name__)
tokens = [t.lower() for t in tokens]
if title:
tokens[0] = tokens[0].title()
return "_".join(tokens)
    def get_plain_args(self):
        """Return the raw (unwrapped) values of the configured Arg objects."""
        return tuple([a.arg for a in self.args])
    def get_plain_kwargs(self):
        """Placeholder accessor; currently returns None.

        NOTE(review): the bare return looks like an unfinished stub —
        confirm whether self.kwargs was meant to be returned.
        """
        return
    def configure(self, config, io_data_type=None, cpp_obj=None, onlycheck=False):
        """Bind this wrapper to a concrete C++ class exported by mui4py_mod.

        :param config: configuration carrying dim, float_type and int_type.
        :param io_data_type: optional element type for I/O; after mapping it
            must appear in self._ALLOWED_IO_TYPES.
        :param cpp_obj: pre-built raw C++ object to adopt instead of creating one.
        :param onlycheck: when True, only validate the io type mapping; the
            mapped type is then discarded and cpp_obj is adopted as-is.
        """
        self.config = config
        # Resolve the Point class matching the configured dim/precision first;
        # argument configuration below depends on it.
        self.point_class_name = get_cpp_name("Point", config.dim,\
                                config.float_type, config.int_type)
        self.raw_point = getattr(mui4py_mod, self.point_class_name)
        self.io_data_type = map_type[io_data_type]
        if self.io_data_type is not None and self.io_data_type not in self._ALLOWED_IO_TYPES:
            raise Exception("Data type not supported by spatial sampler ''. Supported types : {float, np.float32, np.float64, etc.}")
        if onlycheck:
            self.io_data_type = None
            self.raw = cpp_obj
        self._cpp_class_name = get_cpp_name(self._split_class_name(), config.dim, config.float_type,
                                            config.int_type, namespace=self.namespace, type_io=self.io_data_type)
        if self.raw is None:
            # Configure class arguments
            for a in self.args:
                a.configure(config, self.raw_point)
            self.raw = getattr(mui4py_mod, self._cpp_class_name)(*self.get_plain_args(), **self.kwargs)
        self.configured = True
class Arg(object):
    """Wrapper for a constructor argument passed to a wrapped C++ class."""

    def __init__(self, arg):
        self.arg = arg

    def configure(self, config, cpp_point):
        """Hook for subclasses; plain arguments need no configuration."""
        pass
class _Point(Arg):
    """Deferred point argument: converted into a raw C++ point on configure()."""

    def __init__(self, point_rep):
        # The wrapped value stays None until configure() materialises it.
        super(_Point, self).__init__(None)
        self.point_rep = point_rep

    def configure(self, config, cpp_point):
        """Materialise the stored representation as a cpp_point instance."""
        self.arg = array2Point(self.point_rep, config, cpp_point)
def array2Point(arr, config, cpp_point):
    """Convert a scalar or sequence into a cpp_point of config.dim elements.

    Bare scalars are wrapped into a one-element list so 1-D points can be
    given as plain numbers; lists, tuples and numpy arrays are used as-is.

    :raises Exception: when the input length differs from config.dim.
    """
    arr_aux = arr
    # Idiomatic single isinstance check with a tuple of accepted sequence types.
    if not isinstance(arr, (list, tuple, np.ndarray)):
        arr_aux = [arr]
    # TODO:Maybe check for point type?
    if len(arr_aux) == config.dim:
        return cpp_point(arr_aux)
    else:
        raise Exception("Size of point is different than uniface dimensions.")
def get_cpp_name(cname, dim, float_type, int_type, namespace="", type_io=None):
    """Assemble the mangled C++ class name used by the mui4py bindings."""
    tokens = []
    if namespace:
        tokens.append("_" + namespace)
    tokens.append("_{}{}d_{}_{}".format(cname, dim, get_float_type_str(float_type),
                                        get_int_type_str(int_type)))
    if type_io is not None:
        tokens.append("_" + get_io_type_str(type_io))
    return "".join(tokens)
|
opotowsky/cyclus.github.com | source/numpydoc/docscrape_sphinx.py | Python | bsd-3-clause | 9,437 | 0.001166 | from __future__ import division, absolute_import, print_function
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list( | name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.s | trip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __ |
nuagenetworks/vspk-python | vspk/v6/nuconnectionendpoint.py | Python | bsd-3-clause | 14,146 | 0.00919 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUConnectionendpoint(NURESTObject):
""" Represents a Connectionendpoint in the VSD
Notes:
SSH (Secure Shell) is used to provide secure remote console access to NSGs deployed in branch locations. For additional security, you may restrict SSH access from specific host(s) by providing a list of source IP addresses.
"""
__rest_name__ = "connectionendpoint"
__resource_name__ = "connectionendpoints"
## Constants
CONST_IP_TYPE_IPV6 = "IPV6"
CONST_IP_TYPE_IPV4 = "IPV4"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_END_POINT_TYPE_SOURCE = "SOURCE"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a Connectionendpoint instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> connectionendpoint = NUConnectionendpoint(id=u'xxxx-xxx-xxx-xxx', name=u'Connectionendpoint')
>>> connectionendpoint = NUConnectionendpoint(data=my_dict)
"""
super(NUConnectionendpoint, self).__init__()
# Read/Write Attributes
self._ip_address = None
self._ip_type = None
self._ipv6_address = None
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._description = None
self._embedded_metadata = None
self._end_point_type = None
self._entity_scope = None
self._creation_date = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="ip_addres | s", remote_name="IPAddress", attribute_type=str, is | _required=False, is_unique=False)
self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'IPV4', u'IPV6'])
self.expose_attribute(local_name="ipv6_address", remote_name="IPv6Address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="end_point_type", remote_name="endPointType", attribute_type=str, is_required=False, is_unique=False, choices=[u'SOURCE'])
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def ip_address(self):
""" Get ip_address value.
Notes:
IP Address of the end point.
This attribute is named `IPAddress` in VSD API.
"""
return self._ip_address
@ip_address.setter
def ip_address(self, value):
""" Set ip_address value.
Notes:
IP Address of the end point.
This attribute is named `IPAddress` in VSD API.
"""
self._ip_address = value
@property
def ip_type(self):
""" Get ip_type value.
Notes:
IPv4 or IPv6.
This attribute is named `IPType` in VSD API.
"""
return self._ip_type
@ip_type.setter
def ip_type(self, value):
""" Set ip_type value.
Notes:
IPv4 or IPv6.
This attribute is named `IPType` in VSD API.
"""
self._ip_type = value
@property
def ipv6_address(self):
""" Get ipv6_address value.
Notes:
IPv6 address of the end point.
This attribute is named `IPv6Address` in VSD API.
"""
return self._ipv6_address
@ipv6_address.setter
def ipv6_address(self, value):
""" Set ipv6_address value.
Notes:
IPv6 address of the end point.
This attribute is named `IPv6Address` in VSD API.
"""
self._ipv6_address = value
@property
def name(self):
""" Get name value.
Notes:
Name of the connection endpoint.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the connection endpoint.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the us |
karlward/igsql | igsql/database.py | Python | gpl-3.0 | 537 | 0.001862 | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('postgresql://igsql:igsql@127.0.0.1/igsql' | )
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session. | query_property()
def init_db():
    """Create every table registered on Base's metadata.

    The deferred import of igsql.model (here, not at module load) registers
    all model classes with Base before create_all runs, and avoids a
    circular import between this module and the models.
    """
    import igsql.model
    Base.metadata.create_all(bind=engine)
|
EmreAtes/spack | var/spack/repos/builtin/packages/r-iranges/package.py | Python | lgpl-2.1 | 2,504 | 0.001198 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RIranges(RPackage):
"""Provides efficient low-level and highly
reusable S4 classes for storing,
manipulating and aggregating over annotated ranges of
integers. Implements an
algebra of range operations, including efficient
algorithms for finding overlaps
and nearest neighbors. Defines efficien | t list-like
| classes for storing, transforming
and aggregating large grouped data,
i.e., collections of atomic vectors and DataFrames."""
homepage = "https://www.bioconductor.org/packages/IRanges/"
url = "https://git.bioconductor.org/packages/IRanges"
list_url = homepage
version('2.12.0', git='https://git.bioconductor.org/packages/IRanges', commit='1b1748655a8529ba87ad0f223f035ef0c08e7fcd')
version('2.10.5', git='https://git.bioconductor.org/packages/IRanges', commit='b00d1d5025e3c480d17c13100f0da5a0132b1614')
depends_on('r-biocgenerics@0.21.1:', type=('build', 'run'), when='@2.10.5')
depends_on('r-biocgenerics@0.23.3:', type=('build', 'run'), when='@2.12.0')
depends_on('r-s4vectors@0.13.17:', type=('build', 'run'), when='@2.10.5')
depends_on('r-s4vectors@0.15.5:', type=('build', 'run'), when='@2.12.0')
depends_on('r@3.4.0:3.4.9', when='@2.10.5:')
|
angusgbishop/biast | makeDrink.py | Python | apache-2.0 | 13 | 0.076923 | def makeDr | ink | |
NicolasLM/crawler | crawler/crawler.py | Python | mit | 3,682 | 0.001086 | from urllib.parse import urlparse
from collections import namedtuple
import socket
import requests
from requests.packages import urllib3
from bs4 import BeautifulSoup
import rethinkdb as r
from celery import Celery
from celery.utils.log import get_task_logger
import pyasn
import geoip2.database, geoip2.errors
import crawler.conf as conf
logger = get_task_logger(__name__)
app = Celery('crawler')
app.config_from_object(conf.CeleryConf)
asn_db = pyasn.pyasn(conf.ASN_FILE)
geoip2_db = geoip2.database.Reader(conf.GEOIP2_FILE)
DomainInfo = namedtuple(
'DomainInfo',
['name', 'elapsed', 'headers', 'linked_domains', 'asn', 'country']
)
class UncrawlableDomain(Exception):
pass
def get_page(domain):
urls = ['http://' + domain, 'https://' + domain]
for url in urls:
try:
return requests.get('http://' + domain,
timeout=conf.REQUESTS_TIMEOUT)
except (requests.RequestException, urllib3.exceptions.HTTPError):
continue
raise UncrawlableDomain('Cannot crawl ' + domain)
def get_asn_from_ip(ip):
try:
return asn_db.lookup(ip)[0]
except ValueError:
return None
def get_country_from_ip(ip):
try:
return geoip2_db.country(ip).country.name
except (ValueError, geoip2.errors.AddressNotFoundError):
return None
def get_domain_info(domain):
response = get_page(domain)
if 'text/html' not in response.headers.get('Content-Type', ''):
raise UncrawlableDomain('Cannot crawl ' + domain)
domains = list()
soup = BeautifulSoup(response.content, 'html.parser')
for link in soup.find_all('a'):
parsed_link = urlparse(link.get('href'))
if parsed_link.netloc:
domains.append(parsed_link.netloc.lower())
try:
ip = socket.gethostbyname(domain)
asn = get_asn_from_ip(ip)
country = get_country_from_ip(ip)
except socket.gaierror:
asn = None
country = none
return DomainInfo(
name=domain,
elapsed=round(response.elapsed.microseconds / 1000),
headers=response.headers,
linked_domains=set(domains),
asn=asn,
country=country
)
def record_success(conn, domain_name, doma | in_info):
| r.table('domains').insert({
'name': domain_name,
'success': True,
'headers': domain_info.headers,
'elapsed': domain_info.elapsed,
'asn': domain_info.asn,
'country': domain_info.country,
'date': r.now()
}).run(conn)
logger.info('Fetched domain {} in {}ms'.format(domain_name,
domain_info.elapsed))
def record_failure(conn, domain_name):
r.table('domains').insert({
'name': domain_name,
'success': False,
'date': r.now()
}).run(conn)
logger.info('Could not fetch domain {}'.format(domain_name))
@app.task(name='crawler.crawl_domain')
def crawl_domain(domain):
# Connect to rethinkdb
conn = r.connect(host=conf.RethinkDBConf.HOST,
db=conf.RethinkDBConf.DB)
# Do not process already crawled domains
if r.table('domains').filter({'name': domain}).count().run(conn):
return
try:
domain_info = get_domain_info(domain)
except UncrawlableDomain:
record_failure(conn, domain)
return
# Create a task for each domain not seen yet
for linked_domain in domain_info.linked_domains:
if r.table('domains').filter({'name': linked_domain}).count().run(conn):
continue
crawl_domain.delay(linked_domain)
record_success(conn, domain, domain_info)
|
nycholas/ask-undrgz | src/ask-undrgz/django/template/defaultfilters.py | Python | bsd-3-clause | 28,636 | 0.005133 | """Default variable filters."""
import re
from decimal import Decimal, InvalidOperation, ROUND_HALF_UP
import random as random_module
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.template import Variable, Library
from django.conf import settings
from django.utils import formats
from django.utils.translation import ugettext, ungettext
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.safestring import mark_safe, SafeData
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_unicode(args[0])
if isinstance(args[0], SafeData) and getattr(func, 'is_safe', False):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser).
_dec._decorated_function = getattr(func, '_decorated_function', func)
for attr in ('is_safe', 'needs_autoescape'):
if hasattr(func, attr):
setattr(_dec, attr, getattr(func, attr))
return wraps(func)(_dec)
###################
# STRINGS #
###################
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
addslashes.is_safe = True
addslashes = stringfilter(addslashes)
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
capfirst.is_safe=True
capfirst = stringfilter(capfirst)
_base_js_escapes = (
('\\', r'\u005C'),
('\'', r'\u0027'),
('"', r'\u0022'),
('>', r'\u003E'),
('<', r'\u003C'),
('&', r'\u0026'),
('=', r'\u003D'),
('-', r'\u002D'),
(';', r'\u003B'),
(u'\u2028', r'\u2028'),
(u'\u2029', r'\u2029')
)
# Escape every ASCII character with a value less than 32.
_js_escapes = (_base_js_escapes +
tuple([('%c' % z, '\\u%04X' % z) for z in range(32)]))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
for bad, good in _js_escapes:
value = value.replace(bad, good)
return value
escapejs = stringfilter(escapejs)
def fix_ampersands(value):
"""Replaces ampersands with ``&`` entities."""
from django.utils.html import fix_ampersands
return fix_ampersand | s(value)
fix_ampersands.is_safe=True
fix_ampersands = stringfilter(fix_ampersands)
# Values for testin | g floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completley invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) / (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_unicode(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return u''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_unicode(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return u''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format(u'%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
return mark_safe(formats.number_format(u'%s' % str(d.quantize(exp, ROUND_HALF_UP)), abs(p)))
except InvalidOperation:
return input_val
floatformat.is_safe = True
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_unicode(iri_to_uri(value))
iriencode.is_safe = True
iriencode = stringfilter(iriencode)
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
from django.utils.html import escape
lines = value.split(u'\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = unicode(len(unicode(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, escape(line))
return mark_safe(u'\n'.join(lines))
linenumbers.is_safe = True
linenumbers.needs_autoescape = True
linenumbers = stringfilter(linenumbers)
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
lower.is_safe = True
lower = stringfilter(lower)
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
make_list.is_safe = False
make_list = stringfilter(make_list)
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return mark_safe(re.sub('[-\s]+', '-', value))
slugify.is_safe = True
slugify = stringfilter(slugify)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
This specifier uses Python string formating syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return (u"%" + unicode(arg)) % value
except (ValueError, TypeError):
return u""
stringformat.is_safe = True
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
title.is_safe = True |
OrlyMar/gasistafelice | gasistafelice/lib/views_support.py | Python | agpl-3.0 | 10,577 | 0.011629 | # This code has been taken from http://www.assembla.com/spaces/datatables_demo/wiki
from django.db.models import Q
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.utils.cache import add_never_cache_headers
from django.utils import simplejson
import os
from django.conf import settings
import logging, traceback
log = logging.getLogger(__name__)
#TODO: Fero def prepare_datatables_list
def prepare_datatables_queryset(request, querySet, columnIndexNameMap, *args):
"""
Retrieve querySet to be displayed in datatables..
Usage:
querySet: query set to draw data from.
columnIndexNameMap: field names in order to be displayed.
Return a tuple:
querySet: data to be displayed after this request
datatables parameters: a dict which includes
- iTotalRecords: total data before filtering
- iTotalDisplayRecords: total data after filtering
"""
try:
iTotalRecords = querySet.count() #count how many records are in queryset before matching final criteria
except:
return prepare_datatables_list(request, querySet, columnIndexNameMap, *args)
cols = int(request.GET.get('iColumns',0)) # Get the number of columns
iDisplayLength = min(int(request.GET.get('iDisplayLength',10)),100) #Safety measure. If someone messes with iDisplayLength manually, we clip it to the max value of 100.
startRecord = int(request.GET.get('iDisplayStart',0)) # Where the data starts from (page)
endRecord = startRecord + iDisplayLength # where the data ends (end of page)
# Ordering data
iSortingCols = int(request.GET.get('iSortingCols',0))
asortingCols = []
if iSortingCols:
for sortedColIndex in range(0, iSortingCols):
sortedColID = int(request.GET.get('iSortCol_'+str(sortedColIndex),0))
if request.GET.get('bSortable_{0}'.format(sortedColID), 'false') == 'true': # make sure the column is sortable first
sortedColName = columnIndexNameMap[sortedColID]
sortingDirection = request.GET.get('sSortDir_'+str(sortedColIndex), 'asc')
if sortingDirection == 'desc':
sortedColName = '-'+sortedColName
asortingCols.append(sortedColName)
querySet = querySet.order_by(*asortingCols)
# Determine which columns are searchable
searchableColumns = []
for col in range(0,cols):
if request.GET.get('bSearchable_{0}'.format(col), False) == 'true': searchableColumns.append(columnIndexNameMap[col])
# Apply filtering by value sent by user
customSearch = request.GET.get('sSearch', '').encode('utf-8');
if customSearch != '':
outputQ = None
first = True
for searchableColumn in searchableColumns:
kwargz = {searchableColumn+"__icontains" : customSearch}
outputQ = outputQ | Q(**kwargz) if outputQ else Q(**kwargz)
querySet = querySet.filter(outputQ)
# Individual column search
outputQ = None
for col in range(0,cols):
if request.GET.get('sSearch_{0}'.format(col), False) > '' and request.GET.get('bSearchable_{0}'.format(col), False) == 'true':
kwargz = {columnIndexNameMap[col]+"__icontains" : request.GET['sSearch_{0}'.format(col)]}
outputQ = outputQ & Q(**kwargz) if outputQ else Q(**kwargz)
if outputQ: querySet = querySet.filter(outputQ)
iTotalDisplayRecords = querySet.count() #count how many records match the final criteria
if endRecord > startR | ecord:
querySet = querySet[startRecord:endRecord] #get the slice
return querySet, {
'iTotalRecords' : iTotalRecords,
'iTotalDisplayRecords' : iTotalDisplayRecords,
}
def prepare_datatables_list(request, queryList, columnIndexNameMap, *args):
"""
Retrieve list of objects to be displayed in datatables..
Usage:
queryList: raw list of objects set to draw data from.
col | umnIndexNameMap: field names in order to be displayed.
Return a tuple:
queryList: data to be displayed after this request
datatables parameters: a dict which includes
- iTotalRecords: total data before filtering
- iTotalDisplayRecords: total data after filtering
"""
iTotalRecords = len(queryList)
# Ordering data
# Determine which columns are searchable
# Apply filtering by value sent by user
# Individual column search
return queryList, {
'iTotalRecords' : iTotalRecords,
'iTotalDisplayRecords' : iTotalRecords,
}
def render_datatables(request, records, dt_params, jsonTemplatePath, moreData=None):
"""
Render datatables..
Usage:
querySet: query set to draw data from.
dt_params: encapsulate datatables parameters. DataTables reference: http://www.datatables.net/ref
jsonTemplatePath: template file to generate custom json from.
"""
sEcho = int(request.GET.get('sEcho',0)) # required echo response
iTotalRecords = dt_params["iTotalRecords"]
iTotalDisplayRecords = dt_params["iTotalDisplayRecords"]
jstonString = render_to_string(jsonTemplatePath, locals()) #prepare the JSON with the response, consider using : from django.template.defaultfilters import escapejs
response = HttpResponse(jstonString, mimetype="application/javascript")
#prevent from caching datatables result
add_never_cache_headers(response)
return response
def render_datatables_automagic(request, querySet, columnIndexNameMap, iTotalRecords, iTotalDisplayRecords, moreData=None):
"""
Render datatables..
Usage:
querySet: query set to draw data from.
dt_params: encapsulate datatables parameters. DataTables reference: http://www.datatables.net/ref
columnIndexNameMap: field names in order to be displayed.
other parameters follows datatables specifications: http://www.datatables.net/ref
"""
sEcho = int(request.GET.get('sEcho',0)) # required echo response
# Pass sColumns
keys = columnIndexNameMap.keys()
keys.sort()
colitems = [columnIndexNameMap[key] for key in keys]
sColumns = ",".join(map(str,colitems))
aaData = []
a = querySet.values()
for row in a:
rowkeys = row.keys()
rowvalues = row.values()
rowlist = []
for col in range(0,len(colitems)):
for idx, val in enumerate(rowkeys):
if val == colitems[col]:
rowlist.append(str(rowvalues[idx]))
aaData.append(rowlist)
response_dict = {}
response_dict.update({'aaData':aaData})
response_dict.update({'sEcho': sEcho, 'iTotalRecords': iTotalRecords, 'iTotalDisplayRecords':iTotalDisplayRecords, 'sColumns':sColumns})
response_dict.update({'moreData':moreData})
response = HttpResponse(simplejson.dumps(response_dict), mimetype='application/javascript')
#prevent from caching datatables result
add_never_cache_headers(response)
return response
#Needed to insert images in report
def pisa_fetch_resources(uri, rel):
path = os.path.join(settings.MEDIA_ROOT, uri.replace(settings.MEDIA_URL, ""))
return path
#------------------------------------------------------------------------------
# Author: Luca Ferroni
# License: AGPLv3
from django.contrib.admin import helpers
from django.utils.safestring import mark_safe
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.template import Template
from django.template.response import TemplateResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
# Naive implementation to be tuned as data protocol exchange for Ajax requests
template_success = Template("""
<div id="response" class="su |
burunduk3/t.sh | unsorted.py | Python | gpl-2.0 | 1,016 | 0.024606 | import os
suffixes = {
'c': 'c',
'pas': 'pascal', 'dpr': 'delphi',
'java': 'java', 'pl': 'perl', 'py': detector_python, 'sh': 'bash'
}
def suffixes_all ():
return suffixes.keys ()
def tests_export ( problem ):
os.chdir (problem.path)
if not problem.tests:
problem.research_tests ()
tests = [Test.file (x) for x in problem.tests]
if not tests:
raise Error ('[problem %s]: no tests found' % problem.name)
if not os.path.isdir ('te | sts'):
os.mkdir ('tests')
pattern = '%02d'
if len (tests) >= 100:
pattern = '%03d'
if len (tests) >= 1000:
raise Error ("[problem %s]: too many tests (%d)" % (problem.name, len (tests)))
n = 0
for i, x in enumerate (tests):
test = x.create ()
name = patt | ern % (i + 1)
shutil.copy (test, os.path.join ('tests', name))
shutil.copy (test + '.a', os.path.join ('tests', name) + '.a')
n += 1
log ('pattern: %s, tests copied: %d' % (pattern, n))
|
vasilcovsky/pytinypng | pytinypng/__init__.py | Python | bsd-2-clause | 54 | 0 | __ | version__ = '0.0.9'
__author__ = 'Igor Va | silcovsky'
|
kafluette/ffnnet | ffnnet.py | Python | apache-2.0 | 10,067 | 0.000397 | #!/usr/bin/env python
"""Example which shows with the MNIST dataset how Lasagne can be used."""
from __future__ import print_function
import gzip
import itertools
import pickle
import os
import sys
import numpy as np
import lasagne
import theano
import theano.tensor as T
from scipy.linalg import hadamard
from scipy.special import gamma
import time
PY2 = sys.version_info[0] == 2
if PY2:
from urllib import urlretrieve
def pickle_load(f, encoding):
return pickle.load(f)
else:
from urllib.request import urlretrieve
def pickle_load(f, encoding):
return pickle.load(f, encoding=encoding)
DATA_URL = 'http://deeplearning.net/data/mnist/mnist.pkl.gz'
DATA_FILENAME = 'mnist.pkl.gz'
NUM_EPOCHS = 50
BATCH_SIZE = 600
NUM_HIDDEN_UNITS = 512
LEARNING_RATE = 0.01
class FastfoodLayer(lasagne.layers.Layer):
incoming = None
num_units = None
G = None
B = None
S = None
H = None
PI = None
def __init__(self, incoming, num_units):
self.incoming = incoming
self.num_units = num_units
self.rng = rng = np.random.RandomState()
# G - Gaussian random matrix
diag_values = np.asarray(rng.normal(0, 1, size=num_units))
G_values = np.zeros((num_units, num_units))
for i in xrange(num_units):
G_values[i, i] = diag_values[i]
self.G = G = theano.shared(value=G_values, name='G', borrow=True)
# B - binary scaling matrix
diag_values = rng.randint(0, 2, size=num_units)
B_values = np.zeros((num_units, num_units))
for i in xrange(num_units):
B_values[i, i] = diag_values[i] if diag_values[i] == 1 else -1
self.B = theano.shared(value=B_values, name='B', borrow=True)
# S - scaling matrix (???)
S_values = np.zeros((num_units, num_units))
g_frob = (1 / np.sqrt((np.linalg.norm(G.get_value(borrow=True),
ord='fro'))))
| area = (1.0 / np.sqrt(num_units * np.pi)) *\
((2 * np.pi *np.exp(1)) / num_units) ** (num_units / 2)
s_i = ((2.0 * np.pi) ** (-num_units / 2.0)) * (1.0 / area)
for i in xrange(num_units):
S_values[i, i] = s_i * g_frob
self.S = theano.shared(value=S_values, name='S', borrow=True)
# pi - permutation matrix
# generated by shuffling the columns of the dxd identity matrix
perm_matrix_values = np.identity(num_units)
| np.random.shuffle(np.transpose(perm_matrix_values))
perm_matrix = theano.shared(value=perm_matrix_values, name='PI',
borrow=True)
self.PI = perm_matrix
# H - Hadamard matrix
H_values = hadamard(num_units, dtype=np.int)
H = theano.shared(value=H_values, name='H', borrow=True)
self.H = H
def get_params(self):
return [self.S, self.G, self.B]
def get_output_for(self, input, **kwargs):
sigma = 0.01
m = 0.1
var = reduce(T.dot, [self.S, self.H, self.G, self.PI, self.H, self.B,
input])
phi_exp = (1 / (sigma * np.sqrt(self.num_units))) * var
phi_exp = phi_exp % (2*np.pi)
phi = 1/np.sqrt(m)*T.sin(phi_exp) # M*e^(jtheta) = Mcos(theta) + jMsin(theta), so don't need (1 / numpy.sqrt(m)) * T.exp(1j * phi_exp)
return phi
def fnnet_loss(yhat, y):
return lasagne.objectives.categorical_crossentropy(yhat, y)
def _load_data(url=DATA_URL, filename=DATA_FILENAME):
"""Load data from `url` and store the result in `filename`."""
if not os.path.exists(filename):
print("Downloading MNIST dataset")
urlretrieve(url, filename)
with gzip.open(filename, 'rb') as f:
return pickle_load(f, encoding='latin-1')
def load_data():
"""Get data with labels, split into training, validation and test set."""
data = _load_data()
X_train, y_train = data[0]
X_valid, y_valid = data[1]
X_test, y_test = data[2]
return dict(
X_train=theano.shared(lasagne.utils.floatX(X_train)),
y_train=T.cast(theano.shared(y_train), 'int32'),
X_valid=theano.shared(lasagne.utils.floatX(X_valid)),
y_valid=T.cast(theano.shared(y_valid), 'int32'),
X_test=theano.shared(lasagne.utils.floatX(X_test)),
y_test=T.cast(theano.shared(y_test), 'int32'),
num_examples_train=X_train.shape[0],
num_examples_valid=X_valid.shape[0],
num_examples_test=X_test.shape[0],
input_dim=X_train.shape[1],
output_dim=10,
)
def build_model(input_dim, output_dim,
batch_size=BATCH_SIZE, num_hidden_units=NUM_HIDDEN_UNITS):
"""Create a symbolic representation of a neural network with `input_dim`
input nodes, `output_dim` output nodes and `num_hidden_units` per hidden
layer.
The training function of this model must have a mini-batch size of
`batch_size`.
A theano expression which represents such a network is returned.
"""
# TODO: more layers and noise layers (dropout/gaussian)
# d is closest power of 2
d = int(2**np.ceil(np.log2(input_dim)))
print('batch_size =', batch_size, ', input_dim =', input_dim)
print('d =', d)
l_in = lasagne.layers.InputLayer(
shape=(batch_size, input_dim),
)
l_pad = lasagne.layers.PadLayer(
incoming=l_in,
width=d
)
l_ff1 = FastfoodLayer(
incoming=l_pad,
num_units=d,
) # TODO: trim d back down to num_hidden_units as they were just 0s
l_hidden1 = lasagne.layers.DenseLayer(
incoming=l_ff1,
num_units=d,
nonlinearity=lasagne.nonlinearities.sigmoid,
)
l_ff2 = FastfoodLayer(
incoming=l_hidden1,
num_units=d,
)
l_out = lasagne.layers.DenseLayer(
incoming=l_ff2,
num_units=output_dim,
nonlinearity=lasagne.nonlinearities.softmax,
)
return l_out
def create_iter_functions(dataset, output_layer,
X_tensor_type=T.matrix,
batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE):
"""Create functions for training, validation and testing to iterate one
epoch.
"""
batch_index = T.iscalar('batch_index')
X_batch = X_tensor_type('x')
y_batch = T.ivector('y')
batch_slice = slice(batch_index * batch_size,
(batch_index + 1) * batch_size)
objective = lasagne.objectives.Objective(output_layer,
loss_function=fnnet_loss)
loss_train = objective.get_loss(X_batch, target=y_batch)
loss_eval = objective.get_loss(X_batch, target=y_batch,
deterministic=True)
pred = T.argmax(
output_layer.get_output(X_batch, deterministic=True), axis=1)
accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)
all_params = lasagne.layers.get_all_params(output_layer)
updates = lasagne.updates.sgd(loss_train, all_params, learning_rate)
iter_train = theano.function(
[batch_index], loss_train,
updates=updates,
givens={
X_batch: dataset['X_train'][batch_slice],
y_batch: dataset['y_train'][batch_slice],
},
)
iter_valid = theano.function(
[batch_index], [loss_eval, accuracy],
givens={
X_batch: dataset['X_valid'][batch_slice],
y_batch: dataset['y_valid'][batch_slice],
},
)
iter_test = theano.function(
[batch_index], [loss_eval, accuracy],
givens={
X_batch: dataset['X_test'][batch_slice],
y_batch: dataset['y_test'][batch_slice],
},
)
return dict(
train=iter_train,
valid=iter_valid,
test=iter_test,
)
def train(iter_funcs, dataset, batch_size=BATCH_SIZE):
"""Train the model with `dataset` with mini-batch training. Each
mini-batch has `batch_size` recordings.
"""
num_batches_train = dataset['num_examples_train'] // batch_size
num_batches_valid = dataset['num_examples_valid'] // batch_size
for epoch in itertools.count(1):
|
RedFantom/GSF-Parser | network/minimap/server.py | Python | gpl-3.0 | 4,942 | 0.000405 | """
Author: RedFantom
Contributors: Daethyra (Naiii) and Sprigellania (Zarainia)
License: GNU GPLv3 as in LICENSE
Copyright (C) 2016-2018 RedFantom
"""
import socket
import threading
from queue import Queue
from select import select
from ast import literal_eval
from network.connection import Connection
SUPPORTED_COMMANDS = ["location", "health", "logout", "login"]
class MiniMapServer(threading.Thread):
"""
Server for MiniMap location sharing. Passes on all locations
to all Clients. No security is provided.
"""
def __init__(self, host: str, port: int):
"""
:param host: hostname to bind to
:param port: port to bind to
"""
threading.Thread.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(0)
self.socket.setblocking(False)
self.socket.bind((host, port))
self.client_sockets = list()
self.client_names = dict()
self.exit_queue = Queue()
self.banned = list()
def run(self):
"""
Receive commands from the various clients and distribute their
commands to the other Clients on the Server. This Server
DOES NOT support banning or any form of user control at the
moment.
"""
self.socket.listen(12)
while True:
if not self.exit_queue.empty() and self.exit_queue.get() is True:
print("[MiniMapServer] Exit signal received.")
break
self.accept_clients()
self.update_clients()
for client in self.client_sockets:
client.close("exit")
self.socket.close()
def update_clients(self):
"""Get location information from Clients"""
for client in self.client_sockets.copy():
try:
message = client.get_message()
except socket.timeout:
self.logout_client(client)
continue
# Logout Clients
if message == "logout":
self.logout_client(client)
continue
# Update location
elems = message.split("_")
if len(elems) != 3 or elems[0] not in SUPPORTED_COMMANDS:
raise RuntimeError("Unsupported command: {}".format(message))
_, name, location_tuple = elems
# Safely evaluate location_tuple
tup = literal_eval(location_tuple)
assert isinstance(tup, tuple)
assert len(tup) == 2
# Send location update to other clients
for other in self.client_sockets:
other.send(message)
# Done
return True
def accept_clients(self):
"""Accept new Clients if the Socket is ready"""
if self.socket in select([self.socket], [], [], 0)[0]:
print("[MiniMapServer] Accepting new Client.")
conn, addr = self.socket.accept()
if addr in self.banned:
print("[MiniMapServer] Banned Client attempted to connect: {}".format(addr))
conn.close()
return False
# Login procedure
self.login_client(conn, addr)
return True
def login_client(self, conn, addr):
"""Log a Client into the Server if it gives the right commands"""
conn = Connection(sock=conn)
try:
mess = conn.get_message()
except socket.timeout:
conn.close()
return
if mess is None:
return
elems = mess.split("_")
# elems: ("login", username: str)
if len(elems) != 2 or elems[0] != "login" or elems[1] in self.client_names.values():
conn.close("exit")
# Save connection
conn.send("login") # Confirmation
self.client_sockets.append(conn)
self.client_names[conn] = elems[1]
print("[MiniMapServer] Client Login {}".format(elems[1]))
# Login succeed
for client in self.client_sockets:
client.send(mess) # login_username
# Send current users to newly logged in Client
iterator = self.client_sockets.copy()
iterator.remove(conn)
for client in iterator:
client.send("login_{}".format(self.client_names[conn]))
return True
def logout_client(self, client):
| """Logout a Client from the Server"""
self.client_sockets.remove(client)
name = self.client_names[client]
for client_alt in self.client_sockets:
client_alt.send("logout_{}".format(name))
del self.client_names[client]
try:
client.send("exi | t")
except (OSError, socket.error):
pass
print("[MiniMapServer] Client logout")
client.close()
def stop(self):
    """Stop the Server's activities."""
    # A single sentinel item on exit_queue signals the serving loop to end.
    self.exit_queue.put(True)
|
HMProgrammingClub/NYCSL | problems/workers/worker.py | Python | mit | 6,938 | 0.027674 | import os
import os.path
import stat
import platform
import trueskill
import pymysql.cursors
import random
import shutil
import urllib.request
import urllib.parse
from sandbox import *
from config import *
import copy
# Database id of the Tron problem this worker runs matches for.
TRON_PROBLEM_ID = 3
# Single shared DB connection/cursor used by every function in this worker.
# NOTE(review): host/user/database are hardcoded here; consider moving them
# next to PASS in config.
cnx = pymysql.connect(host="162.243.229.252", user="superuser", database="Defhacks", password=PASS, charset="utf8mb4", cursorclass=pymysql.cursors.DictCursor)
cursor = cnx.cursor()
def unpack(filePath, destinationFilePath):
    """Extract the zip archive at *filePath* into *destinationFilePath*.

    The archive is first extracted into a temporary ``bot`` subdirectory,
    any ``__MACOSX`` junk folder is dropped, and the contents are then
    hoisted up into *destinationFilePath* itself.
    """
    tempPath = os.path.join(destinationFilePath, "bot")
    os.mkdir(tempPath)
    # Extract the archive into a folder call 'bot'
    if platform.system() == 'Windows':
        # NOTE(review): the +"." appends a literal '.' to the archive path
        # handed to 7z -- looks unintentional; confirm before changing.
        os.system("7z x -o"+tempPath+" -y "+filePath+". > NUL")
    else:
        os.system("unzip -u -d"+tempPath+" "+filePath+" > /dev/null 2> /dev/null")
    # Remove __MACOSX folder if present
    macFolderPath = os.path.join(tempPath, "__MACOSX")
    if os.path.exists(macFolderPath) and os.path.isdir(macFolderPath):
        shutil.rmtree(macFolderPath)
    # Copy contents of bot folder to destinationFilePath remove bot folder
    for filename in os.listdir(tempPath):
        shutil.move(os.path.join(tempPath, filename), os.path.join(destinationFilePath, filename))
    shutil.rmtree(tempPath)
    #os.remove(filePath)
def runGame(userIDs, muValues, sigmaValues):
    """Run one Tron match between the two bots in *userIDs*.

    Unpacks both bots into a scratch directory, runs the game inside a
    Sandbox, parses the winner from the game output, updates both players'
    TrueSkill ratings in the database, and records the game + replay file.

    :param userIDs: two user ids; their bot archives live in ../outputs/TR/.
    :param muValues, sigmaValues: current TrueSkill parameters, parallel to
        *userIDs*.
    """
    print("Starting game")
    # Setup working path (recreated fresh for every match)
    workingPath = "workingPath"
    if os.path.exists(workingPath):
        shutil.rmtree(workingPath)
    os.makedirs(workingPath)
    os.chmod(workingPath, 0o777)
    shutil.copyfile("Tron_Environment.py", os.path.join(workingPath, "Tron_Environment.py"))
    sandbox = Sandbox(workingPath)
    # Unpack and setup bot files
    botPaths = [os.path.join(workingPath, str(userID)) for userID in userIDs]
    for botPath in botPaths: os.mkdir(botPath)
    for a in range(len(userIDs)): unpack("../outputs/TR/"+ str(userIDs[a]) + ".zip", botPaths[a])
    for botPath in botPaths:
        # Silently abort the whole match if either bot lacks a run.sh.
        if os.path.isfile(os.path.join(botPath, "run.sh")) == False:
            return
        os.chmod(botPath, 0o777)
        os.chmod(os.path.join(botPath, "run.sh"), 0o777)
    # Build the shell command that will run the game. Executable called environment houses the game environment
    runGameShellCommand = "python3 /var/www/nycsl/problems/workers/"+workingPath+"/Tron_Environment.py "
    for botPath in botPaths: runGameShellCommand += "\"cd "+os.path.abspath(botPath)+"; "+os.path.join(os.path.abspath(botPath), "run.sh")+"\" "
    print(runGameShellCommand)
    # Run game and collect its stdout lines until EOF
    sandbox.start(runGameShellCommand)
    lines = []
    while True:
        line = sandbox.read_line(200)
        if line == None:
            break
        lines.append(line)
    print("Output----------------------")
    print("\n".join(lines));
    print("----------------------------")
    # Get player ranks and scores by parsing shellOutput
    if len(lines) < 2:
        print("NOT ENOUGH OUTPUT!!!!\n ABORTING")
        return
    if "won!" in lines[-2]:
        # Second-to-last line looks like "Player N won!" (1-based index).
        winnerIndex = int(lines[-2][len("Player ") : -len("won!")]) - 1
        loserIndex = 0 if winnerIndex == 1 else 1
    else:
        # No explicit winner reported: pick one at random.
        winnerIndex = random.randrange(0, 2)
        loserIndex = 0 if winnerIndex == 1 else 1
    winnerID = userIDs[winnerIndex]
    loserID = userIDs[loserIndex]
    # Update trueskill mu and sigma values; score = mu - 3*sigma (conservative skill estimate)
    winnerRating = trueskill.Rating(mu=float(muValues[winnerIndex]), sigma=float(sigmaValues[winnerIndex]))
    loserRating = trueskill.Rating(mu=float(muValues[loserIndex]), sigma=float(sigmaValues[loserIndex]))
    winnerRating, loserRating = trueskill.rate_1vs1(winnerRating, loserRating)
    print("Score: " + str(winnerRating.mu - (3*winnerRating.sigma)))
    # NOTE(review): SQL is built with % interpolation; values here are numeric
    # so it works, but parameterized queries would be safer.
    cursor.execute("UPDATE Submission SET mu = %f, sigma = %f, score = %f WHERE userID = %d and problemID = %d" % (winnerRating.mu, winnerRating.sigma, float(winnerRating.mu - (3*winnerRating.sigma)), winnerID, TRON_PROBLEM_ID))
    cursor.execute("UPDATE Submission SET mu = %f, sigma = %f, score = %f WHERE userID = %d and problemID = %d" % (loserRating.mu, loserRating.sigma, float(loserRating.mu - (3*loserRating.sigma)), loserID, TRON_PROBLEM_ID))
    cnx.commit()
    # Get replay file by parsing shellOutput (last line names the output file)
    replayFilename = lines[-1][len("Output file is stored at ") : len(lines[-1])]
    shutil.move(os.path.join(workingPath, replayFilename), "../storage")
    # Store results of game
    cursor.execute("INSERT INTO Game (replayFilename) VALUES (\'"+os.path.basename(replayFilename)+"\')")
    cnx.commit()
    cursor.execute("SELECT gameID FROM Game WHERE replayFilename = \'"+replayFilename+"\'")
    gameID = cursor.fetchone()['gameID']
    cursor.execute("INSERT INTO GameToUser (gameID, userID, rank, playerIndex) VALUES (%d, %d, %d, %d)" % (gameID, winnerID, 0, 0 if userIDs[0] == winnerID else 1))
    cursor.execute("INSERT INTO GameToUser (gameID, userID, rank, playerIndex) VALUES (%d, %d, %d, %d)" % (gameID, loserID, 1, 0 if userIDs[0] == loserID else 1))
    cnx.commit()
    print("Done inserting into mysql")
    shutil.rmtree(workingPath)
    print("Done with game")
def getRank(submissionID):
    """Ask the NYCSL web API for the current leaderboard rank of a submission."""
    url = "http://nycsl.io/php/rank?submissionID="+str(submissionID)
    return int(urllib.request.urlopen(url).read())
def postToSlack(text):
    """Post *text* to Slack -- currently a no-op (both calls are disabled)."""
    pass
    #urllib.request.urlopen("https://slack.com/api/chat.postMessage?"+ urllib.parse.urlencode({"token" : ROBOTICS_SLACK_TOKEN, "channel" : "programming_electrics", "text": text}))
    #urllib.request.urlopen("https://slack.com/api/chat.postMessage?"+ urllib.parse.urlencode({"token" : NYCSL_SLACK_TOKEN, "channel" : "general", "text": text}))
# Matchmaking loop: forever pick a random ready submission, pit it against a
# random other submission, run the game, and announce any rank changes.
while True:
    cursor.execute("SELECT * FROM Submission WHERE isReady = 1 and problemID = " + str(TRON_PROBLEM_ID))
    submissions = cursor.fetchall()
    submissions.sort(key=lambda x: int(x['score']))
    submission = submissions[random.randrange(0, len(submissions))]
    # Opponent is drawn from every other ready submission.
    allowedOpponents = copy.deepcopy(submissions)
    allowedOpponents.remove(submission)
    opponent = allowedOpponents[random.randrange(0, len(allowedOpponents))]
    # Snapshot ranks before the match so changes can be detected afterwards.
    submissionStartingRank = getRank(submission['submissionID'])
    opponentStartingRank = getRank(opponent['submissionID'])
    runGame([submission['userID'], opponent['userID']], [submission['mu'], opponent['mu']], [submission['sigma'], opponent['sigma']])
    newSubmissionRank = getRank(submission['submissionID'])
    newOpponentRank = getRank(opponent['submissionID'])
    def rankChangePost(userID, rank):
        # Announce the player's new rank on Slack.
        cursor.execute("SELECT * FROM User WHERE userID="+str(userID))
        player = cursor.fetchone()
        postToSlack(player['firstName'] + " " + player['lastName'] + " has moved into rank " + str(rank))
    if newSubmissionRank != submissionStartingRank:
        rankChangePost(submission['userID'], newSubmissionRank)
    if newOpponentRank != opponentStartingRank:
        rankChangePost(opponent['userID'], newOpponentRank)
    # Cap the replay storage: drop the oldest (lexicographically first) file.
    if len(os.listdir("../storage")) > 100000:
        files = os.listdir("../storage")
        files.sort()
        for f in files:
            if os.path.isfile(os.path.join("../storage", f)):
                os.remove(os.path.join("../storage", f))
                break
    # Keep docker from crashing the system
    os.system("sudo rm /run/network/ifstate.veth*")
    os.system("docker stop $(docker ps -a -q)")
    os.system("docker rm $(docker ps -a -q)")
|
henriquegemignani/randovania | randovania/game_description/world/dock.py | Python | gpl-3.0 | 1,738 | 0.000575 | from dataclasses import d | ataclass
from enum import unique, Enum
from typing import Iterator
from frozendict import frozendict
from randovania.game_description.requirements import Requirement
@unique
class DockLockType(Enum):
    """How a dock can be locked, as seen from its front and back sides."""
    FRONT_ALWAYS_BACK_FREE = 0
    FRONT_BLAST_BACK_FREE_UNLOCK = 1
    FRONT_BLAST_BACK_BLAST = 2
    FRONT_BLAST_BACK_IMPOSSIBLE = 3
@dataclass(frozen=True, order=True)
class DockWeakness:
    """One kind of dock lock, with the requirement needed to pass it."""
    name: str
    lock_type: DockLockType
    extra: frozendict
    requirement: Requirement

    def __hash__(self):
        # `requirement` is deliberately left out of the hash.
        return hash((self.name, self.lock_type, self.extra))

    def __repr__(self):
        return self.name

    @property
    def long_name(self):
        # Alias of `name`.
        return self.name
@dataclass(frozen=True)
class DockType:
    """A category of dock, identified by its short name."""
    short_name: str
    long_name: str
    extra: frozendict
@dataclass(frozen=True)
class DockWeaknessDatabase:
    """Lookup table of dock weaknesses, keyed by dock type and weakness name."""
    dock_types: list[DockType]
    weaknesses: dict[DockType, dict[str, DockWeakness]]
    default_weakness: tuple[DockType, DockWeakness]

    def find_type(self, dock_type_name: str) -> DockType:
        """Return the dock type whose short_name matches, else raise KeyError."""
        found = next(
            (candidate for candidate in self.dock_types
             if candidate.short_name == dock_type_name),
            None,
        )
        if found is None:
            raise KeyError(f"Unknown dock_type_name: {dock_type_name}")
        return found

    def get_by_type(self, dock_type: DockType) -> Iterator[DockWeakness]:
        """Yield every weakness registered for *dock_type*."""
        for weakness in self.weaknesses[dock_type].values():
            yield weakness

    def get_by_weakness(self, dock_type_name: str, weakness_name: str) -> DockWeakness:
        """Resolve a weakness from its dock type short name and weakness name."""
        return self.weaknesses[self.find_type(dock_type_name)][weakness_name]

    @property
    def all_weaknesses(self) -> Iterator[DockWeakness]:
        """Iterate over every weakness of every dock type."""
        return (weakness
                for per_type in self.weaknesses.values()
                for weakness in per_type.values())
|
leotrubach/sourceforge-allura | ForgeGit/forgegit/tests/functional/test_controllers.py | Python | apache-2.0 | 8,569 | 0.003151 | import json
import re
import tg
import pkg_resources
import pylons
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pylons import c
from ming.orm import ThreadLocalORMSession
from datadiff.tools import assert_equal
from allura import model as M
from allura.lib import helpers as h
from allura.tests import decorators as td
from alluratest.controller import TestController
class _TestCase(TestController):
    """Base class: points the `test` project's src-git tool at the bundled
    test repository (forgegit/tests/data/testgit.git) and refreshes it."""
    def setUp(self):
        super(_TestCase, self).setUp()
        self.setup_with_tools()

    @td.with_git
    def setup_with_tools(self):
        h.set_context('test', 'src-git', neighborhood='Projects')
        repo_dir = pkg_resources.resource_filename(
            'forgegit', 'tests/data')
        c.app.repo.fs_path = repo_dir
        c.app.repo.status = 'ready'
        c.app.repo.name = 'testgit.git'
        # Persist the repo config, then re-enter the context and refresh so
        # commit data is indexed before any test runs.
        ThreadLocalORMSession.flush_all()
        ThreadLocalORMSession.close_all()
        h.set_context('test', 'src-git', neighborhood='Projects')
        c.app.repo.refresh()
        ThreadLocalORMSession.flush_all()
        ThreadLocalORMSession.close_all()
class TestRootController(_TestCase):
    """Functional tests for the Git tool's web UI (browse, log, diff, feed)."""
    def test_index(self):
        resp = self.app.get('/src-git/').follow().follow()
        assert 'git://' in resp

    def test_index_empty(self):
        self.app.get('/git/')

    def test_commit_browser(self):
        resp = self.app.get('/src-git/commit_browser')

    def test_commit_browser_data(self):
        resp = self.app.get('/src-git/commit_browser_data')
        data = json.loads(resp.body);
        assert data['max_row'] == 3
        assert data['next_column'] == 1
        assert_equal(data['built_tree']['df30427c488aeab84b2352bdf88a3b19223f9d7a'],
                     {u'url': u'/p/test/src-git/ci/df30427c488aeab84b2352bdf88a3b19223f9d7a/',
                      u'oid': u'df30427c488aeab84b2352bdf88a3b19223f9d7a',
                      u'column': 0,
                      u'parents': [u'6a45885ae7347f1cac5103b0050cc1be6a1496c8'],
                      u'message': u'Add README', u'row': 1})

    def test_log(self):
        resp = self.app.get('/src-git/ref/master~/log/')

    def test_tags(self):
        resp = self.app.get('/src-git/ref/master~/tags/')

    def _get_ci(self):
        # Helper: resolve master to a concrete /ci/<sha>/ URL from the page links.
        r = self.app.get('/src-git/ref/master:/')
        resp = r.follow()
        for tag in resp.html.findAll('a'):
            if tag['href'].startswith('/p/test/src-git/ci/'):
                return tag['href']
        return None

    def test_commit(self):
        ci = self._get_ci()
        resp = self.app.get(ci)
        assert 'Rick' in resp, resp.showbrowser()

    def test_feed(self):
        assert 'Add README' in self.app.get('/feed')

    def test_tree(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/')
        assert len(resp.html.findAll('tr')) == 2, resp.showbrowser()
        resp = self.app.get(ci + 'tree/')
        assert 'README' in resp, resp.showbrowser()
        links = [ a.get('href') for a in resp.html.findAll('a') ]
        assert 'README' in links, resp.showbrowser()
        assert 'README/' not in links, resp.showbrowser()

    def test_tree_extra_params(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/?format=raw')
        assert 'README' in resp, resp.showbrowser()

    def test_file(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/README')
        assert 'README' in resp.html.find('h2', {'class':'dark title'}).contents[2]
        content = str(resp.html.find('div', {'class':'clip grid-19'}))
        assert 'This is readme' in content, content
        assert '<span id="l1" class="code_block">' in resp
        assert 'var hash = window.location.hash.substring(1);' in resp

    def test_invalid_file(self):
        ci = self._get_ci()
        self.app.get(ci + 'tree/READMEz', status=404)

    def test_diff(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/README?diff=df30427c488aeab84b2352bdf88a3b19223f9d7a')
        assert 'readme' in resp, resp.showbrowser()
        assert '+++' in resp, resp.showbrowser()

    def test_refresh(self):
        # The refresh should have produced a commit notification whose
        # reply-to address is derived from the tool's URL.
        notification = M.Notification.query.find(
            dict(subject='[test:src-git] 4 new commits to test Git')).first()
        domain = '.'.join(reversed(c.app.url[1:-1].split('/'))).replace('_', '-')
        common_suffix = tg.config.get('forgemail.domain', '.sourceforge.net')
        email = 'noreply@%s%s' % (domain, common_suffix)
        assert email in notification['reply_to_address']

    def test_file_force_display(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/README?force=True')
        content = str(resp.html.find('div', {'class':'clip grid-19'}))
        assert re.search(r'<pre>.*This is readme', content), content
        assert '</pre>' in content, content
class TestRestController(_TestCase):
    """Smoke tests for the Git tool's REST API endpoints."""
    def test_index(self):
        self.app.get('/rest/p/test/src-git/', status=200)

    def test_commits(self):
        self.app.get('/rest/p/test/src-git/commits', status=200)
class TestFork(_TestCase):
def setUp(self):
super(TestFork, self).setUp()
to_project = M.Project.query.get(
shortname='test2', neighborhood_id=c.project.neighborhood_id)
r = self.app.post('/src-git/fork', params=dict(
project_id=str(to_project._id),
mount_point='code',
mount_label='Test forked repository'))
assert "{status: 'error'}" not in str(r.follow())
cloned_from = c.app.repo
with h.push_context('test2', 'code', neighborhood='Projects'):
c.app.repo.init_as_clone(
cloned_from.full_fs_path,
cloned_from.app.config.script_name(),
cloned_from.full_fs_path)
def _follow(self, r, **kw):
if r.status_int == 302:
print r.request.url
while r.status_int == 302:
print ' ==> 302 ==> %s' % r.location
r = r.follow(**kw)
return r
def _upstream_page(self, **kw):
r = self.app.get('/src-git/', **kw)
r = self._follow(r, **kw)
return r
def _fork_page(self, **kw):
r = self.app.get('/p/test2/code/', **kw)
r = self._follow(r, **kw)
return r
def _request_merge(self, **kw):
r = self.app.get('/p/test2/code/request_merge', **kw)
r = self._follow(r, **kw)
r = r.forms[0].submit()
r = self._follow(r, **kw)
mr_num = r.request.url.split('/')[-2]
assert mr_num.isdigit(), mr_num
return r, mr_num
def test_fork_form(self):
r = self.app.get('%sfork/' % c.app.repo.url())
assert '<input type="text" name="mount_point" value="test"/>' in r
assert '<input type="text" name="mount_label" value="test - Git"/>' in r
def test_fork_listed_in_parent(self):
assert 'Forks' in self._upstream_page()
def test_fork_display(self):
r = self._fork_page()
assert 'Clone of' in r
assert 'Test forked repository' in r
def test_fork_links_go_to_fork(self):
r = self._fork_page()
hrefs = ( a.get('href') for a in r.html('a') )
hrefs = ( href for href in hrefs if href and '/ci/' in href )
for href in hrefs:
assert href.startswith('/p/test2/code/'), href |
def test_merge_request_visible_to_admin(self):
assert 'Request Merge' in self._fork_page()
def test_merge_request_invisible_to_non_admin(self):
assert 'Request Merge' not in self._fork_page(
extra_environ=dict(username='test-user'))
def test_merge_action_available_to_admin(self):
self.app.get('/p/test2/code/request_merge')
def test_merge_action_unavailable_to_non_admin(self):
s | elf.app.get(
'/p/test2/code/request_merge',
status=403, extra_environ=dict(username='test-user'))
def test_merge_request_detail_view(self):
r, mr_num = self._request_merge()
assert 'would like you to merge' in r, r.showbrowser()
def test_merge_request_list_view(self):
r, mr_num = self._request_merge()
r = self |
ailabitmo/sempubchallenge2014-task1 | CeurWsParser/parsers/publication_parser.py | Python | mit | 13,497 | 0.003186 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import re
import urllib
import os
import tempfile
import traceback
from grab.error import DataNotFound
from grab.tools import rex
from grab.spider import Task
from rdflib import URIRef, Literal
from rdflib.namespace import RDF, RDFS, FOAF, DCTERMS, DC, XSD
from PyPDF2 import PdfFileReader
from base import Parser, create_proceedings_uri, create_publication_uri, clean_string
from namespaces import SWRC, BIBO, SWC
import config
class PublicationParser(Parser):
def begin_template(self):
    """Initialise per-volume parsing state shared by every template method.

    Fills self.data with the volume URL/number and the list of
    (workshop URI, short title) pairs already present in the graph.
    """
    self.data['workshop'] = self.task.url
    self.data['volume_number'] = self.extract_volume_number(self.task.url)
    proceedings = create_proceedings_uri(self.data['volume_number'])
    self.data['workshops'] = []
    for workshop in self.graph.objects(proceedings, BIBO.presentedAt):
        try:
            # Python 2 iterator protocol; StopIteration means no shortTitle.
            label = self.graph.objects(workshop, BIBO.shortTitle).next()
            self.data['workshops'].append((workshop, label.toPython()))
        except StopIteration:
            pass
        except:
            # NOTE(review): bare except silently skips any other failure;
            # it only prints the traceback and continues.
            traceback.print_exc()
def end_template(self):
    """Finish a template run by validating the collected data."""
    self.check_for_completeness()
@staticmethod
def is_invited(publication):
    """Return True when the paper's link marks it as a keynote/invited paper."""
    match = rex.rex(publication['link'], r'.*(keynote|invite).*', re.I, default=None)
    return match is not None
def write(self):
    """Serialise the collected publications into RDF triples and store them."""
    print "[TASK %s][PublicationParser] Count of publications: %s" % (self.task.url, len(self.data['publications']))
    triples = []
    proceedings = URIRef(self.data['workshop'])
    for publication in self.data['publications']:
        # Skip entries whose file name is blank/whitespace-only.
        if len(publication['file_name'].split()) > 0:
            # Queue the PDF itself for a follow-up crawl.
            self.spider.add_task(Task('initial', url=publication['link']))
            resource = create_publication_uri(self.data['workshop'], publication['file_name'])
            triples.append((proceedings, DCTERMS.hasPart, resource))
            triples.append((resource, RDF.type, FOAF.Document))
            triples.append((resource, DCTERMS.partOf, proceedings))
            triples.append((resource, RDF.type, SWRC.InProceedings))
            triples.append((resource, DC.title, Literal(publication['name'], datatype=XSD.string)))
            triples.append((resource, FOAF.homepage, Literal(publication['link'], datatype=XSD.anyURI)))
            if publication['is_invited']:
                triples.append((resource, RDF.type, SWC.InvitedPaper))
            for editor in publication['editors']:
                # One foaf:Agent per author, linked both ways to the paper.
                agent = URIRef(config.id['person'] + urllib.quote(editor.encode('utf-8')))
                triples.append((agent, RDF.type, FOAF.Agent))
                triples.append((agent, FOAF.name, Literal(editor, datatype=XSD.string)))
                triples.append((resource, DC.creator, agent))
                triples.append((resource, FOAF.maker, agent))
                triples.append((agent, FOAF.made, resource))
            if 'presentedAt' in publication and len(publication['presentedAt']) > 0:
                for w in publication['presentedAt']:
                    triples.append((resource, BIBO.presentedAt, w))
    self.write_triples(triples)
def parse_template_1(self):
    """Parse volumes whose table of contents carries RDFa markup
    (typeof/rel attributes on the TOC entries)."""
    self.begin_template()
    publications = []
    for publication in self.grab.tree.xpath('//div[@class="CEURTOC"]/*[@rel="dcterms:hasPart"]/li'):
        try:
            name = clean_string(publication.find('a[@typeof="bibo:Article"]/span').text_content())
            publication_link = publication.find('a[@typeof="bibo:Article"]').get('href')
            editors = []
            for publication_editor in publication.findall('span/span[@rel="dcterms:creator"]'):
                publication_editor_name = publication_editor.find('span[@property="foaf:name"]').text_content()
                editors.append(clean_string(publication_editor_name))
            file_name = publication_link.rsplit('.pdf')[0].rsplit('/')[-1]
            publication_object = {
                'name': name,
                'file_name': file_name,
                'link': self.task.url + publication_link,
                'editors': editors
            }
            publication_object['is_invited'] = self.is_invited(publication_object)
            if self.check_for_workshop_paper(publication_object):
                publications.append(publication_object)
        except Exception as ex:
            # Any parsing failure means this volume does not match template 1.
            raise DataNotFound(ex)
    self.data['publications'] = publications
    self.end_template()
def parse_template_2(self):
    """Parse volumes that mark TOC entries with CEURTITLE/CEURAUTHORS
    CSS classes.

    Examples:
        - http://ceur-ws.org/Vol-1008/
        - http://ceur-ws.org/Vol-1043/
    """
    self.begin_template()
    publications = []
    for element in self.grab.tree.xpath('/html/body//*[@class="CEURTOC"]//*[a and '
                                        'descendant-or-self::*[@class="CEURAUTHORS"] and '
                                        'descendant-or-self::*[@class="CEURTITLE"]]'):
        try:
            name_el = element.find_class('CEURTITLE')[0]
            name = clean_string(name_el.text_content()).strip()
            if name is None or not name:
                # In case of unclosed span element with the author list
                # Example: http://ceur-ws.org/Vol-640
                name = clean_string(name_el.tail)
            href = element.find('a').get('href')
            link = href if href.startswith('http://') else self.task.url + href
            editors = []
            editors_list_el = element.find_class('CEURAUTHORS')[0]
            editors_list = clean_string(editors_list_el.text_content())
            if not editors_list:
                # In case of unclosed span element with the author list
                # Example: http://ceur-ws.org/Vol-1043
                editors_list = clean_string(editors_list_el.tail)
            for editor_name in editors_list.split(","):
                editor_name = clean_string(editor_name.strip())
                if editor_name:
                    editors.append(editor_name)
            if not editors:
                #a publication should have non-empty list of authors
                raise DataNotFound(link)
            file_name = link.rsplit('.pdf')[0].rsplit('/')[-1]
            publication_object = {
                'name': name,
                'file_name': file_name,
                'link': link,
                'editors': editors
            }
            publication_object['is_invited'] = self.is_invited(publication_object)
            if len(self.data['workshops']) > 1:
                # Multi-workshop volume: attribute the paper to the workshop(s)
                # whose short title appears in the nearest CEURSESSION heading.
                try:
                    session = self.grab.tree.xpath(
                        '//a[@href="%s"]/preceding::*[@class="CEURSESSION"][1]' % href)[0]
                    publication_object['presentedAt'] = []
                    for w in self.data['workshops']:
                        if w[1] is not None and w[1] in session.text:
                            publication_object['presentedAt'].append(w[0])
                except:
                    # traceback.print_exc()
                    pass
            if self.check_for_workshop_paper(publication_object):
                publications.append(publication_object)
        except Exception as ex:
            # Any parsing failure means this volume does not match template 2.
            raise DataNotFound(ex)
    self.data['publications'] = publications
    self.end_template()
def parse_template_3(self):
self.begin_template()
publications = []
elements = self.grab.tree.xpath('//li[a[@href] and (i or em or br)]')
if elements is None or len(elements) == 0:
elements = self.grab.tree.xpath('//p[a[@href] and (i or em)]')
for publication in elements:
try:
name = clean_string(publication.find('a').text_content())
if rex.rex(name, r'.*(preface|first\s+pages|author\s+list|forewo |
mvaled/sentry | src/sentry/tasks/options.py | Python | bsd-3-clause | 1,214 | 0.002471 | from __future__ import absolute_import
import logging
import six
from datetime import timedelta
from django.utils import timezone
from sentry.models import Option
from sentry.options import default_manager
from sentry.options.manager import UnknownOption
from sentry.tasks.base import instrumented_task
ONE_HOUR = 60 * 60
logger = logging.getLogger("sentry")
@instrumented_task(name="sentry.tasks.options.sync_options", queue="options")
def sync_options(cutoff=ONE_HOUR):
    """
    Ensures all options that have been updated (within the database) since
    ``cutoff`` have their correct values stored in the cache.

    This **does not** guarantee that the correct value is written into the cache
    though it will correct itself in the next update window.

    :param cutoff: look-back window in seconds (defaults to one hour).
    """
    cutoff_dt = timezone.now() - timedelta(seconds=cutoff)
    # TODO(dcramer): this doesnt handle deleted options (which shouldn't be allowed)
    for option in Option.objects.filter(last_updated__gte=cutoff_dt).iterator():
        try:
            opt = default_manager.lookup_key(option.key)
            default_manager.store.set_cache(opt, option.value)
        except UnknownOption as e:
            # Option exists in the DB but not in the registered defaults;
            # log it and keep syncing the rest.
            logger.exception(six.text_type(e))
|
bonville/nyc-councilmatic-test | core/migrations/0001_initial.py | Python | mit | 9,776 | 0.00358 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('date', models.DateTimeField(default=None)),
('classification', models.CharField(max_length=100)),
('description', models.TextField(blank=True)),
('order', models.IntegerField()),
],
),
migrations.CreateModel(
name='ActionRelatedEntity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('entity_type', models.CharField(max_length=100)),
('entity_name', models.CharField(max_length=255)),
('organization_ocd_id', models.CharField(max_length=100, blank=True)),
('person_ocd_id', models.CharField(max_length=100, blank=True)),
('action', models.ForeignKey(to='core.Action', related_name='related_entities')),
],
),
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('ocd_id', models.CharField(max_length=100, unique=True)),
('description', models.TextField()),
('identifier', models.CharField(max_length=50)),
('bill_type', models.CharField(max_length=50)),
('classification', models.CharField(max_length=100)),
('date_created', models.DateTimeField(default=None)),
('date_updated', models.DateTimeField(null=True, default=None)),
('source_url', models.CharField(max_length=255)),
('source_note', models.CharField(max_length=255, blank=True)),
('full_text', models.TextField(blank=True)),
('last_action_date', models.DateTimeField(null=True, default=None)),
('slug', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='BillDocument',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('bill', models.ForeignKey(to='core.Bill', related_name='documents')),
],
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('note', models.TextField()),
('url', models.TextField()),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('ocd_id', models.CharField(max_length=100, unique=True)),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('classification', models.CharField(max_length=100)),
('start_time', models.DateTimeField()),
('end_t | ime', models.DateTimeField(null=True)),
('all_day', models.BooleanField(default=False)),
('status', models.CharField(max_length=100)),
('location_name', | models.CharField(max_length=255)),
('location_url', models.CharField(max_length=255, blank=True)),
('source_url', models.CharField(max_length=255)),
('source_note', models.CharField(max_length=255, blank=True)),
],
),
migrations.CreateModel(
name='EventDocument',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('document', models.ForeignKey(to='core.Document', related_name='events')),
('event', models.ForeignKey(to='core.Event', related_name='documents')),
],
),
migrations.CreateModel(
name='EventParticipant',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('note', models.TextField()),
('entity_name', models.CharField(max_length=255)),
('entity_type', models.CharField(max_length=100)),
('event', models.ForeignKey(to='core.Event', related_name='participants')),
],
),
migrations.CreateModel(
name='LegislativeSession',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('identifier', models.CharField(max_length=255)),
('jurisdiction_ocd_id', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Membership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('label', models.CharField(max_length=255, blank=True)),
('role', models.CharField(max_length=255, blank=True)),
('start_date', models.DateField(null=True, default=None)),
('end_date', models.DateField(null=True, default=None)),
],
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('ocd_id', models.CharField(max_length=100, unique=True)),
('name', models.CharField(max_length=255)),
('classification', models.CharField(max_length=255, null=True)),
('slug', models.CharField(max_length=255, unique=True)),
('parent', models.ForeignKey(null=True, to='core.Organization', related_name='children')),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('ocd_id', models.CharField(max_length=100, unique=True)),
('name', models.CharField(max_length=100)),
('headshot', models.CharField(max_length=255, blank=True)),
('source_url', models.CharField(max_length=255)),
('source_note', models.CharField(max_length=255, blank=True)),
('slug', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('ocd_id', models.CharField(max_length=100, unique=True)),
('label', models.CharField(max_length=255)),
('role', models.CharField(max_length=255)),
('organization', models.ForeignKey(to='core.Organization', related_name='posts')),
],
),
migrations.CreateModel(
name='Sponsorship',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('classification', models.CharField(max_length=255)),
('is_primary', models.BooleanField(default=False)),
('bill', models.ForeignKey(to='core.Bill', related_name='sponsorships')),
('person', models.ForeignKey(to='core.Person', related_name='sponsorships')),
|
FunnyMan3595/mcp_rebuild | settings.py | Python | mit | 2,293 | 0.000436 | #!/usr/bin/env python
# All (current) configuration settings take the form of paths to specific files
# or directories. Aside from BASE itself, all paths are assumed to be relative
# to BASE unless specified as absolute.
# Windows examples:
# BASE = r"C:\MCP"
# USER = r"mcp_rebuild\My Projects"
# TARGET = r"Z:\My Mods"
# SOURCE_BUNDLE = r"mcp_rebuild\source.tbz2"
#
# This set of configs uses a MCP directory at C:\MCP, looks for the user's
# projects in C:\MCP\mcp_rebuild\My Projects, puts completed packages in
# Z:\My Mods, and keeps a pristine copy of C:\MCP\src in
# C:\MCP\mcp_rebuild\source.tbz2.
#
# Note: Cygwin users should be able to use either Windows or Cygwin-style paths
# (e.g. /cygdrive/c/you/get/the/idea). Send in a bug report if you have
# any problems, the GitHub page is listed in the README.
# Linux examples:
# BASE = r".."
# USER = r"~/mcp_projects"
# TARGET = r" | /var/www/minecraft/bleeding_edge"
# SOURCE_BUNDLE = r".source.tbz2"
#
# This set of configs assumes that rebuild.sh is located in a subdirectory of
# MCP's root. The source bundle is stored as a hidden file in MCP's root,
# projects are read out of the mcp_projects directory in your $HOME, and
# packages are placed directly onto the minecraft/bleeding_edge directory of
# your website. (Assuming, of course, you have a suitably-configured webserver
# and write permissions on that directory.)
# Base MCP directory. If you leave mcp_rebuild in a subdirectory of MCP, you
# can leave this alone.
BASE = r".."
# Base directory for your projects. DO NOT use one of MCP's own subdirectories
# for this! Hanging out with mcp_rebuild is fine, MCP just likes to eat its
# own directory contents.
# The format of this directory's contents is described in the README.
USER = r"mcp_rebuild/projects"
# Where your projects' packages will go when they are created. MCP's
# subdirectories could be used here (since you can always rerun mcp_rebuild), but are not recommended.
TARGET = r"mcp_rebuild/packages"
# Original source bundle, used to reset MCP's source directory to a clean state
# before installing user files.
SOURCE_BUNDLE = r"mcp_rebuild/source.tbz2"
# Okay, I lied a little. This one's not a path. Just set it False once you've
# configured the rest.
UNCONFIGURED = True
|
botswana-harvard/tshilo-dikotla | td_maternal/admin/maternal_interim_idcc_admin.py | Python | gpl-2.0 | 772 | 0.002591 | from collections import Order | edDict
from django.contrib import admin
from edc_export.actions import export_as_csv_action
from tshilo_dikotla.base_model_admin import BaseModelAdmin
from ..forms import MaternalInterimIdccForm
from ..models import MaternalInterimIdcc
from .base_maternal_model_admin import BaseMaternalModelAdmin
class MaternalInterimIdccAdmin(BaseMaternalModelAdmin):
    """Admin configuration for interim IDCC lab-result entries.

    Note: reconstructed `form = MaternalInterimIdccForm` from a corrupted
    line; the name matches the import of MaternalInterimIdccForm above.
    """

    form = MaternalInterimIdccForm

    # Render these choice fields as vertical radio-button groups.
    radio_fields = {'info_since_lastvisit': admin.VERTICAL,
                    'value_vl_size': admin.VERTICAL}

    # Columns shown in the changelist and available sidebar filters.
    list_display = ('report_datetime', 'recent_cd4', 'value_vl',)
    list_filter = ('info_since_lastvisit', 'recent_cd4_date', 'value_vl_size', 'recent_vl_date')


admin.site.register(MaternalInterimIdcc, MaternalInterimIdccAdmin)
|
e-koch/VLA_Lband | 14B-088/HI/imaging/sd_regridding/gbt_feather_beamsize.py | Python | mit | 2,146 | 0.002796 |
'''
How much is the feathered image affected by assuming a wrong beam size?
'''
from spectral_cube import SpectralCube
import matplotlib.pyplot as plt
import os
from corner import hist2d
from radio_beam import Beam
import astropy.units as u
from astropy.visualization import hist
from uvcombine.uvcombine import feather_simple, feather_compare
import numpy as np
import scipy.ndimage as nd
from paths import fourteenB_HI_data_path, data_path
from constants import hi_freq
vla_cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
gbt_path = os.path.join(data_path, "GBT")
gbt_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088_spectralregrid.fits")
gbt_cube = SpectralCube.read(gbt_name)
gbt_fullregrid_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088.fits")
gbt_fullregrid_cube = SpectralCube.read(gbt_fullregrid_name)
beam_fwhm = lambda diam: ((1.2 * 21 * u.cm) / diam.to(u.cm)) * u.rad
chan = 500
gbt_plane = gbt_cube[chan]
gbt_fr_plane = gbt_fullregrid_cube[chan]
vla_plane = vla_cube[chan].to(u.K, vla_cube.beams[chan].jtok_equiv(hi_freq))
feather_80 = feather_simple(vla_plane.hdu, gbt_plane.hdu,
lowresfwhm=beam_fwhm(80 * u.m).to(u.arcsec))
feather_90 = feather_simple(vla_plane.hdu, gbt_plane.hdu,
lowresfwhm=beam_fwhm(90 * u.m).to(u.arcsec))
feather_90_fr = feather_simple(vla_plane.hdu, gbt_fr_plane.hdu,
lowresfwhm=beam_fwhm(90 * u.m).to(u.arcsec))
feather_100 = feather_simple(vla_plane.hdu, gbt_plane.hdu,
lowresfwhm=beam_fwhm(100 * u.m).to(u.arcsec))
mask = gbt_fr_plane.val | ue > 2
vla_beam_kernel = vla_plane.beam.as_tophat_kernel(vla_plane.header["CDELT2"]).array > 0
vla_mask = np.isfinite(vla_plane)
vla_mask = nd.binary_erosion(vla_mask, vla_beam_kernel, iterat | ions=10)
plt.plot([feather_80.real[vla_mask].sum() / gbt_fr_plane[vla_mask].sum().value,
feather_90.real[vla_mask].sum() / gbt_fr_plane[vla_mask].sum().value,
feather_100.real[vla_mask].sum() / gbt_fr_plane[vla_mask].sum().value])
|
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/histogram2d/_legendgrouptitle.py | Python | mit | 4,724 | 0.002117 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Legendgrouptitle(_BaseTraceHierarchyType):

    # class properties
    # --------------------
    _parent_path_str = "histogram2d"
    _path_str = "histogram2d.legendgrouptitle"
    _valid_props = {"font", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this legend group's title font.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.histogram2d.legendgrouptitle.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                size

        Returns
        -------
        plotly.graph_objs.histogram2d.legendgrouptitle.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the legend group.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.
        """

    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Legendgrouptitle object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.histogram2d.Legendgrouptitle`
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.

        Returns
        -------
        Legendgrouptitle
        """
        super(Legendgrouptitle, self).__init__("legendgrouptitle")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.histogram2d.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.Legendgrouptitle`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
samvarankashyap/linch-pin | linchpin/provision/roles/azure/filter_plugins/filter_list_by_attr.py | Python | gpl-3.0 | 275 | 0 | #!/usr/bin/e | nv python
import linchpin.FilterUtils.FilterUti | ls as filter_utils
class FilterModule(object):
    """Expose the ``filter_list_by_attr`` filter to Ansible."""

    def filters(self):
        """Return the mapping of filter names to their implementations."""
        mapping = {'filter_list_by_attr': filter_utils.filter_list_by_attr}
        return mapping
|
braveghz/cobra | tests/test_directory.py | Python | mit | 905 | 0 | # -*- coding: utf-8 -*-
"""
tests.test_directory
~~~~~~~~~~~~~~~~~~~~
Tests pickup.directory
:author: Feei <feei@feei.cn>
:homepage: https://github.com/wufeifei/cobra
    :license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import os
from cobra.config import project_directory
from | cobra.pickup import Directory
def test_file():
    """Collecting a single .py file yields exactly one extension group."""
    target = os.path.join(project_directory, 'cobra.py')
    collected, total_files, elapsed = Directory(target).collect_files()
    extension, details = collected[0]
    assert extension == '.py'
    assert details['count'] == 1
    assert 'cobra.py' in details['list']
    assert total_files == 1
    assert elapsed < 1
def test_directory():
    """Collecting the whole project finds more than one extension group."""
    collected, total_files, elapsed = Directory(project_directory).collect_files()
    assert len(collected) > 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.