| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
zillow/ctds | tests/test_cursor_fetchmany.py | Python | mit | 6,714 | 0.002681 | import ctds
from .base import TestExternalDatabase
class TestCursorFetchMany(TestExternalDatabase):
'''Unit tests related to the Cursor.fetchmany() method.
'''
def test___doc__(self):
self.assertEqual(
ctds.Cursor.fetchmany.__doc__,
'''\
fetchmany(size=self.arraysize)
Fetch the next set of rows of a query result, returning a sequence of
sequences. An empty sequence is returned when no more rows are available.
:pep:`0249#fetchmany`
:return: A sequence of result rows.
:rtype: ctds.RowList
'''
)
def test_closed(self):
with self.connect() as connection:
cursor = connection.cursor()
cursor.close()
try:
cursor.fetchmany()
except ctds.InterfaceError as ex:
self.assertEqual(str(ex), 'cursor closed')
else:
self.fail('.fetchmany() did not fail as expected') # pragma: nocover
def test_closed_connection(self): # pylint: disable=invalid-name
connection = self.connect()
with connection.cursor() as cursor:
connection.close()
try:
cursor.fetchmany()
except ctds.InterfaceError as ex:
self.assertEqual(str(ex), 'connection closed')
else:
self.fail('.fetchmany() did not fail as expected') # pragma: nocover
def test_invalid_size(self):
with self.connect() as connection:
with connection.cursor() as cursor:
self.assertRaises(TypeError, cursor.fetchmany, size='123')
def test_premature(self):
with self.connect() as connection:
            with connection.cursor() as cursor:
self.assertRaises(ctds.InterfaceError, cursor.fetchmany)
def test_fetchmany(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
                    INSERT INTO @{0}(i) VALUES (1),(2),(3);
SELECT * FROM @{0};
SELECT i * 2 FROM @{0};
'''.format(self.test_fetchmany.__name__)
)
self.assertEqual([tuple(row) for row in cursor.fetchmany()], [(1,)])
self.assertEqual([tuple(row) for row in cursor.fetchmany()], [(2,)])
self.assertEqual([tuple(row) for row in cursor.fetchmany()], [(3,)])
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual([tuple(row) for row in cursor.fetchmany()], [(2,)])
self.assertEqual([tuple(row) for row in cursor.fetchmany()], [(4,)])
self.assertEqual([tuple(row) for row in cursor.fetchmany()], [(6,)])
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), None)
self.assertRaises(ctds.InterfaceError, cursor.fetchmany)
cursor.arraysize = 3
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3);
SELECT * FROM @{0};
SELECT i * 2 FROM @{0};
'''.format(self.test_fetchmany.__name__)
)
self.assertEqual([tuple(row) for row in cursor.fetchmany(3)], [(1,), (2,), (3,)])
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual([tuple(row) for row in cursor.fetchmany(3)], [(2,), (4,), (6,)])
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), None)
self.assertRaises(ctds.InterfaceError, cursor.fetchmany)
def test_size(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3);
SELECT * FROM @{0};
SELECT i * 2 FROM @{0};
'''.format(self.test_size.__name__)
)
self.assertEqual([tuple(row) for row in cursor.fetchmany(3)], [(1,), (2,), (3,)])
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual([tuple(row) for row in cursor.fetchmany(3)], [(2,), (4,), (6,)])
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), None)
self.assertRaises(ctds.InterfaceError, cursor.fetchmany)
def test_empty_resultset(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3);
SELECT i FROM @{0} WHERE i < 0;
'''.format(self.test_empty_resultset.__name__)
)
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), None)
def test_multiple_resultsets(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3);
SELECT i FROM @{0} WHERE i < 0;
SELECT i AS j FROM @{0} WHERE i > 2;
SELECT i AS k FROM @{0} WHERE i > 3;
SELECT i AS ii FROM @{0};
'''.format(self.test_multiple_resultsets.__name__)
)
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual([tuple(row) for row in cursor.fetchmany(3)], [(3,)])
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual(list(cursor.fetchmany()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual([tuple(row) for row in cursor.fetchmany(3)], [(1,), (2,), (3,)])
self.assertEqual(cursor.nextset(), None)
|
damouse/pyparted | src/parted/filesystem.py | Python | gpl-2.0 | 3,706 | 0.001619 | #
# filesystem.py
# Python bindings for libparted (built on top of the _ped Python module).
#
# Copyright (C) 2009-2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Cantrell <dcantrell@redhat.com>
#
import _ped
import parted
from decorators import localeC
# XXX: add docstrings!
class FileSystem(object):
@localeC
def __init__(self, type=None, geometry=None, checked=False, PedFileSystem=None):
if checked:
c = 1
else:
c = 0
if PedFileSystem is None:
if type is None:
raise parted.FileSystemException, "no type specified"
elif geometry is None:
raise parted.FileSystemException, "no geometry specified"
self._type = type
self._geometry = geometry
self._checked = checked
self.__fileSystem = _ped.FileSystem(type=fileSystemType[type], geom=geometry.getPedGeometry(), checked=c)
else:
self.__fileSystem = PedFileSystem
self._type = self.__fileSystem.type.name
self._geometry = parted.Geometry(PedGeometry=self.__fileSystem.geom)
if self.__fileSystem.checked:
self._checked = True
else:
self._checked = False
def __eq__(self, other):
        return not self.__ne__(other)
def __ne__(self, other):
if hash(self) == hash(other):
return False
        if type(self) != type(other):
return True
return self.type != other.type or self.geometry != other.geometry
def __str__(self):
s = ("parted.FileSystem instance --\n"
" type: %(type)s geometry: %(geometry)r checked: %(checked)s\n"
" PedFileSystem: %(ped)r" %
{"type": self.type, "geometry": self.geometry,
"checked": self.checked, "ped": self.__fileSystem})
return s
@property
def type(self):
"""The type of this filesystem, e.g. ext3."""
return self._type
@property
def geometry(self):
"""The Geometry object describing this filesystem."""
return self._geometry
@property
def checked(self):
"""True if this filesystem has been checked, False otherwise."""
return bool(self._checked)
def getPedFileSystem(self):
"""Return the _ped.FileSystem object contained in this FileSystem.
For internal module use only."""
return self.__fileSystem
# collect all filesystem types and store them in a hash
fileSystemType = {}
__type = _ped.file_system_type_get_next()
fileSystemType[__type.name] = __type
while True:
try:
__type = _ped.file_system_type_get_next(__type)
fileSystemType[__type.name] = __type
except:
break
|
campadrenalin/python-libdeje | deje/quorum.py | Python | gpl-3.0 | 5,211 | 0.005181 | '''
This file is part of python-libdeje.
python-libdeje is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
python-libdeje is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with python-libdeje. If not, see <http://www.gnu.org/licenses/>.
'''
import datetime
from persei import *
from ejtp.identity import Identity
DEFAULT_DURATION = datetime.timedelta(minutes = 5)
class Quorum(object):
    def __init__(self, action, qs = None, signatures = {}):
self.action = action
self.qs = qs
self.signatures = {}
self.sent = False
for identity in signatures:
self.sign(identity, signatures[identity])
if qs:
qs.register(self)
@property
def threshtype(self):
return self.action.quorum_threshold_type
def sig_valid(self, key):
if key not in self.signatures:
return False
identity, signature = self.signatures[key]
return (identity in self.participants) and validate_signature(identity, self.hash, signature)
def sign(self, identity, signature = None, duration = DEFAULT_DURATION):
if not signature:
signature = generate_signature(identity, self.hash, duration)
assert_valid_signature(identity, self.hash, signature)
# Equivalent or updated signature or non-colliding read.
# Don't check for collisions in QS
if self.sig_valid(identity.key) or self.threshtype == "read":
self.signatures[identity.key] = (identity, signature)
return
with self.qs.transaction(identity, self):
self.signatures[identity.key] = (identity, signature)
def clear(self):
"""
Clear out all signatures.
"""
self.signatures = {}
def transmittable_sig(self, signer):
return self.signatures[signer][1]
def sigs_dict(self):
sigs = {}
for signer in self.valid_signatures:
sigs[signer] = self.transmittable_sig(signer)
return sigs
def ready(self, document):
return self.done and not self.action.is_done(document)
# Parent-derived properties
@property
def version(self):
return self.action.version
@property
def content(self):
return self.action.serialize()
# Handler-derived properties
@property
def completion(self):
return len(self.valid_signatures)
@property
def competing(self):
return not (self.done or self.outdated)
@property
def done(self):
return self.completion >= self.threshold
@property
def outdated(self):
# Version is not relevant for read requests
if self.threshtype == 'read':
return False
else:
return self.qs.version != self.version
@property
def participants(self):
if not self.qs:
raise ValueError("Cannot determine participants without QS")
return self.qs.participants
@property
def thresholds(self):
if not self.qs:
raise ValueError("Cannot determine thresholds without QS")
return self.qs.thresholds
@property
def threshold(self):
if not self.qs:
raise ValueError("Cannot determine threshold without QS")
return self.thresholds[self.threshtype]
@property
def valid_signatures(self):
return [ x for x in self.signatures if self.sig_valid(x) ]
@property
def hash(self):
return self.action.hash()
def validate_signature(identity, content_hash, signature):
try:
assert_valid_signature(identity, content_hash, signature)
return True
except:
return False
def assert_valid_signature(identity, content_hash, signature):
if not isinstance(identity, Identity):
raise TypeError("Expected ejtp.identity.core.Identity, got %r" % identity)
try:
expires, subsig = signature.split("\x00", 1)
except:
raise ValueError("Bad signature format - no nullbyte separator")
expire_date = datetime.datetime.strptime(String(expires).export(), "%Y-%m-%d %H:%M:%S.%f")
plaintext = expires + content_hash
if not expire_date > datetime.datetime.utcnow():
raise ValueError("Signature is expired")
if not identity.verify_signature(subsig, plaintext):
raise ValueError("Identity object thinks sig is not valid")
def generate_signature(identity, content_hash, duration = DEFAULT_DURATION):
if not isinstance(identity, Identity):
raise TypeError("Expected ejtp.identity.core.Identity, got %r" % identity)
expires = RawData((datetime.datetime.utcnow() + duration).isoformat(' '))
return expires + RawData((0,)) + identity.sign(expires + content_hash)
|
litedesk/litedesk-lib-active_directory | src/litedesk/lib/active_directory/classes/__init__.py | Python | apache-2.0 | 608 | 0 | # Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
5j9/yadkard | lib/waybackmachine.py | Python | gpl-3.0 | 4,099 | 0 | """Define related tools for web.archive.org (aka Wayback Machine)."""
import logging
from threading import Thread
from datetime import date
from urllib.parse import urlparse
from regex import compile as regex_compile
from requests import ConnectionError as RequestsConnectionError
from lib.commons import dict_to_sfn_cit_ref
from lib.urls import (
urls_scr, url2dict, get_home_title, get_html, find_authors,
find_journal, find_site_name, find_title, ContentTypeError,
ContentLengthError, StatusCodeError, TITLE_TAG
)
URL_FULLMATCH = regex_compile(
r'https?+://web(?:-beta)?+\.archive\.org/(?:web/)?+'
r'(\d{4})(\d{2})(\d{2})\d{6}(?>cs_|i(?>d_|m_)|js_)?+/(http.*)'
).fullmatch
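# The capture groups are the archive year, month, and day, plus the original URL.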
def waybackmachine_scr(
archive_url: str, date_format: str = '%Y-%m-%d'
) -> tuple:
"""Create the response namedtuple."""
m = URL_FULLMATCH(archive_url)
if not m:
# Could not parse the archive_url. Treat as an ordinary URL.
return urls_scr(archive_url, date_format)
archive_year, archive_month, archive_day, original_url = \
m.groups()
original_dict = {}
thread = Thread(
target=original_url2dict, args=(original_url, original_dict)
)
thread.start()
try:
archive_dict = url2dict(archive_url)
except (ContentTypeError, ContentLengthError) as e:
logger.exception(archive_url)
# Todo: i18n
return 'Invalid content type or length.', e, ''
archive_dict['date_format'] = date_format
archive_dict['url'] = original_url
archive_dict['archive-url'] = archive_url
archive_dict['archive-date'] = date(
int(archive_year), int(archive_month), int(archive_day)
)
thread.join()
if original_dict:
# The original_process has been successful
if (
original_dict['title'] == archive_dict['title']
or original_dict['html_title'] == archive_dict['html_title']
):
archive_dict.update(original_dict)
archive_dict['url-status'] = 'live'
else:
        # The original page was fetched, but its title differs from the
        # archived title. The content has probably changed, so the
        # original data cannot be trusted.
archive_dict['url-status'] = 'unfit'
else:
archive_dict['url-status'] = 'dead'
if archive_dict['website'] == 'Wayback Machine':
archive_dict['website'] = (
urlparse(original_url).hostname.replace('www.', '')
)
return dict_to_sfn_cit_ref(archive_dict)
def original_url2dict(ogurl: str, original_dict) -> None:
"""Fill the dictionary with the information found in ogurl."""
    # noinspection PyBroadException
try:
original_dict.update(original_url_dict(ogurl))
except (
ContentTypeError,
ContentLengthError,
StatusCodeError,
RequestsConnectionError,
):
pass
except Exception:
logger.exception(
'There was an unexpected error in waybackmechine thread'
)
def original_url_dict(url: str):
    """Return a dictionary containing only the required data for og:url."""
d = {}
# Creating a thread to request homepage title in background
hometitle_list = [] # A mutable variable used to get the thread result
home_title_thread = Thread(
target=get_home_title, args=(url, hometitle_list)
)
home_title_thread.start()
html = get_html(url)
m = TITLE_TAG(html)
html_title = m['result'] if m else None
if html_title:
d['html_title'] = html_title
authors = find_authors(html)
if authors:
d['authors'] = authors
journal = find_journal(html)
if journal:
d['journal'] = journal
d['cite_type'] = 'journal'
else:
d['cite_type'] = 'web'
d['website'] = find_site_name(
html, html_title, url, authors, hometitle_list, home_title_thread
)
d['title'] = find_title(
html, html_title, url, authors, hometitle_list, home_title_thread
)
return d
logger = logging.getLogger(__name__)
|
victor-cortez/Heimdall | mergesort/mergesort.py | Python | mit | 807 | 0.019827 | def merge(a,b):
    lis = []
    # Each cursor tracks [current index, whether items remain].
    pa,pb = [0,len(a) > 0],[0,len(b) > 0]
la,lb = len(a),len(b)
for i in range(la + lb):
        # Use +inf as the sentinel once a list is exhausted.
        if pa[1]:
            ta = a[pa[0]]
        else:
            ta = float("inf")
        if pb[1]:
            tb = b[pb[0]]
        else:
            tb = float("inf")
if tb < ta:
lis.append(tb)
pb[0] = pb[0] + 1
if pb[0] >= lb:
pb[1] = False
else:
lis.append(ta)
            pa[0] = pa[0] + 1
if pa[0] >= la:
pa[1] = False
return lis
def mergesort(lista):
if len(lista) <= 1:
return lista
a,b = [],[]
for i in range(len(lista)):
[a,b][i%2].append(lista[i])
return merge(mergesort(a),mergesort(b)) |
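A minimal smoke test for the two functions above (hypothetical usage, not part of the original file):

if __name__ == "__main__":
    # mergesort should agree with the built-in sorted() on a small sample.
    sample = [5, 1, 4, 2, 8, 0, 3]
    assert mergesort(sample) == sorted(sample)
    # merge assumes both inputs are already sorted.
    assert merge([1, 3, 5], [2, 4]) == [1, 2, 3, 4, 5]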
ivano666/tensorflow | tensorflow/python/kernel_tests/pad_op_test.py | Python | apache-2.0 | 6,742 | 0.006526 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class PadOpTest(tf.test.TestCase):
def _npPad(self, inp, paddings, mode):
return np.pad(inp, paddings, mode=mode.lower())
def testNpPad(self):
self.assertAllEqual(
np.array([[0, 0, 0, 0, 0, 0],
[0, 3, 3, 0, 0, 0],
[0, 4, 4, 0, 0, 0],
[0, 5, 5, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant"))
self.assertAllEqual(
np.array([[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0],
[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="reflect"))
self.assertAllEqual(
np.array([[0, 0, 1, 2, 2, 1],
[0, 0, 1, 2, 2, 1],
[3, 3, 4, 9, 9, 4],
[3, 3, 4, 9, 9, 4]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="symmetric"))
def _testPad(self, np_inputs, paddings, mode, use_gpu=False):
np_val = self._npPad(np_inputs, paddings, mode=mode)
with self.test_session(use_gpu=use_gpu):
tf_val = tf.pad(np_inputs, paddings, mode=mode)
out = tf_val.eval()
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def _testGradient(self, x, a, mode):
with self.test_session():
inx = tf.convert_to_tensor(x)
xs = list(x.shape)
ina = tf.convert_to_tensor(a)
y = tf.pad(inx, ina, mode=mode)
      # Expected y's shape: input shape plus total padding on each axis.
ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
xs,
y,
ys,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_inputs, paddings):
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC"):
self._testPad(np_inputs, paddings, mode=mode, use_gpu=False)
self._testPad(np_inputs, paddings, mode=mode, use_gpu=True)
if np_inputs.dtype == np.float32:
self._testGradient(np_inputs, paddings, mode=mode)
def testInputDims(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2, 1, 1, 1, 1]),
tf.reshape([1, 2], shape=[1, 2]))
def testPaddingsDim(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[2]))
def testPaddingsDim2(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[2, 1]))
def testPaddingsDim3(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[1, 2]))
def testPaddingsDim4(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2, 3, 4, 5, 6], shape=[3, 2]))
def testPaddingsNonNegative(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
tf.pad(
tf.constant([1], shape=[1]),
tf.constant([-1, 0], shape=[1, 2]))
def testPaddingsNonNegative2(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
tf.pad(
tf.constant([1], shape=[1]),
tf.constant([-1, 0], shape=[1, 2]))
def testPaddingsMaximum(self):
with self.test_session():
with self.assertRaises(Exception):
tf.pad(
tf.constant([1], shape=[2]),
tf.constant([2, 0], shape=[1, 2]),
mode="REFLECT").eval()
with self.assertRaises(Exception):
tf.pad(
tf.constant([1], shape=[2]),
tf.constant([0, 3], shape=[1, 2]),
mode="SYMMETRIC").eval()
def testIntTypes(self):
# TODO(touts): Figure out why the padding tests do not work on GPU
# for int types and rank > 2.
for t in [np.int32, np.int64]:
self._testAll((np.random.rand(4, 4, 3) * 100).astype(t),
                    [[1, 0], [2, 3], [0, 2]])
def testFloatTypes(self):
for t in [np.float32, np.float64, np.complex64]:
self._testAll(np.random.rand(2, 5).astype(t),
[[1, 0], [2, 0]])
def testShapeFunctionEdgeCases(self):
    # Unknown paddings shape.
inp = tf.constant(0.0, shape=[4, 4, 4, 4])
padded = tf.pad(inp, tf.placeholder(tf.int32))
self.assertEqual([None, None, None, None], padded.get_shape().as_list())
# Unknown input shape.
inp = tf.placeholder(tf.float32)
padded = tf.pad(inp, [[2, 2], [2, 2]])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input and paddings shape.
inp = tf.placeholder(tf.float32)
padded = tf.pad(inp, tf.placeholder(tf.int32))
self.assertAllEqual(None, padded.get_shape().ndims)
def testScalars(self):
paddings = np.zeros((0, 2), dtype=np.int32)
inp = np.asarray(7)
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
tf_val = tf.pad(inp, paddings)
out = tf_val.eval()
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
if __name__ == "__main__":
tf.test.main()
|
momikey/pyrge | setup.py | Python | lgpl-2.1 | 744 | 0.034946 | from distutils.core import setup
import sys, os
DATA_FILES = ['README', 'LICENSE']
ASTEROID_PNGS = ['large.png', 'medium.png', 'small.png', 'ship.png']
INVASION_PNGS = ['ship.png', 'alien.png']
TUTORIALS = ['examples/tutorial/'+t for t in os.listdir('examples/tutorial/')]
DATA_FILES += TUTORIALS
setup(
name='pyrge',
version='0.6',
author="Michael Potter",
author_email="michael@ | potterpcs.net",
url="http://github.com/momikey/pyrge",
packages = ['pyrge',
'pyrge.examples.asteroid',
'pyrge.examples.invasion'],
package_dir = {'pyrge': ''},
package_data = {'pyrge': DATA_FILES,
'pyrge.examples.asteroid': ['*.png'],
'pyrge.examples.invasion': ['*.png']},
requires=['pygame (>= 1.9)'])
|
Signbank/FinSL-signbank | signbank/dictionary/urls.py | Python | bsd-3-clause | 4,038 | 0.009163 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.urls import path
from django.views.generic.base import RedirectView
from django.contrib.auth.decorators import permission_required, login_required
# Views
from . import adminviews
from . import publicviews
from . import update
from . import delete
from . import views
# Application namespace
app_name = 'dictionary'
urlpatterns = [
# Public views for dictionary
path('', publicviews.GlossListPublicView.as_view(), name='public_gloss_list'),
path('gloss/<int:pk>', publicviews.GlossDetailPublicView.as_view(), name='public_gloss_view'),
# Support old URLs, redirect them to new URLs.
path('public/gloss/',
RedirectView.as_view(pattern_name='dictionary:public_gloss_list', permanent=False)),
path('public/gloss/<int:pk>',
RedirectView.as_view(pattern_name='dictionary:public_gloss_view', permanent=False)),
# Advanced search page
path('advanced/', permission_required('dictionary.search_gloss')
(adminviews.GlossListView.as_view()), name='admin_gloss_list'),
# Main views for dictionary search page and gloss detail page, these used to be 'admin' views
path('advanced/list/', permission_required('dictionary.search_gloss')(adminviews.GlossListView.as_view())),
path('advanced/gloss/<int:pk>', permission_required('dictionary.search_gloss')
(adminviews.GlossDetailView.as_view()), name='admin_gloss_view'),
# GlossRelation search page
path('advanced/glossrelation/', permission_required('dictionary.search_gloss')
(adminviews.GlossRelationListView.as_view()), name='search_glossrelation'),
# Redirect old URL
path('search/glossrelation/', permission_required('dictionary.search_gloss')
(RedirectView.as_view(pattern_name='dictionary:search_glossrelation', permanent=False))),
# Manage lexicons
    path('lexicons/', login_required(views.ManageLexiconsListView.as_view()), name='manage_lexicons'),
# Apply for lexicon permissions
path('lexicons/apply/', login_required(views.ApplyLexiconPermissionsFormView.as_view()),
name='apply_lexicon_permissions'),
# Create
path('advanced/gloss/create/', views.create_gloss, name='create_gloss'),
# Urls used to update data
path('update/gloss/<int:glossid>',
update.update_gloss, name='update_gloss'),
path('update/tag/<int:glossid>',
update.add_tag, name='add_tag'),
path('update/relation/',
update.add_relation, name='add_relation'),
path('update/relationtoforeignsign/',
update.add_relationtoforeignsign, name='add_relationtoforeignsign'),
path('update/morphologydefinition/',
update.add_morphology_definition, name='add_morphologydefinition'),
path('update/glossrelation/',
update.gloss_relation, name='add_glossrelation'),
path('advanced/delete/glossurl/<int:glossurl>',
delete.glossurl, name='delete_glossurl'),
# CSV import urls
path('advanced/import/csv/',
update.import_gloss_csv, name='import_gloss_csv'),
path('advanced/import/csv/confirm/',
update.confirm_import_gloss_csv, name='confirm_import_gloss_csv'),
# AJAX urls
path('ajax/keyword/<str:prefix>',
views.keyword_value_list),
path('ajax/gloss/<str:prefix>',
adminviews.gloss_ajax_complete, name='gloss_complete'),
path('ajax/searchresults/',
adminviews.gloss_ajax_search_results, name='ajax_search_results'),
path('ajax/glossrelation-autocomplete/<int:dataset>',
adminviews.glossrelation_autocomplete, name='glossrelation_autocomplete'),
# XML ecv (externally controlled vocabulary) export for ELAN
path('ecv/<int:dataset_id>',
adminviews.gloss_list_xml, name='gloss_list_xml'),
# Public ECV's
path('public-ecv/<int:dataset_id>',
publicviews.public_gloss_list_xml, name='public_gloss_list_xml'),
# Network Graph of GlossRelations
path('network-graph/',login_required(views.network_graph), name='network_graph'),
]
|
hms-dbmi/fourfront | src/encoded/commands/create_mapping_on_deploy.py | Python | mit | 5,218 | 0.001916 | import argparse
import structlog
import logging
from pyramid.paster import get_app
from snovault.elasticsearch.create_mapping import run as run_create_mapping
from dcicutils.log_utils import set_logging
from dcicutils.deployment_utils import CreateMappingOnDeployManager
log = structlog.getLogger(__name__)
EPILOG = __doc__
# This order determines order that items will be mapped + added to the queue
# Can use item type (e.g. file_fastq) or class name (e.g. FileFastq)
ITEM_INDEX_ORDER = [
'Award',
'Lab',
'AccessKey',
'User',
'Ontology',
'OntologyTerm',
'StaticSection',
'Document',
'Protocol',
'FileFormat',
'ExperimentType',
'Vendor',
'Organism',
'Gene',
'GenomicRegion',
'BioFeature',
'Target',
'Construct',
'Enzyme',
'Antibody',
'FileReference',
'IndividualChicken',
'IndividualFly',
'IndividualHuman',
'IndividualMouse',
'IndividualPrimate',
'IndividualZebrafish',
'Image',
'Modification',
'Biosource',
'BiosampleCellCulture',
'Biosample',
'Workflow',
'WorkflowMapping',
'PublicationTracking',
'Software',
'AnalysisStep',
'Badge',
'SopMap',
'SummaryStatistic',
'SummaryStatisticHiC',
'TrackingItem',
'TreatmentAgent',
'TreatmentRnai',
'ImagingPath',
'MicroscopeSettingA1',
'MicroscopeSettingA2',
'MicroscopeSettingD1',
'MicroscopeSettingD2',
'MicroscopeConfiguration',
'HiglassViewConfig',
'QualityMetricAtacseq',
'QualityMetricBamqc',
'QualityMetricBamcheck',
'QualityMetricChipseq',
'QualityMetricDedupqcRepliseq',
'QualityMetricFastqc',
'QualityMetricFlag',
'QualityMetricPairsqc',
'QualityMetricMargi',
'QualityMetricRnaseq',
'QualityMetricRnaseqMadqc',
'QualityMetricWorkflowrun',
'QualityMetricQclist',
'QualityMetricMcool',
'ExperimentAtacseq',
'ExperimentCaptureC',
'ExperimentChiapet',
'ExperimentDamid',
'ExperimentHiC',
'ExperimentMic',
'ExperimentRepliseq',
'ExperimentSeq',
'ExperimentTsaseq',
'ExperimentSet',
'ExperimentSetReplicate',
'Publication',
'FileCalibration',
'FileFastq',
'FileMicroscopy',
'FileProcessed',
'FileSet',
'FileSetCalibration',
'FileSetMicroscopeQc',
'FileVistrack',
'DataReleaseUpdate',
'WorkflowRun',
'WorkflowRunAwsem',
'WorkflowRunSbg',
'Page',
]
def get_my_env(app):
"""
Gets the env name of the currently running environment
:param app: handle to Pyramid app
:return: current env
"""
# Return value is presumably one of the above-declared environments
return app.registry.settings.get('env.name')
def _run_create_mapping(app, args):
"""
Runs create_mapping with deploy options and report errors. Allows args passed from argparse in main to override
the default deployment configuration.
:param app: pyramid application handle
:param args: args from argparse
:return: None
"""
try:
deploy_cfg = CreateMappingOnDeployManager.get_deploy_config(env=get_my_env(app), args=args, log=log,
client='create_mapping_on_deploy')
if not deploy_cfg['SKIP']:
log.info('Calling run_create_mapping for env %s.' % deploy_cfg['ENV_NAME'])
run_create_mapping(app=app,
check_first=(not deploy_cfg['WIPE_ES']),
purge_queue=args.clear_queue, # this option does not vary, so no need to override
item_order=ITEM_INDEX_ORDER,
strict=deploy_cfg['STRICT'])
else:
log.info('NOT calling run_create_mapping for env %s.' % deploy_cfg['ENV_NAME'])
exit(0)
    except Exception as e:
        log.error("Exception encountered while gathering deployment information or running create_mapping")
log.error("%s: %s" % (e.__class__.__name__, e))
exit(1)
def main():
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description="Create Elasticsearch mapping on deployment", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('--clear-queue', help="Specify to clear the SQS queue", action='store_true', default=False)
CreateMappingOnDeployManager.add_argparse_arguments(parser)
args = parser.parse_args()
app = get_app(args.config_uri, args.app_name)
# Loading app will have configured from config file. Reconfigure here:
set_logging(in_prod=app.registry.settings.get('production'), log_name=__name__, level=logging.DEBUG)
# set_logging(app.registry.settings.get('elasticsearch.server'),
# app.registry.settings.get('production'),
# level=logging.DEBUG)
_run_create_mapping(app, args)
exit(0)
if __name__ == '__main__':
main()
|
kklmn/xrt | examples/withRaycing/01_SynchrotronSources/undulatorTapering.py | Python | mit | 4,236 | 0.001416 | # -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev", "Roman Chernikov"
__date__ = "08 Mar 2016"
import pickle
import numpy as np
import matplotlib.pyplot as plt
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
import xrt.backends.raycing.screens as rsc
import xrt.backends.raycing.run as rr
import xrt.backends.raycing.materials as rm
import xrt.plotter as xrtp
import xrt.runner as xrtr
showIn3D = False
prefix = 'taper_'
xlimits = [-0.9, 0.9]
zlimits = [-0.9, 0.9]
eMin, eMax = 10200-800, 10200+800
def build_beamline(nrays=2e5):
beamLine = raycing.BeamLine()
rs.Undulator(
beamLine, 'P06', nrays=nrays, eEspread=0.0011,
eSigmaX=34.64, eSigmaZ=6.285, eEpsilonX=1., eEpsilonZ=0.01,
period=31.4, K=2.1392-0.002, n=63, eE=6.08, eI=0.1, xPrimeMax=1.5e-2,
zPrimeMax=1.5e-2, eMin=eMin, eMax=eMax, distE='BW',
xPrimeMaxAutoReduce=False, zPrimeMaxAutoReduce=False,
# targetOpenCL='CPU',
taper=(1.09, 11.254))
beamLine.fsm1 = rsc.Screen(beamLine, 'FSM1', (0, 90000, 0))
return beamLine
def run_process(beamLine):
beamSource = beamLine.sources[0].shine()
beamFSM1 = beamLine.fsm1.expose(beamSource)
outDict = {'beamSource': beamSource,
'beamFSM1': beamFSM1}
if showIn3D:
beamLine.prepare_flow()
return outDict
rr.run_process = run_process
def define_plots(beamLine):
plots = []
plotsE = []
xaxis = xrtp.XYCAxis(r'$x$', 'mm', limits=xlimits, bins=360, ppb=1)
yaxis = xrtp.XYCAxis(r'$z$', 'mm', limits=zlimits, bins=360, ppb=1)
caxis = xrtp.XYCAxis('energy', 'keV', bins=360, ppb=1)
plot = xrtp.XYCPlot(
'beamFSM1', (1,), xaxis=xaxis, yaxis=yaxis, caxis=caxis,
aspect='auto', title='total flux', ePos=1)
plot.baseName = prefix + '1TotalFlux'
plot.saveName = plot.baseName + '.png'
plots.append(plot)
plotsE.append(plot)
for plot in plotsE:
plot.caxis.limits = eMin*1e-3, eMax*1e-3
for plot in plots:
plot.fluxFormatStr = '%.2p'
return plots, plotsE
def afterScript(plots):
plot = plots[-1]
flux = [plot.intensity, plot.nRaysAll, plot.nRaysAccepted,
plot.nRaysSeeded]
cwd = os.getcwd()
pickleName = os.path.join(cwd, plot.baseName+'.pickle')
with open(pickleName, 'wb') as f:
pickle.dump((flux, plot.caxis.binEdges, plot.caxis.total1D), f,
protocol=2)
plot_compare()
def main():
beamLine = build_beamline()
if showIn3D:
beamLine.glow()
else:
plots, plotsE = define_plots(beamLine)
xrtr.run_ray_tracing(plots, repeats=100, beamLine=beamLine,
afterScript=afterScript, afterScriptArgs=[plots])
def plot_compare():
fig1 = plt.figure(1, figsize=(7, 5))
ax = plt.subplot(111, label='1')
ax.set_xlabel(u'energy (keV)')
ax.set_ylabel(u'flux (a.u.)')
cwd = os.getcwd()
pickleName = os.path.join(cwd, 'taper_1TotalFlux.pickle')
with open(pickleName, 'rb') as f:
_f, binEdges, total1D = pickle.load(f)
dE = binEdges[1] - binEdges[0]
E = binEdges[:-1] + dE/2.
ax.plot(E, total1D/max(total1D), 'r', label='calculated by xrt', lw=2)
try:
e, f = np.loadtxt('fluxUndulator1DtaperP06.dc0', skiprows=10,
usecols=[0, 1], unpack=True)
ax.plot(e*1e-3, f/max(f), 'b', label='calculated by Spectra', lw=2)
except: # analysis:ignore
pass
# e, f = np.loadtxt('yaup-0.out', skiprows=32, usecols=[0, 1], unpack=True)
    # ax.plot(e*1e-3, f/max(f), 'g', label='calculated by YAUP/XOP', lw=2)
theta, fl = np.loadtxt("thetaexafssc1an_zn_hgap_00002r2.fio.gz",
skiprows=113, usecols=(0, 5), unpack=True)
si_1 = rm.CrystalSi(hkl=(1, 1, 1), tK=77)
E = rm.ch / (2 * si_1.d * np.sin(np.radians(theta)))
ax.plot(E*1e-3, fl/max(fl), 'k', lw=2, label='measured @ Petra3')
# ax2.set_xlim(0, None)
# ax2.set_ylim(1.400, 1.600)
    ax.legend(loc='lower center')
fig1.savefig('compareTaper.png')
plt.show()
if __name__ == '__main__':
main()
|
toumorokoshi/tornado-transmute | tornado_transmute/__init__.py | Python | mit | 189 | 0 | from transmute_core import *
# from .handler import convert_to_handler
# from .route import route
from .route_set import RouteSet
from .url import url_spec
from .swagger import add_swagger
|
azumimuo/family-xbmc-addon | plugin.video.dragon.sports/lib/utils/github/Comparison.py | Python | gpl-2.0 | 6,448 | 0.002792 | # -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
import Commit
import File
class Comparison(GithubObject.GithubObject):
@property
def ahead_by(self):
self._completeIfNotSet(self._ahead_by)
return self._NoneIfNotSet(self._ahead_by)
@property
def base_commit(self):
self._completeIfNotSet(self._base_commit)
return self._NoneIfNotSet(self._base_commit)
@property
def behind_by(self):
self._completeIfNotSet(self._behind_by)
return self._NoneIfNotSet(self._behind_by)
@property
def commits(self):
self._completeIfNotSet(self._commits)
return self._NoneIfNotSet(self._commits)
@property
def diff_url(self):
self._completeIfNotSet(self._diff_url)
return self._NoneIfNotSet(self._diff_url)
@property
def files(self):
self._completeIfNotSet(self._files)
return self._NoneIfNotSet(self._files)
@property
def html_url(self):
self._completeIfNotSet(self._html_url)
return self._NoneIfNotSet(self._html_url)
@property
def patch_url(self):
self._completeIfNotSet(self._patch_url)
return self._NoneIfNotSet(self._patch_url)
@property
def permalink_url(self):
self._completeIfNotSet(self._permalink_url)
return self._NoneIfNotSet(self._permalink_url)
@property
def status(self):
self._completeIfNotSet(self._status)
return self._NoneIfNotSet(self._status)
@property
def total_commits(self):
self._completeIfNotSet(self._total_commits)
return self._NoneIfNotSet(self._total_commits)
@property
def url(self):
self._completeIfNotSet(self._url)
return self._NoneIfNotSet(self._url)
def _initAttributes(self):
self._ahead_by = GithubObject.NotSet
self._base_commit = GithubObject.NotSet
self._behind_by = GithubObject.NotSet
self._commits = GithubObject.NotSet
self._diff_url = GithubObject.NotSet
self._files = GithubObject.NotSet
self._html_url = GithubObject.NotSet
self._patch_url = GithubObject.NotSet
self._permalink_url = GithubObject.NotSet
self._status = GithubObject.NotSet
self._total_commits = GithubObject.NotSet
self._url = GithubObject.NotSet
def _useAttributes(self, attributes):
if "ahead_by" in attributes: # pragma no branch
assert attributes["ahead_by"] is None or isinstance(attributes["ahead_by"], (int, long)), attributes["ahead_by"]
self._ahead_by = attributes["ahead_by"]
if "base_commit" in attributes: # pragma no branch
assert attributes["base_commit"] is None or isinstance(attributes["base_commit"], dict), attributes["base_commit"]
self._base_commit = None if attributes["base_commit"] is None else Commit.Commit(self._requester, attributes["base_commit"], completed=False)
if "behind_by" in attributes: # pragma no branch
assert attributes["behind_by"] is None or isinstance(attributes["behind_by"], (int, long)), attributes["behind_by"]
self._behind_by = attributes["behind_by"]
if "commits" in attributes: # pragma no branch
assert attributes["commits"] is None or all(isinstance(element, dict) for element in attributes["commits"]), | attributes["commits"]
self._commits = None if attributes["commits"] is None else [
Commit.Commit(self._requester, element, completed=False)
                for element in attributes["commits"]
]
if "diff_url" in attributes: # pragma no branch
assert attributes["diff_url"] is None or isinstance(attributes["diff_url"], (str, unicode)), attributes["diff_url"]
self._diff_url = attributes["diff_url"]
if "files" in attributes: # pragma no branch
assert attributes["files"] is None or all(isinstance(element, dict) for element in attributes["files"]), attributes["files"]
self._files = None if attributes["files"] is None else [
File.File(self._requester, element, completed=False)
for element in attributes["files"]
]
if "html_url" in attributes: # pragma no branch
assert attributes["html_url"] is None or isinstance(attributes["html_url"], (str, unicode)), attributes["html_url"]
self._html_url = attributes["html_url"]
if "patch_url" in attributes: # pragma no branch
assert attributes["patch_url"] is None or isinstance(attributes["patch_url"], (str, unicode)), attributes["patch_url"]
self._patch_url = attributes["patch_url"]
if "permalink_url" in attributes: # pragma no branch
assert attributes["permalink_url"] is None or isinstance(attributes["permalink_url"], (str, unicode)), attributes["permalink_url"]
self._permalink_url = attributes["permalink_url"]
if "status" in attributes: # pragma no branch
assert attributes["status"] is None or isinstance(attributes["status"], (str, unicode)), attributes["status"]
self._status = attributes["status"]
if "total_commits" in attributes: # pragma no branch
assert attributes["total_commits"] is None or isinstance(attributes["total_commits"], (int, long)), attributes["total_commits"]
self._total_commits = attributes["total_commits"]
if "url" in attributes: # pragma no branch
assert attributes["url"] is None or isinstance(attributes["url"], (str, unicode)), attributes["url"]
self._url = attributes["url"]
|
eruvanos/openbrokerapi | tests/test_last_binding_operation.py | Python | mit | 2,530 | 0.005138 | import http
from openbrokerapi.service_broker import LastOperation, OperationState
from tests import BrokerTestCase
class LastBindingOperationTest(BrokerTestCase):
def setUp(self):
self.broker.service_id.return_value = 'service-guid-here'
def test_last_operation_called_just_with_required_fields(self):
self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...')
self.client.get(
'/v2/service_instances/here-instance_id/service_bindings/binding_id/last_operation',
headers={
'X-Broker-Api-Version': '2.13',
'Authorization': self.auth_header
})
self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id', None, None, None)
def test_last_operation_called_with_operation_data(self):
self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...')
query = 'service_id=&plan_id=456&operation=service-guid-here%20operation-data'
self.client.get(
'/v2/service_instances/here-instance_id/service_bindings/binding_id/last_operation?%s' % query,
headers={
'X-Broker-Api-Version': '2.13',
'Authorization': self.auth_header
})
self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id',
'service-guid-here operation-data', "", "456")
def test_returns_200_with_given_state(self):
self.broker.last_binding_operation.return_value = LastOperation(OperationState.IN_PROGRESS, 'Running...')
        query = 'service_id=123&plan_id=456&operation=service-guid-here%20operation-data'
response = self.client.get(
'/v2/service_instances/here-instance_id/service_bindings/binding_id/last_operation?%s' % query,
headers={
'X-Broker-Api-Version': '2.13',
'Authorization': self.auth_header
})
self.broker.last_binding_operation.assert_called_once_with('here-instance_id', 'binding_id',
'service-guid-here operation-data', "123", "456")
self.assertEqual(response.status_code, http.HTTPStatus.OK)
self.assertEqual(response.json, dict(
state=OperationState.IN_PROGRESS.value,
description='Running...'
))
|
jalilm/ryu | ryu/tests/unit/packet/test_igmp.py | Python | apache-2.0 | 34,411 | 0 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import inspect
import logging
from struct import pack, unpack_from, pack_into
from nose.tools import ok_, eq_, raises
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.ipv4 import ipv4
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.packet_utils import checksum
from ryu.lib import addrconv
from ryu.lib.packet.igmp import igmp
from ryu.lib.packet.igmp import igmpv3_query
from ryu.lib.packet.igmp import igmpv3_report
from ryu.lib.packet.igmp import igmpv3_report_group
from ryu.lib.packet.igmp import IGMP_TYPE_QUERY
from ryu.lib.packet.igmp import IGMP_TYPE_REPORT_V3
from ryu.lib.packet.igmp import MODE_IS_INCLUDE
LOG = logging.getLogger(__name__)
class Test_igmp(unittest.TestCase):
""" Test case for Internet Group Management Protocol
"""
def setUp(self):
self.msgtype = IGMP_TYPE_QUERY
self.maxresp = 100
self.csum = 0
self.address = '225.0.0.1'
self.buf = pack(igmp._PACK_STR, self.msgtype, self.maxresp,
self.csum,
addrconv.ipv4.text_to_bin(self.address))
self.g = igmp(self.msgtype, self.maxresp, self.csum,
self.address)
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.msgtype, self.g.msgtype)
eq_(self.maxresp, self.g.maxresp)
eq_(self.csum, self.g.csum)
eq_(self.address, self.g.address)
def test_parser(self):
_res = self.g.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res.msgtype, self.msgtype)
eq_(res.maxresp, self.maxresp)
eq_(res.csum, self.csum)
eq_(res.address, self.address)
def test_serialize(self):
data = bytearray()
prev = None
buf = self.g.serialize(data, prev)
res = unpack_from(igmp._PACK_STR, buffer(buf))
eq_(res[0], self.msgtype)
eq_(res[1], self.maxresp)
eq_(res[2], checksum(self.buf))
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
def _build_igmp(self):
dl_dst = '11:22:33:44:55:66'
dl_src = 'aa:bb:cc:dd:ee:ff'
dl_type = ether.ETH_TYPE_IP
e = ethernet(dl_dst, dl_src, dl_type)
total_length = 20 + igmp._MIN_LEN
nw_proto = inet.IPPROTO_IGMP
nw_dst = '11.22.33.44'
nw_src = '55.66.77.88'
i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst,
proto=nw_proto)
p = Packet()
p.add_protocol(e)
p.add_protocol(i)
p.add_protocol(self.g)
p.serialize()
return p
def test_build_igmp(self):
p = self._build_igmp()
e = self.find_protocol(p, "ethernet")
ok_(e)
eq_(e.ethertype, ether.ETH_TYPE_IP)
i = self.find_protocol(p, "ipv4")
        ok_(i)
eq_(i.proto, inet.IPPROTO_IGMP)
g = self.find_protocol(p, "ig | mp")
ok_(g)
eq_(g.msgtype, self.msgtype)
eq_(g.maxresp, self.maxresp)
eq_(g.csum, checksum(self.buf))
eq_(g.address, self.address)
def test_to_string(self):
igmp_values = {'msgtype': repr(self.msgtype),
'maxresp': repr(self.maxresp),
'csum': repr(self.csum),
'address': repr(self.address)}
_g_str = ','.join(['%s=%s' % (k, igmp_values[k])
for k, v in inspect.getmembers(self.g)
if k in igmp_values])
g_str = '%s(%s)' % (igmp.__name__, _g_str)
eq_(str(self.g), g_str)
eq_(repr(self.g), g_str)
@raises(Exception)
def test_malformed_igmp(self):
m_short_buf = self.buf[1:igmp._MIN_LEN]
igmp.parser(m_short_buf)
def test_default_args(self):
ig = igmp()
buf = ig.serialize(bytearray(), None)
res = unpack_from(igmp._PACK_STR, str(buf))
eq_(res[0], 0x11)
eq_(res[1], 0)
eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
def test_json(self):
jsondict = self.g.to_jsondict()
g = igmp.from_jsondict(jsondict['igmp'])
eq_(str(self.g), str(g))
class Test_igmpv3_query(unittest.TestCase):
""" Test case for Internet Group Management Protocol v3
Membership Query Message"""
def setUp(self):
self.msgtype = IGMP_TYPE_QUERY
self.maxresp = 100
self.csum = 0
self.address = '225.0.0.1'
self.s_flg = 0
self.qrv = 2
self.qqic = 10
self.num = 0
self.srcs = []
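        # The S (suppress router-side processing) flag and QRV share one
        # octet: S sits in bit 3, QRV occupies the low 3 bits.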
self.s_qrv = self.s_flg << 3 | self.qrv
self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
self.g = igmpv3_query(
self.msgtype, self.maxresp, self.csum, self.address,
self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
def setUp_with_srcs(self):
self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
self.num = len(self.srcs)
self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
for src in self.srcs:
self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
self.g = igmpv3_query(
self.msgtype, self.maxresp, self.csum, self.address,
self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.msgtype, self.g.msgtype)
eq_(self.maxresp, self.g.maxresp)
eq_(self.csum, self.g.csum)
eq_(self.address, self.g.address)
eq_(self.s_flg, self.g.s_flg)
eq_(self.qrv, self.g.qrv)
eq_(self.qqic, self.g.qqic)
eq_(self.num, self.g.num)
eq_(self.srcs, self.g.srcs)
def test_init_with_srcs(self):
self.setUp_with_srcs()
self.test_init()
def test_parser(self):
_res = self.g.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res.msgtype, self.msgtype)
eq_(res.maxresp, self.maxresp)
eq_(res.csum, self.csum)
eq_(res.address, self.address)
eq_(res.s_flg, self.s_flg)
eq_(res.qrv, self.qrv)
eq_(res.qqic, self.qqic)
eq_(res.num, self.num)
eq_(res.srcs, self.srcs)
def test_parser_with_srcs(self):
self.setUp_with_srcs()
self.test_parser()
def test_serialize(self):
data = bytearray()
prev = None
buf = self.g.serialize(data, prev)
res = unpack_from(igmpv3_query._PACK_STR, buffer(buf))
eq_(res[0], self.msgtype)
eq_(res[1], self.maxresp)
eq_(res[2], checksum(self.buf))
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
eq_(res[4], self.s_qrv)
eq_(res[5], self.qqic)
eq_( |
google-research/football | gfootball/env/gym_test.py | Python | apache-2.0 | 1,391 | 0.005751 | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GFootball environment using OpenAI Gym test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import gym
from absl.testing import parameterized
class GymTest(parameterized.TestCase):
@parameterized.parameters(('scoring'), ('scoring,checkpoints'))
def test_environment(self, rewards):
# Tests it is possible to create and run an environment twice.
for _ in range(2):
env = gym.make('gfootball:GFootball-11_vs_11_easy_stochastic-SMM-v0',
stacked=True, rewards=rewards)
env.reset()
for _ in range(10):
_, _, done, _ = env.step(env.action_space.sample())
if done:
env.reset()
env.close()
if __name__ == '__main__':
unittest.main()
|
sfairhur/pycbc | pycbc/strain/strain.py | Python | gpl-3.0 | 78,684 | 0.003978 | # Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules contains functions reading, generating, and segmenting strain data
"""
import copy
import logging, numpy
import pycbc.noise
import pycbc.types
from pycbc.types import TimeSeries, zeros
from pycbc.types import Array, FrequencySeries, complex_same_precision_as
from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
from pycbc.types import MultiDetOptionActionSpecial
from pycbc.types import required_opts, required_opts_multi_ifo
from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
from pycbc.types import copy_opts_for_single_ifo
from pycbc.inject import InjectionSet, SGBurstInjectionSet
from pycbc.filter import resample_to_delta_t, highpass, make_frequency_series
from pycbc.filter.zpk import filter_zpk
from pycbc.waveform.spa_tmplt import spa_distance
import pycbc.psd
import pycbc.fft
import pycbc.events
import pycbc.frame
import pycbc.filter
from scipy.signal import kaiserord
def next_power_of_2(n):
"""Return the smallest integer power of 2 larger than the argument.
Parameters
----------
n : int
A positive integer.
Returns
-------
m : int
Smallest integer power of 2 larger than n.
"""
return 1 << n.bit_length()
def detect_loud_glitches(strain, psd_duration=4., psd_stride=2.,
psd_avg_method='median', low_freq_cutoff=30.,
threshold=50., cluster_window=5., corrupt_time=4.,
high_freq_cutoff=None, output_intermediates=False):
"""Automatic identification of loud transients for gating purposes.
This function first estimates the PSD of the input time series using the
FindChirp Welch method. Then it whitens the time series using that
estimate. Finally, it computes the magnitude of the whitened series,
thresholds it and applies the FindChirp clustering over time to the
surviving samples.
Parameters
----------
strain : TimeSeries
Input strain time series to detect glitches over.
psd_duration : {float, 4}
Duration of the segments for PSD estimation in seconds.
psd_stride : {float, 2}
Separation between PSD estimation segments in seconds.
psd_avg_method : {string, 'median'}
Method for averaging PSD estimation segments.
low_freq_cutoff : {float, 30}
Minimum frequency to include in the whitened strain.
threshold : {float, 50}
Minimum magnitude of whitened strain for considering a transient to
be present.
cluster_window : {float, 5}
Length of time window to cluster surviving samples over, in seconds.
corrupt_time : {float, 4}
Amount of time to be discarded at the beginning and end of the input
time series.
    high_freq_cutoff : {float, None}
        Maximum frequency to include in the whitened strain. If given, the
        input series is downsampled accordingly. If omitted, the Nyquist
        frequency is used.
    output_intermediates : {bool, False}
Save intermediate time series for debugging.
"""
# don't waste time trying to optimize a single FFT
pycbc.fft.fftw.set_measure_level(0)
if high_freq_cutoff:
strain = resample_to_delta_t(strain, 0.5 / high_freq_cutoff,
method='ldas')
else:
strain = strain.copy()
# taper strain
corrupt_length = int(corrupt_time * strain.sample_rate)
w = numpy.arange(corrupt_length) / float(corrupt_length)
strain[0:corrupt_length] *= pycbc.types.Array(w, dtype=strain.dtype)
strain[(len(strain) - corrupt_length):] *= \
pycbc.types.Array(w[::-1], dtype=strain.dtype)
if output_intermediates:
strain.save_to_wav('strain_conditioned.wav')
# zero-pad strain to a power-of-2 length
strain_pad_length = next_power_of_2(len(strain))
pad_start = int(strain_pad_length / 2 - len(strain) / 2)
pad_end = pad_start + len(strain)
pad_epoch = strain.start_time - pad_start / float(strain.sample_rate)
strain_pad = pycbc.types.TimeSeries(
pycbc.types.zeros(strain_pad_length, dtype=strain.dtype),
delta_t=strain.delta_t, copy=False, epoch=pad_epoch)
strain_pad[pad_start:pad_end] = strain[:]
# estimate the PSD
psd = pycbc.psd.welch(strain[corrupt_length:(len(strain)-corrupt_length)],
seg_len=int(psd_duration * strain.sample_rate),
seg_stride=int(psd_stride * strain.sample_rate),
avg_method=psd_avg_method,
require_exact_data_fit=False)
psd = pycbc.psd.interpolate(psd, 1. / strain_pad.duration)
psd = pycbc.psd.inverse_spectrum_truncation(
psd, int(psd_duration * strain.sample_rate),
low_frequency_cutoff=low_freq_cutoff,
trunc_method='hann')
kmin = int(low_freq_cutoff / psd.delta_f)
psd[0:kmin] = numpy.inf
if high_freq_cutoff:
kmax = int(high_freq_cutoff / psd.delta_f)
psd[kmax:] = numpy.inf
# whiten
strain_tilde = strain_pad.to_frequencyseries()
if high_freq_cutoff:
norm = high_freq_cutoff - low_freq_cutoff
else:
norm = strain.sample_rate / 2. - low_freq_cutoff
strain_tilde *= (psd * norm) ** (-0.5)
strain_pad = strain_tilde.to_timeseries()
if output_intermediates:
strain_pad[pad_start:pad_end].save_to_wav('strain_whitened.wav')
mag = abs(strain_pad[pad_start:pad_end])
if output_intermediates:
mag.save('strain_whitened_mag.npy')
mag = mag.numpy()
# remove strain corrupted by filters at the ends
mag[0:corrupt_length] = 0
    mag[-corrupt_length:] = 0
# find peaks and their times
indices = numpy.where(mag > threshold)[0]
cluster_idx = pycbc.events.findchirp_cluster_over_window(
indices, numpy.array(mag[indices]),
int(cluster_window*strain.sample_rate))
times = [idx * strain.delta_t + strain.start_time \
for idx in indices[cluster_idx]]
pycbc.fft.fftw.set_measure_level(pycbc.fft.fftw._default_measurelvl)
return times
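# Minimal usage sketch for detect_loud_glitches(); the strain series and
# threshold values below are hypothetical, not part of the original module:
#
#   strain = TimeSeries(numpy.random.normal(size=4096 * 64), delta_t=1. / 4096)
#   glitch_times = detect_loud_glitches(strain, low_freq_cutoff=30.,
#                                       threshold=50., cluster_window=5.)
#   # glitch_times lists the GPS times of the clustered samples whose
#   # whitened magnitude exceeded the threshold.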
def from_cli(opt, dyn_range_fac=1, precision='single',
inj_filter_rejector=None):
"""Parses the CLI options related to strain data reading and conditioning.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, (frame-cache or frame-files), channel-name,
fake-strain, fake-strain-seed, fake-strain-from-file, gating_file).
dyn_range_fac : {float, 1}, optional
A large constant to reduce the dynamic range of the strain.
precision : string
Precision of the returned strain ('single' or 'double').
inj_filter_rejector : InjFilterRejector instance; optional, default=None
        If given, send the InjFilterRejector instance to the inject module so
that it can store a reduced representation of injections if
necessary.
Returns
-------
strain : TimeSeries
The time series containing the conditioned strain data.
"""
gating_info = {}
if opt.frame_cache or opt.frame_files or opt.frame_type or opt.hdf_store:
if opt.frame_cache:
frame_source = opt.frame_cache
if opt.frame_files:
|
tedunderwood/character | oldcode/combine_hathi_summaries.py | Python | mit | 1,368 | 0.009503 | # combine_all_summaries
# this fuses the summary files from pre23_hathi,
# post22_hathi, and chicago, to create a file
# where there is only one entry for each
# char, auth, date tuple
import csv
from collections import Counter
counts = dict()
columns = ['characters', 'speaking', 'agent', 'mod', 'patient', 'poss', 'total']
fieldnames = ['chargender', 'authgender', 'date', 'characters', 'speaking', 'agent', 'mod', 'patient', 'poss', 'total']
def add2counts(f | ilepath, counts):
with open(filepath, encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
triplet = (row['chargender'], row['authgender'], row['date'])
if triplet not in counts:
counts[triplet] = Counter()
for col in columns:
counts[triplet][col] += int(row[col])
return counts
ad | d2counts('post22hathi/corrected_post22_summary.csv', counts)
add2counts('pre23hathi/corrected_pre23_hathi_summary.csv', counts)
with open('corrected_hathi_summaries.csv', mode = 'w', encoding = 'utf-8') as f:
writer = csv.DictWriter(f, fieldnames = fieldnames)
writer.writeheader()
for triplet, colcounts in counts.items():
r = dict()
r['chargender'], r['authgender'], r['date'] = triplet
for col in columns:
r[col] = colcounts[col]
writer.writerow(r)
|
rdevon/cortex | cortex/built_ins/networks/fully_connected.py | Python | bsd-3-clause | 863 | 0 | '''Simple dense network encoders
'''
import logging
import torch.nn as nn
from .base_network import BaseNet
logger = logging.getLogger('cortex.arch' + __name__)
class FullyConnectedNet(BaseNet):
def __init__(self, dim_in, dim_out=None, dim_h=64, dim_ex=None,
nonlinearity='ReLU', n_levels=None, output_nonlinearity=None,
normalize_input=False, **layer_args):
super(FullyConnectedNet, self).__init__(
nonlinearity=nonlinearity, output_nonlinearity=output_nonlinearity)
dim_h = self.get_h(dim_h, n_levels=n_levels)
if normalize_input:
self.models.add_module('initial_bn', nn.BatchNorm1d(dim_in))
dim_in = self.add_linear_layers(dim_in, d | im_h, dim_ex=dim_ex,
**layer_args)
self.add_output_layer(dim_in, dim_out) | |
RDFLib/rdflib | test/test_n3.py | Python | bsd-3-clause | 8,671 | 0.001845 | import os
import pytest
from rdflib.graph import Graph, ConjunctiveGraph
import unittest
from rdflib.term import Literal, URIRef
from rdflib.plugins.parsers.notation3 import BadSyntax, exponent_syntax
import itertools
from urllib.error import URLError
from test import TEST_DIR
test_data = """
# Definitions of terms describing the n3 model
#
@keywords a.
@prefix n3: <#>.
@prefix log: <log.n3#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix : <#> .
@forAll :s, :p, :x, :y, :z.
n3:Statement a rdf:Class .
n3:StatementSet a rdf:Class .
n3:includes a rdfs:Property . # Cf rdf:li
n3:predicate a rdf:Property; rdfs:domain n3:statement .
n3:subject a rdf:Property; rdfs:domain n3:statement .
n3:object a rdf:Property; rdfs:domain n3:statement .
n3:context a rdf:Property; rdfs:domain n3:statement;
rdfs:range n3:StatementSet .
########### Rules
{ :x :p :y . } log:means { [
n3:subject :x;
n3:predicate :p;
n3:object :y ] a log:Truth}.
# Needs more thought ... ideally, we have the implicit AND rules of
# juxtaposition (introduction and elimination)
{
{
{ :x n3:includes :s. } log:implies { :y n3:includes :s. } .
} forall :s1 .
} log:implies { :x log:implies :y } .
{
{
{ :x n3:includes :s. } log:implies { :y n3:includes :s. } .
} forall :s1
} log:implies { :x log:implies :y } .
# I think n3:includes has to be axiomatic builtin. - unless you go to syntax description.
# syntax.n3?
"""
class TestN3Case(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBaseCumulative(self):
"""
Test that the n3 parser supports base declarations
This is issue #22
"""
input = """
@prefix : <http://example.com/> .
# default base
<foo> :name "Foo" .
# change it
@base <http://example.com/doc/> .
<bar> :name "Bar" .
# and change it more - they are cumulative
@base <doc2/> .
<bing> :name "Bing" .
        # unless absolute
@base <http://test.com/> .
<bong> :name "Bong" .
"""
g = Graph()
g.parse(data=input, format="n3")
print(list(g))
self.assertTrue((None, None, | Literal("Foo")) in g)
self.assertTrue((URIRef("http://example.com/doc/ba | r"), None, None) in g)
self.assertTrue((URIRef("http://example.com/doc/doc2/bing"), None, None) in g)
self.assertTrue((URIRef("http://test.com/bong"), None, None) in g)
def testBaseExplicit(self):
"""
Test that the n3 parser supports resolving relative URIs
and that base will override
"""
input = """
@prefix : <http://example.com/> .
# default base
<foo> :name "Foo" .
# change it
@base <http://example.com/doc/> .
<bar> :name "Bar" .
"""
g = Graph()
g.parse(data=input, publicID="http://blah.com/", format="n3")
print(list(g))
self.assertTrue((URIRef("http://blah.com/foo"), None, Literal("Foo")) in g)
self.assertTrue((URIRef("http://example.com/doc/bar"), None, None) in g)
def testBaseSerialize(self):
g = Graph()
g.add(
(
URIRef("http://example.com/people/Bob"),
URIRef("urn:knows"),
URIRef("http://example.com/people/Linda"),
)
)
s = g.serialize(base="http://example.com/", format="n3", encoding="latin-1")
self.assertTrue(b"<people/Bob>" in s)
g2 = ConjunctiveGraph()
g2.parse(data=s, publicID="http://example.com/", format="n3")
self.assertEqual(list(g), list(g2))
def testIssue23(self):
input = """<http://example.com/article1> <http://example.com/title> "this word is in \\u201Cquotes\\u201D"."""
g = Graph()
g.parse(data=input, format="n3")
# Note difference in case of hex code, cwm allows lower-case
input = """<http://example.com/article1> <http://example.com/title> "this word is in \\u201cquotes\\u201d"."""
g.parse(data=input, format="n3")
def testIssue29(self):
input = """@prefix foo-bar: <http://example.org/> .
foo-bar:Ex foo-bar:name "Test" . """
g = Graph()
g.parse(data=input, format="n3")
def testIssue68(self):
input = """@prefix : <http://some.url/pome#>.\n\n:Brecon a :Place;\n\t:hasLord\n\t\t:Bernard_of_Neufmarch\xc3\xa9 .\n """
g = Graph()
g.parse(data=input, format="n3")
def testIssue156(self):
"""
Make sure n3 parser does not choke on UTF-8 BOM
"""
g = Graph()
        n3_path = os.path.relpath(os.path.join(TEST_DIR, "n3/issue156.n3"), os.curdir)
g.parse(n3_path, format="n3")
def testIssue999(self):
"""
Make sure the n3 parser does recognize exponent and leading dot in ".171e-11"
"""
data = """
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
<http://qudt.org/vocab/unit/MilliM-PER-YR>
a <http://qudt.org/schema/qudt/Unit> ;
<http://qudt.org/schema/qudt/conversionMultiplier> .171e-11 ;
<http://qudt.org/schema/qudt/conversionOffset> 0e+00 ;
<http://qudt.org/schema/qudt/description> "0.001-fold of the SI base unit metre divided by the unit year" ;
<http://qudt.org/schema/qudt/hasQuantityKind> <http://qudt.org/vocab/quantitykind/Velocity> ;
<http://qudt.org/schema/qudt/iec61360Code> "0112/2///62720#UAA868" ;
<http://qudt.org/schema/qudt/uneceCommonCode> "H66" ;
rdfs:isDefinedBy <http://qudt.org/2.1/vocab/unit> ;
rdfs:isDefinedBy <http://qudt.org/vocab/unit> ;
rdfs:label "MilliM PER YR" ;
<http://www.w3.org/2004/02/skos/core#prefLabel> "millimetre per year" ;
.
"""
g = Graph()
g.parse(data=data, format="n3")
g.parse(data=data, format="turtle")
def testDotInPrefix(self):
g = Graph()
g.parse(
data="@prefix a.1: <http://example.org/> .\n a.1:cake <urn:x> <urn:y> . \n",
format="n3",
)
def testModel(self):
g = ConjunctiveGraph()
g.parse(data=test_data, format="n3")
i = 0
for s, p, o in g:
if isinstance(s, Graph):
i += 1
self.assertEqual(i, 3)
self.assertEqual(len(list(g.contexts())), 13)
g.close()
def testQuotedSerialization(self):
g = ConjunctiveGraph()
g.parse(data=test_data, format="n3")
g.serialize(format="n3")
def testParse(self):
g = ConjunctiveGraph()
try:
g.parse(
"http://groups.csail.mit.edu/dig/2005/09/rein/examples/troop42-policy.n3",
format="n3",
)
except URLError:
pytest.skip("No network to retrieve the information, skipping test")
def testSingleQuotedLiterals(self):
test_data = [
"""@prefix : <#> . :s :p 'o' .""",
"""@prefix : <#> . :s :p '''o''' .""",
]
for data in test_data:
# N3 doesn't accept single quotes around string literals
g = ConjunctiveGraph()
self.assertRaises(BadSyntax, g.parse, data=data, format="n3")
g = ConjunctiveGraph()
g.parse(data=data, format="turtle")
self.assertEqual(len(g), 1)
for _, _, o in g:
self.assertEqual(o, Literal("o"))
def testEmptyPrefix(self):
# this is issue https://github.com/RDFLib/rdflib/issues/312
g1 = Graph()
g1.parse(data=":a :b :c .", format="n3")
g2 = Graph()
g2.parse(data="@prefix : <#> . :a :b :c .", format="n3")
assert set(g1) == set(
g2
), "Document with declared empty prefix must match default #"
class TestRegularExpressions(unittest.TestCase):
def testExponents(self):
signs = ("", "+", "-")
mantissas = (
"1",
"1.",
".1",
"12",
"12.",
"1.2",
".12",
"123",
"123.",
"12.3",
"1.23",
".123",
|
mezz64/home-assistant | homeassistant/components/freedompro/sensor.py | Python | apache-2.0 | 3,148 | 0.000318 | """Support for Freedompro sensor."""
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import LIGHT_LUX, PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
DEVICE_CLASS_MAP = {
"temperatureSensor": SensorDeviceClass.TEMPERATURE,
"humiditySensor": SensorDeviceClass.HUMIDITY,
"lightSensor": SensorDeviceClass.ILLUMINANCE,
}
STATE_CLASS_MAP = {
"temperatureSensor": SensorStateClass.MEASUREMENT,
"h | umiditySensor": SensorStateClass.MEASUREMENT,
"lightSensor": None,
}
UNIT_MAP = {
"temperatureSensor": TEMP_CELSIUS,
" | humiditySensor": PERCENTAGE,
"lightSensor": LIGHT_LUX,
}
DEVICE_KEY_MAP = {
"temperatureSensor": "currentTemperature",
"humiditySensor": "currentRelativeHumidity",
"lightSensor": "currentAmbientLightLevel",
}
SUPPORTED_SENSORS = {"temperatureSensor", "humiditySensor", "lightSensor"}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Freedompro sensor."""
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
Device(device, coordinator)
for device in coordinator.data
if device["type"] in SUPPORTED_SENSORS
)
class Device(CoordinatorEntity, SensorEntity):
"""Representation of an Freedompro sensor."""
def __init__(self, device, coordinator):
"""Initialize the Freedompro sensor."""
super().__init__(coordinator)
self._attr_name = device["name"]
self._attr_unique_id = device["uid"]
self._type = device["type"]
self._attr_device_info = DeviceInfo(
identifiers={
(DOMAIN, self.unique_id),
},
manufacturer="Freedompro",
model=device["type"],
name=self.name,
)
self._attr_device_class = DEVICE_CLASS_MAP[device["type"]]
self._attr_state_class = STATE_CLASS_MAP[device["type"]]
self._attr_native_unit_of_measurement = UNIT_MAP[device["type"]]
self._attr_native_value = 0
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
device = next(
(
device
for device in self.coordinator.data
if device["uid"] == self.unique_id
),
None,
)
if device is not None and "state" in device:
state = device["state"]
self._attr_native_value = state[DEVICE_KEY_MAP[self._type]]
super()._handle_coordinator_update()
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
|
vrcmarcos/django-http-model | core/urls.py | Python | mit | 876 | 0 | """core URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add | an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls i | mport url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from test_app import views as test_app_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^test_app/companies/', test_app_views.companies_list_view),
]
|
nealegibson/Infer | src/MCMC_BGibbs.py | Python | gpl-3.0 | 7,699 | 0.039356 |
import numpy as np
np.seterr(divide='ignore') #ignore errors in log division
np.seterr(all='ignore') #ignore all floating-point errors, not just division
import sys
import time
##########################################################################################
def BGMCMC(LogPosterior,gp,post_args,ch_len,ep,gibbs_index,chain_filenames='MCMC_chain',n_chains=0,\
adapt_limits=(0,0,0),glob_limits=(0,0,0),thin=1,orth=0,acc=0.234):
"""
Generalisation of the MCMC.py code to allow for blocked Gibbs sampling. See MCMC
  docstring for details. Here I've added a Gibbs index, which is just an
  array of the same size as the inputs, giving the order in which to update
  each block. 0 indicates the parameter is not to vary.
eg gibbs_index = [0,1,0,1,2,2,3] will vary parameters 1, 2 then 3 in turn, evaluating
the log posterior at each and accepting them according to the MH rule.
Note the logPosterior will be evaluated ch_len * no_gibbs_steps times, and is especially
useful when using the InferGP class posterior, which only constructs/inverts the covariance
when necessary, and stores previous results
acc - target acceptance ratio - for infinite iid Gaussian dist -> 23.4%, for single par
is 44%. These will be varied independently for each block, and can have a different target
acceptance if an array is provided
When orth=0 (ie default) be aware that parameters in different blocks may still be
correlated. This is taken into account in the separate scaling to some degree, but
highly correlated variables should probably be in the same block, or alternatively set
orth = 1 for orthogonal steps (an intermediate solution is possible, but I couldn't be
bothered coding it right now)
"""
#first set chain filenames
if n_chains > 0: chain_filenames = ["MCMC_chain_%d" % ch_no for ch_no in range(1,n_chains+1)]
#print parameters
PrintParams(chain_filenames,ch_len,LogPosterior,adapt_limits,glob_limits,gp,ep,gibbs_index)
print ('-' * 80)
#prep gibbs array
gibbs = np.array(gibbs_index)
no_steps = gibbs.max()
gi = range(no_steps)
for q in range(1,no_steps+1):
gi[q-1] = np.where(gibbs==q) #get index to the gibbs steps
####### loop over chains ###############
for n,chain in enumerate(chain_filenames):
#initialise parameters
p,e = np.copy(gp),np.copy(ep)
p_acc,L_acc = np.copy(p),-np.inf
#arrays for storing results
ParArr = np.zeros((ch_len/thin)*len(p)).reshape(ch_len/thin,len(p))
PostArr = np.zeros(ch_len/thin)
AccArr = np.zeros(ch_len*no_steps).reshape(ch_len,no_steps) #acceptance rate for each Gibbs block
#jump parameters
#error array computed in advance - much faster to compute as a block
G = np.zeros(no_steps) #set default G depending on no of varying parameters per block
for q in range(1,no_steps+1): G[q-1] = (2.4**2/(gibbs==q).sum())
Garr = np.array([G[v-1] if v>0 else 0 for v in gibbs])
ACC = np.ones(no_steps) * acc
K = np.diag(e**2) #create starting (diagonal) covariance matrix
#RA = np.random.normal(0.,1.,len(p)*ch_len).reshape(ch_len,len(p)) * e * G
np.random.seed()
    RandArr = np.random.multivariate_normal(np.zeros(p.size),K,ch_len) * Garr
#set columns to zero after too! - for large K sometimes zero variance parameters have small random scatter
RandArr[:][:,np.where(e==0.)[0]] = 0.
#print "Computing Chain %d: '%s' " % (n+1,chain),
start = time.time()
####### individual chain ###############
for i in xrange(ch_len):
if i % ((ch_len)/20) == 0:
PrintBar(n,chain,i,ch_len,AccArr,start,no_steps)
#sys.stdout.write('#'); sys.stdout.flush();
#Blocked Gibbs algorithm
#cycle over Gibbs steps
for q in range(no_steps):
#gi = np.where(gibbs==q) #get index to the gibbs steps
#print "step = ",q,
p_prop = np.copy(p_acc)
p_prop[gi[q]] += RandArr[i][gi[q]]
#print p_prop
L_prop = LogPosterior(p_prop,*post_args)
#Metropolis algorithm to accept step
if np.random.rand() < np.exp(L_prop - L_acc):
p_acc,L_acc = p_prop,L_prop
AccArr[i][q] = 1 #update acceptance array (store no. acceptances for gibbs)
# print "acc"
#add new posterior and parameters to chain
if i%thin==0: ParArr[i/thin],PostArr[i/thin] = p_acc,L_acc
#adaptive stepsizes
if (i <= adapt_limits[1]) and (i > adapt_limits[0]):
if (i-adapt_limits[0]) % ((adapt_limits[1]-adapt_limits[0])/adapt_limits[2]) == 0:
#RA = np.random.normal(0.,1.,len(p)*ch_len).reshape(ch_len,len(p)) * e * G
if orth: K = np.diag(((e + 4*ParArr[adapt_limits[0]/thin:i/thin].std(axis=0))/5.)**2.) #for diagonal covariance matrix
else: K = (K + 4.*np.cov(ParArr[adapt_limits[0]/thin:i/thin],rowvar=0))/5.
K[np.where(e==0.)],K[:,np.where(e==0.)] = 0.,0. #reset error=0. values to 0.
          RandArr[i:] = np.random.multivariate_normal(np.zeros(p.size),K,ch_len-i) * Garr
RandArr[i:][:,np.where(e==0.)[0]] = 0. #set columns to zero after too!
#adaptive global step size
if (i <= glob_limits[1]) and (i > glob_limits[0]):
if (i-glob_limits[0]) % ((glob_limits[1]-glob_limits[0])/glob_limits[2]) == 0:
for q in range(no_steps): #update G for each block
G[q] *= (1./ACC[q]) * min(0.9,max(0.1,AccArr[:,q][i-(glob_limits[1]-glob_limits[0])/glob_limits[2]:i].sum()/((glob_limits[1]-glob_limits[0])/glob_limits[2])))
Garr = np.array([G[v-1] if v>0 else 0 for v in gibbs])
          RandArr[i:] = np.random.multivariate_normal(np.zeros(p.size),K,ch_len-i) * Garr
RandArr[i:][:,np.where(e==0.)[0]] = 0.
#print G
####### end individual chain ###########
PrintBar(n,chain,i,ch_len,AccArr,start,no_steps); print
np.save(chain+".npy",np.concatenate([PostArr.reshape(PostArr.size,1),ParArr],axis=1))
####### end loop over chains ############
print ('-' * 80)
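# Hypothetical call illustrating the gibbs_index convention from the docstring
# (parameter values and block assignments below are made up for this sketch):
#
#   gp = np.array([0.1, 1.0, 0.2, 2.0, 3.0, 4.0, 5.0])
#   ep = np.array([0.0, 0.1, 0.0, 0.2, 0.3, 0.3, 0.5]) #zero step -> fixed par
#   gibbs_index = [0, 1, 0, 1, 2, 2, 3] #pars 2 and 4 form block 1, etc.
#   BGMCMC(LogPosterior, gp, (), 10000, ep, gibbs_index, n_chains=2)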
##########################################################################################
def PrintBar(n,chain,i,ch_len,AccArr,start,no_steps):
ts = time.time()-start
if i <= ch_len/5:
a_str = ""
a_str2 = ""
else:
a_str = "" if i <= ch_len/5 else ", acc = %.2f%%" % (100.*np.float(AccArr[ch_len/5:i].sum())/no_steps/(i-ch_len/5+1))
a_str2 = "["+"".join(["%.2f%%," % (100.*np.float(AccArr[ch_len/5:i].sum(axis | =0)[q])/(i-ch_len/5+1)) for q in range(no_steps)])+"\b]"
print ("\rComputing Chain %d: '%s' %-20s t = %dm %.2fs%s" % (n+1,chain,'#'*(i/(ch_len/20)+1),ts // 60., ts % 60.,a_str),)
print (a_str2,)
sys.stdout.flush();
##########################################################################################
def PrintParams(ch_fil | enames,ch_len,posterior,adapt_limits,glob_limits,gp,ep,gibbs):
print ("Infer.BGMCMC runnning...")
print ("Blocked Gibbs MCMC parameters:")
print (" No Chains: %d" % len(ch_filenames))
print (" Chain Length: %d" % ch_len)
if(adapt_limits[2]): print (" Relative-step adaption limits: (%d,%d,%d)" % (adapt_limits[0],adapt_limits[1],adapt_limits[2]))
if(glob_limits[2]): print (" Global-step adaption limits: (%d,%d,%d)" % (glob_limits[0],glob_limits[1],glob_limits[2]))
print (" Computing chains:", ch_filenames)
print (" Posterior probability function: ", posterior)
print (" Function params <value prop_size [block]>:")
for q in range(len(gp)):
print (" p[%d] = %f +- %f [%d]" % (q,gp[q],ep[q],gibbs[q]))
##########################################################################################
|
tempbottle/concoord | concoord/threadingobject/dboundedsemaphore.py | Python | bsd-3-clause | 1,447 | 0.001382 | """
@author: Deniz Altinbuken, Emin Gun Sirer
@note: Bounded Semaphore Coordination Object
@copyright: See LICENSE
"""
from threading import Lock
from concoord.exce | ption import *
class DBoundedSemaphore():
def __init__(self, count=1):
if count < 0:
raise ValueError
self.__count = int(count)
self.__queue = []
| self.__atomic = Lock()
self._initial_value = int(count)
def __repr__(self):
return "<%s count=%d init=%d>" % (self.__class__.__name__, self.__count, self._initial_value)
def acquire(self, _concoord_command):
with self.__atomic:
self.__count -= 1
if self.__count < 0:
self.__queue.append(_concoord_command)
raise BlockingReturn
else:
return True
def release(self, _concoord_command):
with self.__atomic:
if self.__count == self._initial_value:
                raise ValueError("Semaphore released too many times")
else:
self.__count += 1
if len(self.__queue) > 0:
unblockcommand = self.__queue.pop(0)
# add the popped command to the exception args
unblocked = {}
unblocked[unblockcommand] = True
raise UnblockingReturn(unblockeddict=unblocked)
def __str__(self):
return "<%s object>" % (self.__class__.__name__)
|
vanant/googleads-dfa-reporting-samples | python/v2.1/get_files.py | Python | apache-2.0 | 2,171 | 0.005988 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed | to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limi | tations under the License.
"""This example illustrates how to get a list of all the files for a profile.
Tags: files.list
"""
__author__ = ('api.jimper@gmail.com (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to use')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
try:
# Construct a get request for the specified profile.
request = service.files().list(profileId=profile_id)
while True:
# Execute request and print response.
response = request.execute()
for report_file in response['items']:
print ('File with ID %s and file name "%s" has status "%s".'
% (report_file['id'], report_file['fileName'],
report_file['status']))
      if response['items'] and response.get('nextPageToken'):
request = service.files().list_next(request, response)
else:
break
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
karllessard/tensorflow | tensorflow/python/distribute/v1/cross_device_ops_test.py | Python | apache-2.0 | 34,390 | 0.006077 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps in v1 graph mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import threading
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import kernels
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
def _get_devices(devices):
if isinstance(devices, (tuple, list)):
return tuple(device_util.resolve(d) for d in devices)
elif isinstance(devices, value_lib.DistributedValues):
return devices._devices
elif isinstance(devices, ops.Tensor):
return (device_util.resolve(devices.device),)
return (device_util.resolve(devices),)
def _make_per_replica(values, devices, regroup=False):
devices = _get_devices(devices)
assert len(values) == len(devices)
# We simulate the result of regroup called on PerReplica which strips the
# PerReplica wrapper if it has only one value.
if len(values) == 1 and regroup:
with ops.device(devices[0]):
placed_v = array_ops.identity(values[0])
return placed_v
index = []
for d, v in zip(devices, values):
with ops.device(d):
placed_v = array_ops.identity(v)
index.append(placed_v)
return distribute_utils.regroup(index)
# pylint: disable=g-doc-args,g-doc-return-or-yield
def _fake_mirrored( | value, devices):
"""Create a faked Mirrored object for testing.
All components of the returned Mirrored have the same objects, which is not
true in reality.
"""
devices = _get_devices(devices)
values = []
for d in devices:
with ops.device(d):
values.append(array_ops.identity(value))
return distribute_utils.regroup(
values,
wrap_class=value_lib.Mirrored)
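# e.g. _fake_mirrored(1., ["/device:CPU:0", "/device:GPU:0"]) regroups an
# identity copy of 1. per device into one Mirrored value (test-only; real
# Mirrored components are generally distinct objects, and a single-device
# input is automatically unwrapped by regroup).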
def _make_index | ed_slices(values, indices, dense_shape, device):
with ops.device(device):
tensor = ops.IndexedSlices(
values=constant_op.constant(values),
indices=constant_op.constant(indices),
dense_shape=constant_op.constant(dense_shape))
return tensor
def _make_mirrored_indexed_slices(devices, values, indices, dense_shape):
values = [_make_indexed_slices(values, indices, dense_shape, d)
for d in devices]
return distribute_utils.regroup(
values,
wrap_class=value_lib.Mirrored)
_cpu_device = "/device:CPU:0"
class CrossDeviceOpsTestBase(test.TestCase, parameterized.TestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertIsInstance(left, ops.IndexedSlices)
self.assertIsInstance(right, ops.IndexedSlices)
self.assertEqual(
device_util.resolve(left.device), device_util.resolve(right.device))
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def _assert_mirrored_equal(self,
left_list,
right_list,
sess=None,
run_options=None):
if not isinstance(left_list, list):
left_list, right_list = [left_list], [right_list]
for left, right in zip(left_list, right_list):
self.assertEqual(type(left), type(right))
# Convert Mirrored to a list since sess.run(Mirrored) only returns one
# value.
if isinstance(left, value_lib.Mirrored):
left, right = left.values, right.values
else:
# When there's only one replica Mirrored is automatically unwrapped.
left, right = [left], [right]
for left_value, right_value in zip(left, right):
self.assertEqual(
device_util.resolve(left_value.device),
device_util.resolve(right_value.device))
# Densify IndexedSlices.
left = [ops.convert_to_tensor(v) for v in left]
right = [ops.convert_to_tensor(v) for v in right]
if not context.executing_eagerly():
left, right = sess.run((left, right), options=run_options)
for left_value, right_value in zip(left, right):
self.assertAllEqual(left_value, right_value)
def _testReductionAndBroadcast(self, cross_device_ops, devices):
if context.num_gpus() < sum(1 for d in devices if "GPU" in d.upper()):
self.skipTest("Not enough GPUs")
with self.cached_session() as sess:
values = [constant_op.constant(float(d)) for d in range(len(devices))]
per_replica = _make_per_replica(values, devices)
mean = (len(devices) - 1.) / 2.
values_2 = [constant_op.constant(d + 1.0) for d in range(len(devices))]
per_replica_2 = _make_per_replica(values_2, devices)
mean_2 = mean + 1.
destination_mirrored = _fake_mirrored(1., devices)
destination_different = _fake_mirrored(1.,
device_util.resolve(_cpu_device))
destination_str = device_util.resolve(_cpu_device)
all_destinations = [
destination_mirrored,
destination_different,
destination_str,
]
# test reduce()
for destinations in all_destinations:
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica,
destinations=destinations), _fake_mirrored(mean, destinations),
sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2, destinations), sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica,
destinations=destinations),
_fake_mirrored(mean * len(devices), destinations), sess)
self._assert_mirrored_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2 * len(devices), destinations), sess)
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_mirrored_equal(
cross_device_ops.batch_reduce(reduce_util.ReduceOp.MEAN,
[(per_replica, d1),
|
jencce/stuff | py/first.py | Python | gpl-2.0 | 123 | 0 | #! /usr/bin/env python
current = | u'b'
print ord(current)
print ord('a')
print ord('1')
print ord('\n')
# ip 1 2 3
# ./first.py
|
mldbai/mldb | testing/MLDB-2107-scalar-format.py | Python | apache-2.0 | 2,377 | 0.00589 | #
# MLDB-2107-scalar-format.py
# Mathieu Marquis Bolduc, 2017-01-10
# This file is part of MLDB. Copyright 2017 mldb.ai inc. All rights reserved.
#
from mldb import mldb, MldbUnitTest, ResponseException
class MLDB2107ScalarFormatTest(MldbUnitTest): # noqa
@classmethod
def setUpClass(cls):
ds = mldb.create_dataset({'id' : 'ds', 'type' : 'sparse.mutable'})
ds.record_row('row0', [['x', 'A', 0]])
ds.record_row('row1', [['x', 'B', 0]])
ds.commit()
def test_int(self):
n = mldb.get('/v1/query', q="select x from (select 17 as x)", format='atom').json()
self.assertEqual(17, n)
def test_float(self):
n = mldb.get('/v1/query', q="select x from (select 2.3 as x)", format='atom').json()
self.assertEqual(2.3, n)
def test_string(self):
n = mldb.get('/v1/query', q="select x from (select 'blah' as x)", format='atom').json()
self.assertEqual('blah', n)
def test_bool(self):
n = mldb.get('/v1/query', q="select x from (select false as x)", format='atom').json()
self.assertEqual(False, n)
def test_error_columns(self):
msg = "Query with atom format returned multiple columns"
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="se | lect x,y from (select false as x, 1 as y)", format='atom').json()
def test_error_rows(self):
msg = "Query with atom format return | ing multiple rows"
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select x from ds", format='atom').json()
def test_multiple_rows_limit(self):
n = mldb.get('/v1/query', q="select x from ds limit 1", format='atom').json()
self.assertEqual('B', n)
def test_error_no_rows(self):
msg = "Query with atom format returned no rows."
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select x from ds where x = 'patate'", format='atom').json()
def test_error_no_column(self):
msg = "Query with atom format returned no column"
with self.assertRaisesRegex(ResponseException, msg):
n = mldb.get('/v1/query', q="select COLUMN EXPR (WHERE columnName() IN ('Z')) from (select 17 as x)", format='atom').json()
if __name__ == '__main__':
mldb.run_tests()
|
sietse/pyietflib | TestSuite/rfc2045TestSuite/__init__.py | Python | apache-2.0 | 1,363 | 0.004402 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#-----------------------------------------------------------------------------
"""IETF RFC 2046 Language Tag Specification unit test suite."""
__author__ = ('Lance Finn Helsten',)
__ver | sion__ = '1.0'
__copyright__ = """Copyright 2011 Lance Finn Helsten (helsten@ac | m.org)"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "reStructuredText en"
import os
import unittest
from TestSuite import utils
def test_suite():
return unittest.defaultTestLoader.discover(os.path.dirname(__file__), pattern='test_*')
def smoke_suite():
suite = utils.smoke_suite(os.path.dirname(__file__))
return suite
def sanity_suite():
suite = utils.sanity_suite(os.path.dirname(__file__))
return suite
def shakedown_suite():
suite = utils.shakedown_suite(os.path.dirname(__file__))
return suite
|
mikeireland/chronostar | projects/scocen/print_components_overlaps_table_for_paper.py | Python | mit | 1,817 | 0.017611 | """
Print data for table `componentsoverlaps.tex` in the paper
Numbers of members
Ages, crossing time
"""
import numpy as np
from astropy.table import Table, unique
############################################
# Some things are the same for all the plotting scripts and we put
# this into a single library to avoid confusion.
import scocenlib as lib
data_filename = lib.data_filename
comps_filename = lib.comps_filename
compnames = lib.compnames
good_comps = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'T', 'U']
############################################
# Minimal probability required for membership
pmin_membership = 0.5
pmin_memberships = [0.5, 0.8, 0.9]
############################################
tab = Table.read(data_filename)
comps = Table.read(comps_filename)
#~ print(len(tab), 'before unique')
#~ tab = unique(tab, keys='source_id')
#~ print(len(tab), 'after unique')
total50=0
total80=0
total90=0
for c in comps:
comp_ID = c['comp_ID']
if comp_ID not in good_comps:
continue
line = '%s & '%c['comp_ID']
for pmin_membership in pmin_memberships:
mask=tab['membership%s'%comp_ID]>pmin_membership
t=tab[mask]
line += '%d & '%len(t)
if comp_I | D not in ['H', 'I', 'B']: # Not ScoCen
if pmin_membership==0.5:
total50+=len(t)
e | lif pmin_membership==0.8:
total80+=len(t)
elif pmin_membership==0.9:
total90+=len(t)
age = c['Age']
crossing_time = c['Crossing_time']
# %.0f does the roundup properly while %d rounds everything down!
    line += '%.0f & %.0f '%(age, crossing_time)
line += ' \\\\'
print(line)
line = 'Total & %d & %d & %d & & \\\\'% (total50, total80, total90)
print(line)
|
Weasyl/misaka | misaka/constants.py | Python | mit | 406 | 0.002463 | # -*- coding: utf-8 -*-
import sys
from inspect import getmembers
from ._hoedown import lib
def _set_constants():
is_int = lambda n: isinstance(n, int)
for name, value in getmembers(lib, is_int):
if not name.startswith('HOEDOWN_'):
continue
setattr(sys.modules[__name__] | , name[8:] | , value)
if not hasattr(sys.modules[__name__], 'EXT_TABLES'):
_set_constants()
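# After import, the module namespace mirrors the stripped lib constants, e.g.
# (assuming the built _hoedown extension exposes HOEDOWN_EXT_TABLES):
#
#   from misaka import constants
#   constants.EXT_TABLES == lib.HOEDOWN_EXT_TABLES # True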
|
zaycev/n7 | n7/web/urls.py | Python | mit | 586 | 0.001706 | #!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
from django.conf.urls import url
from django.conf.urls import patterns
| from django.shortcuts import redirect
urlpatterns = patterns(
"",
url(r"^$", "n7.web. | n7.views.demo", name="demo"),
url(r"^triples/$", "n7.web.n7.views.trainer", name="trainer"),
url(r"^novels/$", "n7.web.n7.views.trainer_add", name="trainer_post"),
)
|
pankajp/pyface | pyface/ui/qt4/code_editor/gutters.py | Python | bsd-3-clause | 3,820 | 0.003141 | #------------------------------------------------------------------------------
# Copyright (c) 2010, Enthought Inc
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD license.
#
# Author: Enthought Inc
# Description: <Enthought pyface code editor>
#------------------------------------------------------------------------------
import math
from pyface.qt import QtCore, QtGui
class GutterWidget(QtGui.QWidget):
min_width = 5
background_color = QtGui.QColor(220, 220, 220)
def sizeHint(self):
return QtCore.QSize(self.min_width, 0)
def paintEvent(self, event):
""" Paint the line numbers.
"""
painter = QtGui.QPainter(self)
painter.fillRect(event.rect(), QtCore.Qt.lightGray)
def wheelEvent(self, event):
""" Delegate mouse wheel events to parent for seamless scrolling.
"""
self.parent().wheelEvent(event)
class StatusGutterWidget(GutterWidget):
""" Draws status markers
"""
def __init__(self, *args, **kw):
super(StatusGutterWidget, self).__init__(*args, **kw)
self.error_lines = []
self.warn_lines = []
self.info_lines = []
def sizeHint(self):
return QtCore.QSize(10, 0)
def paintEvent(self, event):
""" Paint the line numbers.
"""
painter = QtGui.QPainter(self)
painter.fillRect(event.rect(), self.background_color)
cw = self.parent()
pixels_per_block = self.height()/float(cw.blockCount())
for line in self.info_lines:
painter.fillRect(QtCore.QRect(0, line*pixels_per_block, self.width(), 3),
QtCore.Qt.green)
for line in self.warn_lines:
painter.fillRect(QtCore.QRect(0, line*pixels_per_block, self.width(), 3),
QtCore.Qt.yellow)
for line in self.error_lines:
painter.fillRect(QtCore.QRect(0, line*pixels_per_block, self.width(), 3),
QtCore.Qt.red)
class LineNumberWidget(GutterWidget):
""" Draw line numbers.
"""
min_char_width = 4
def fontMetrics(self):
        # QWidget's fontMetrics method does not provide up-to-date font
        # metrics, just ones corresponding to the initial font
return QtGui.QFontMetrics(self.font)
def set_font(self, font):
self.font = font
def digits_width(self):
nlines = max(1, self.parent().blockCount())
ndigits = max(self.min_char_width,
int(math.floor(math.log10(nlines) + 1)))
width = max(self.fontMetrics().width(u'0' * ndigits) + 3,
self.min_width)
return width
def sizeHint(self):
return QtCore.QSize(self.digits_width(), 0)
def paintEvent(self, event):
""" Paint the line numbers.
"""
painter = QtGui.QPainter(self)
painter.setFont(self.font)
| painter.fillRect(event.rect(), self.background_color)
cw = self.parent()
block = cw.firstVisibleBlock()
blocknum = block.blockNumber()
top = cw.blockBoundingGeometry(block).translated(
cw.contentOffset()).top()
bottom = top + int(cw.blockBoundingRect(block).height())
while block.isValid() and top <= event.rect().bottom():
if block.isVisible() and bottom >= ev | ent.rect().top():
painter.setPen(QtCore.Qt.black)
painter.drawText(0, top, self.width() - 2,
self.fontMetrics().height(),
QtCore.Qt.AlignRight, str(blocknum + 1))
block = block.next()
top = bottom
bottom = top + int(cw.blockBoundingRect(block).height())
blocknum += 1
|
sharkspeed/dororis | packages/python/flask/flask-dog-book/5-chapter/main.py | Python | bsd-2-clause | 2,710 | 0.001864 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask import Flask, render_template, session, redirect, url_for, flash
from flask_wtf import FlaskForm
from flask_bootstrap import Bootstrap
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = 'had to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
Bootstrap(app)
db = SQLAlchemy(app)
manager = Manager(app)
migrate = Migrate(app, db)
class NameForm(FlaskForm):
name = StringField('What is your name? ', validators=[DataRequired()])
submit = SubmitField('Submit :233')
# model definition
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column( | db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return '<User %r>' % self.username
# View Functions
@app.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
old_name = session.get('name')
if old_name is not None and old_name != form.name.data:
            flash('Looks like you have changed your name!')
user = User.query.filter_by(use | rname=form.name.data).first()
if user is None:
user = User(username=form.name.data)
db.session.add(user)
session['known'] = False
else:
session['known'] = True
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html',
form=form,
name=session.get('name', None),
known=session.get('known', False))
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
if __name__ == "__main__":
# app.run(debug=True)
# 自动在 shell 中导入 app db User Role
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.run()
|
rjschwei/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/models/availability_set.py | Python | mit | 3,239 | 0.001544 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AvailabilitySet(Resource):
"""Create or update availability set parameters.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param platform_update_domain_count: Update Domain count.
:type platform_update_domain_count: int
:param platform_fault_domain_count: Fa | ult Domain count.
:type platform_fault_domain_count: int
:par | am virtual_machines: A list of references to all virtual machines in
the availability set.
:type virtual_machines: list of :class:`SubResource
<azure.mgmt.compute.models.SubResource>`
:ivar statuses: The resource status information.
:vartype statuses: list of :class:`InstanceViewStatus
<azure.mgmt.compute.models.InstanceViewStatus>`
:param managed: If the availability set supports managed disks.
:type managed: bool
:param sku: Sku of the availability set
:type sku: :class:`Sku <azure.mgmt.compute.models.Sku>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'statuses': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
'managed': {'key': 'properties.managed', 'type': 'bool'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(self, location, tags=None, platform_update_domain_count=None, platform_fault_domain_count=None, virtual_machines=None, managed=None, sku=None):
super(AvailabilitySet, self).__init__(location=location, tags=tags)
self.platform_update_domain_count = platform_update_domain_count
self.platform_fault_domain_count = platform_fault_domain_count
self.virtual_machines = virtual_machines
self.statuses = None
self.managed = managed
self.sku = sku
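# Hypothetical construction of the model (values are illustrative only):
#
#   avset = AvailabilitySet(location='westus',
#                           platform_update_domain_count=5,
#                           platform_fault_domain_count=3,
#                           managed=True)
#   # read-only fields such as `statuses` remain None until populated from a
#   # service response via the _attribute_map above.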
|
Lilykos/invenio | invenio/modules/jsonalchemy/jsonext/engines/sqlalchemy.py | Python | gpl-2.0 | 5,159 | 0.002714 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the |
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundatio | n, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""SQLAlchemy storage engine implementation."""
import six
from flask.helpers import locked_cached_property
from werkzeug import import_string
from invenio.modules.jsonalchemy.storage import Storage
class SQLAlchemyStorage(Storage):
"""Implement database backend for SQLAlchemy model storage."""
# FIXME: This storage engine should use transactions!
    def __init__(self, model, **kwargs):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.__init__`."""
        self.__db = kwargs.get('sqlalchemy_backend',
                               'invenio.ext.sqlalchemy:db')
self.__model = model
@locked_cached_property
def db(self):
"""Return SQLAlchemy database object."""
if isinstance(self.__db, six.string_types):
self.__db = import_string(self.__db)
if not self.__db.engine.dialect.has_table(self.__db.engine,
self.model.__tablename__):
self.model.__table__.create(bind=self.__db.engine)
self.__db.session.commit()
return self.__db
@locked_cached_property
def model(self):
"""Return SQLAchemy model."""
if isinstance(self.__model, six.string_types):
return import_string(self.__model)
return self.__model
def save_one(self, json, id=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.save_one`."""
if id is None:
id = json['_id']
self.db.session.add(self.model(id=id, json=json))
self.db.session.commit()
def save_many(self, jsons, ids=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.save_many`."""
if ids is None:
ids = map(lambda j: j['_id'], jsons)
self.db.session.add_all([self.model(id=id, json=json)
for id, json in zip(ids, jsons)])
self.db.session.commit()
def update_one(self, json, id=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.update_one`."""
        #FIXME: what if we get only the fields that have changed
if id is None:
id = json['_id']
self.db.session.merge(self.model(id=id, json=json))
self.db.session.commit()
def update_many(self, jsons, ids=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.update_many`."""
        #FIXME: what if we get only the fields that have changed
if ids is None:
ids = map(lambda j: j['_id'], jsons)
for id, json in zip(ids, jsons):
self.db.session.merge(self.model(id=id, json=json))
self.db.session.commit()
def get_one(self, id):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_one`."""
return self.db.session.query(self.model.json)\
.filter_by(id=id).one().json
def get_many(self, ids):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_many`."""
for json in self.db.session.query(self.model.json)\
.filter(self.model.id.in_(ids))\
.all():
yield json[0]
def get_field_values(self, recids, field, repetitive_values=True, count=False,
include_recid=False, split_by=0):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_field_values`."""
#TODO
raise NotImplementedError()
def get_fields_values(self, recids, fields, repetitive_values=True, count=False,
include_recid=False, split_by=0):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_fields_values`."""
#TODO
raise NotImplementedError()
def search(self, query):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.search`."""
raise NotImplementedError()
def create(self):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.create`."""
if self.db.engine.dialect.has_table(
self.db.engine.connect(),
self.model.__tablename__):
assert self.model.query.count() == 0
else:
self.model.__table__.create(bind=self.db.engine)
def drop(self):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.create`."""
self.model.__table__.drop(bind=self.db.engine)
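# Minimal usage sketch (assuming `mymodule.models:RecordJson` is an importable
# SQLAlchemy model with `id` and `json` columns; the names are illustrative):
#
#   storage = SQLAlchemyStorage('mymodule.models:RecordJson')
#   storage.save_one({'_id': 1, 'title': 'demo'})
#   storage.update_one({'_id': 1, 'title': 'demo v2'})
#   assert storage.get_one(1)['title'] == 'demo v2'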
|
yongwen/teletraan | deploy-board/deploy_board/webapp/helpers/tags_helper.py | Python | apache-2.0 | 1,031 | 0.007759 | # Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from deploy_board.webapp.helpers.deployclient import DeployClient
deploy_client = DeployClient()
#In sync with deploy-service/common/src/main/java/com/pinterest/deployservice/bean/TagValue.j | ava
class TagValue(object):
BAD_BUILD="BAD_BUILD"
GOOD_BUILD="GOOD_ | BUILD"
def get_latest_by_target_id(request, target_id):
return deploy_client.get("/tags/targets/%s/latest" % target_id, request.teletraan_user_id.token)
|
hamukichi/ironpycompiler | ironpycompiler/detect.py | Python | mit | 11,741 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Module for detecting where the IronPython executables exist.
"""
import itertools
import os
import glob
# Original modules
from . import exceptions
from . import constants
from . import datatypes
from . import process
def search_ipy_reg(regkeys=None, executable=constants.EXECUTABLE,
detailed=False):
"""Search for IronPython regisitry keys.
This function searches for IronPython keys in the Windows registry,
and returns a dictionary showing the versions of IronPython and their
locations (the paths to the IronPython directories).
:param list regkeys: (optional) The IronPython registry keys that
should be looked for.
:param str executable: (optional) The name of the IronPython
executable.
:param bool detailed: (optional) If this parameter is true, the key of the
dictionary will be an instance of
:class:`ironpycompiler.datatypes.HashableVersion`
instead of string, in order to provide detailed
information of versions.
:return: The versions of IronPython and their locations
:rtype: dict
:raises ironpycompiler.exceptions.IronPythonDetectionError: if IronPython
keys cannot be
found
.. versionadded:: 0.9.0
.. versionchanged:: 0.10.1
Solved the problem that the default value for the argument ``regkeys``
was mutable.
.. versionchanged:: 1.0.0
Validates the found executables using :func:`validate_pythonexe`. The
parameters ``detailed`` and ``executable`` were added.
"""
if regkeys is None:
regkeys = constants.REGKEYS
try:
import _winreg
except ImportError:
raise exceptions.IronPythonDetectionError(
msg="Cannot import a module for accessing the Windows registry.")
foundipys = dict()
ipybasekey = None
    # Read the IronPython registry keys
for key in regkeys:
try:
ipybasekey = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE, key)
except WindowsError:
continue
else:
break
    if ipybasekey is None:
        raise exceptions.IronPythonDetectionError(
            msg="Could not find any IronPython registry key.")
else:
itr = itertools.count()
foundvers = []
for idx in itr:
try:
foundvers.append(_winreg.EnumKey(ipybasekey, idx))
            except WindowsError:  # no more matching subkeys to enumerate
break
foundipys = dict()
for ver in foundvers:
ipypathkey = _winreg.OpenKey(ipybasekey,
ver + "\\InstallPath")
ipy_dir = os.path.dirname(_winreg.QueryValue(ipypathkey, None))
ipy_exe = os.path.abspath(os.path.join(ipy_dir, executable))
try:
ipy_ver = validate_pythonexe(ipy_exe)
except exceptions.IronPythonValidationError:
continue
else:
if detailed:
foundipys[ipy_ver] = ipy_dir
else:
foundipys[ipy_ver.major_minor()] = ipy_dir
finally:
ipypathkey.Close()
ipybasekey.Close()
if len(foundipys) == 0:
raise exceptions.IronPythonDetectionError(
msg="Could not find any IronPython executable.")
return foundipys
def search_ipy_env(executable=constants.EXECUTABLE, detailed=False):
"""Search for IronPython directories included in the PATH variable.
This function searches for IronPython executables in your system,
reading the PATH environment variable, and gets their version
numbers, executing the executables.
This function returns a dictionary showing the versions of
IronPython and their locations (the paths to the IronPython
directories).
:param str executable: (optional) The name of the IronPython
executable.
:param bool detailed: (optional) If this parameter is true, the key of the
dictionary will be an instance of
:class:`ironpycompiler.datatypes.HashableVersion`
instead of string, in order to provide detailed
information of versions.
:return: The versions of IronPyt | hon and their locations
:rtype: dict
:raises ironpycompiler.exceptions.IronPyth | onDetectionError: if IronPython
cannot be found
.. versionadded:: 0.9.0
.. versionchanged:: 1.0.0
Validates the found executables using :func:`validate_pythonexe`. The
parameter ``detailed`` was added.
"""
ipydirpaths = []
foundipys = {}
for path in os.environ["PATH"].split(os.pathsep):
for match_path in glob.glob(os.path.join(path, executable)):
if os.access(match_path, os.X_OK):
ipydirpaths.append(os.path.dirname(match_path))
if len(ipydirpaths) == 0:
raise exceptions.IronPythonDetectionError(
msg="Could not find any executable file named %s." % executable)
for directory in ipydirpaths:
ipy_exe = os.path.abspath(os.path.join(directory, executable))
try:
ipy_ver = validate_pythonexe(ipy_exe)
except exceptions.IronPythonValidationError:
continue
else:
if detailed:
foundipys[ipy_ver] = directory
else:
foundipys[ipy_ver.major_minor()] = directory
if len(foundipys) == 0:
raise exceptions.IronPythonDetectionError(
msg=("{} exists but is not the IronPython executable."
).format(executable))
else:
return foundipys
def search_ipy(regkeys=None, executable=constants.EXECUTABLE, detailed=False):
"""Search for IronPython directories.
This function searches for IronPython directories using both
:func:`search_ipy_env` and :func:`search_ipy_reg`, and returns a
dictionary showing the versions of IronPython and their locations
(the paths to the IronPython directories).
:param str executable: (optional) The name of the IronPython
executable.
:param list regkeys: (optional) The IronPython registry keys that
should be looked for.
:param bool detailed: (optional) If this parameter is true, the key of the
dictionary will be an instance of
:class:`ironpycompiler.datatypes.HashableVersion`
instead of string, in order to provide detailed
information of versions.
:return: The versions of IronPython and their locations
:rtype: dict
.. versionadded:: 0.9.0
.. versionchanged:: 0.10.1
Solved the problem that the default value for the argument ``regkeys``
was mutable.
.. versionchanged:: 1.0.0
The parameter ``detailed`` was added.
"""
if regkeys is None:
regkeys = constants.REGKEYS
try:
foundipys = search_ipy_reg(regkeys, executable, detailed)
except exceptions.IronPythonDetectionError:
foundipys = dict()
try:
envipys = search_ipy_env(executable, detailed)
except exceptions.IronPythonDetectionError:
envipys = dict()
for k, v in envipys.items():
if k not in foundipys:
foundipys[k] = v
if len(foundipys) == 0:
raise exceptions.IronPythonDetectionError(
msg="Could not find any IronPython directory.")
else:
return foundipys
def auto_detect(detailed=False):
"""Decide the optimum version of IronPython in your system.
This function decides the most suitable version of IronPython
in your system for the version of CPython on which IronPyComp |
freizeitnerd/sprinkler | www/cgi-bin/cgi-switch-valve.py | Python | mit | 222 | 0.040541 | #!/usr/bin/python
def renderJson(status, body):
print "Status: ", status
print "Co | ntent-Type: application/json"
print "C | ontent-Length: %d" % (len(body))
print ""
print body
renderJson("200 OK", "{foo: 'bar'}") |
TelematicaUSM/EduRT | backend_modules/__init__.py | Python | agpl-3.0 | 902 | 0 | # -*- coding: UTF-8 -*-
# COPYRIGHT (c) 2016 Cristóbal Ganter
#
# GNU AFFERO GENERAL PUBLIC LICENSE
# Version 3, 19 November 2007
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from controller import MSGHandler
from src.load import load_wsclasses
load_wsclasses(__name__, MSGHandler)
|
magenta-aps/mox | oio_rest/oio_rest/utils/encode_token.py | Python | mpl-2.0 | 893 | 0 | # Copyright (C) 2015-2019 Magenta ApS, https://magenta.dk.
# Contact: info@magenta.dk.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import shutil
import os
import tempfile
from base64 import b64encode
import gzip
# Outputs the Authorization headers for the given SAML assertion token
if len(sys.argv) > 1:
assertion_file = sys.argv[1]
else:
assertion_file = 'test_auth_data/sample-saml2-assertion.xml'
(handle, tmpfilename) = tempfile.mkstemp('.gz')
with open(assertion_file, 'rb') as f_in, gzip.open(tmpfilename, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
with open(tmpfilename, 'rb') as f:  # read the gzipped bytes back in binary mode
zipped_data = f.read()
print("Authorization: saml-gzipped %s" % b64encode(zipped_data))
os.remove(tmpfilename)
|
achilleas-k/gnome15 | src/gnome15/util/g15os.py | Python | gpl-3.0 | 4,161 | 0.006008 | # Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2010 Brett Smith <tanktarta@blueyonder.co.uk>
# Copyright (C) 2013 Nuno Araujo <nuno.araujo@russo79.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Gnome15 utilities to work with the system (running commands, manipulating the
filesystem, getting OS information...)
'''
from gnome15 import g15globals
import os
# Logging
import logging
logger = logging.getLogger(__name__)
def run_script(script, args = None, background = True):
"""
Runs a python script from the scripts directory.
Keyword arguments:
script: the filename of the script to run
args: an array of arguments to pass to the script (optional, None by default)
background: Set to run the script in the background (optional, True by default)
"""
a = ""
if args:
for arg in args:
a += "\"%s\"" % arg
p = os.path.realpath(os.path.join(g15globals.scripts_dir,script))
logger.info("Running '%s'", p)
return os.system("\"%s\" %s %s" % ( p, a, " &" if background else "" ))
def get_command_output(cmd):
"""
    Runs a command on the shell and returns its status code and output
Keyword arguments:
cmd: the command to run (either full path, or just the name if the command
is in the %PATH)
Returns
A tuple with the exit code of the command and the output made on stdout by
the command.
Note: the last '\n' is stripped from the output.
"""
pipe = os.popen('{ ' + cmd + '; } 2>/dev/null', 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
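# Usage sketch for get_command_output (command and output are illustrative):
#
#     sts, text = get_command_output('uname -r')
#     if sts == 0:
#         print(text)  # e.g. "5.15.0-91-generic"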
def mkdir_p(path):
"""
    Creates a directory and its parents if needed, unless it already exists.
Keyword arguments:
path: the full path to the directory to create.
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
logger.debug("Error when trying to create path %s", path, exc_info = exc)
        import errno
if exc.errno == errno.EEXIST:
pass
else: raise
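# Note: on Python >= 3.2 the same effect is available directly via
# os.makedirs(path, exist_ok=True); the EEXIST check above is the
# Python 2 compatible equivalent.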
def full_path_of_program(program_name):
"""
Search for program_name in all the directories declared in the PATH
environment variable
Keyword arguments:
program_name: the name of the program to search for
Returns:
Full path name of the program_name, None if program_name was not
    found in PATH.
"""
    for directory in os.environ['PATH'].split(':'):
        full_path = os.path.join(directory, program_name)
        if os.path.exists(full_path):
            return full_path
return None
def is_program_in_path(program_name):
"""
Checks if a program_name is available in PATH environment variable
Keyword arguments:
program_name: the name of the program to check
Returns True if program_name is in PATH, else False
"""
    return full_path_of_program(program_name) is not None
def get_lsb_release():
"""
Gets the release number of the distribution
Return:
ret: Return code of the lsb_release command
r: The release number
"""
ret, r = get_command_output('lsb_release -rs')
return float(r) if ret == 0 else 0
def get_lsb_distributor():
"""
Gets the Linux distribution distributor id
Return:
ret: Return code of the lsb_release command
r: The distributor id or "Unknown" if an error occurred
"""
ret, r = get_command_output('lsb_release -is')
return r if ret == 0 else "Unknown"
|
dimka665/vk | vk/__init__.py | Python | mit | 113 | 0 | fr | om .session import API, UserAPI, CommunityAPI
__version__ = '3.0-dev'
__all__ = ('API', 'UserAPI', 'CommunityAPI')
zeeman/cyder | cyder/cydhcp/build/builder.py | Python | bsd-3-clause | 5,264 | 0.00057 | from __future__ import unicode_literals
import os
import shlex
import subprocess
import sys
import syslog
import time
from traceback import format_exception
from cyder.base.mixins import MutexMixin
from cyder.base.utils import (
copy_tree, dict_merge, Logger, run_command, set_attrs, shell_out)
from cyder.base.vcs import GitRepo
from cyder.core.utils import fail_mail
from cyder.core.ctnr.models import Ctnr
from cyder.cydhcp.network.models import Network
from cyder.cydhcp.vrf.models import Vrf
from cyder.cydhcp.workgroup.models import Workgroup
from cyder.settings import DHCPBUILD
class DHCPBuilder(MutexMixin, Logger):
def __init__(self, *args, **kwargs):
kwargs = dict_merge(DHCPBUILD, {
'quiet': False,
'verbose': False,
'to_syslog': False,
}, kwargs)
set_attrs(self, kwargs)
self.repo = GitRepo(
self.prod_dir, self.line_change_limit, self.line_removal_limit,
logger=self)
def log(self, log_level, msg):
if self.to_syslog:
for line in msg.splitlines():
syslog.syslog(log_level, line)
def log_debug(self, msg):
self.log(syslog.LOG_DEBUG, msg)
if self.verbose:
print msg
def log_info(self, msg):
self.log(syslog.LOG_INFO, msg)
if not self.quiet:
print msg
    def log_notice(self, msg, to_stderr=True):
        # `to_stderr` is accepted because build() passes it; when False,
        # the console copy of the message is suppressed.
        self.log(syslog.LOG_NOTICE, msg)
        if not self.quiet and to_stderr:
            print msg
    def raise_error(self, msg):
        # A zero-argument `error` handler is defined further down; a distinct
        # name here keeps the two methods from shadowing each other.
        self.log(syslog.LOG_ERR, msg)
        raise Exception(msg)
def run_command(self, command, log=True, failure_msg=None):
if log:
command_logger = self.log_debug
failure_logger = self.log_err
else:
command_logger = None
failure_logger = None
return run_command(command, command_logger=command_logger,
failure_logger=failure_logger,
failure_msg=failure_msg)
def build(self):
try:
with open(self.stop_file) as stop_fd:
now = time.time()
contents = stop_fd.read()
last = os.path.getmtime(self.stop_file)
msg = ('The stop file ({0}) exists. Build canceled.\n'
'Reason for skipped build:\n'
'{1}'.format(self.stop_file, contents))
self.log_notice(msg, to_stderr=False)
if (self.stop_file_email_interval is not None and
now - last > self.stop_file_email_interval):
os.utime(self.stop_file, (now, now))
fail_mail(msg, subject="DHCP builds have stopped")
raise Exception(msg)
except IOError as e:
if e.errno == 2: # IOError: [Errno 2] No such file or directory
pass
else:
raise
self.log_info('Building...')
try:
with open(os.path.join(self.stage_dir, self.target_file), 'w') \
as f:
for ctnr in Ctnr.objects.all():
f.write(ctnr.build_legacy_classes())
for vrf in Vrf.objects.all():
f.write(vrf.build_vrf())
for network in Network.objects.filter(enabled=True):
f.write(network.build_subnet())
for workgroup in Workgroup.objects.all():
f.write(workgroup.build_workgroup())
        except Exception as e:
            self.log(syslog.LOG_ERR,
                     'DHCP build failed.\nOriginal exception: ' + e.message)
raise
if self.check_file:
self.check_syntax()
self.log_info('DHCP build successful')
def push(self, sanity_check=True):
self.repo.reset_and_pull()
try:
            copy_tree(self.stage_dir, self.prod_dir)
except:
self.repo.reset_to_head()
raise
self.repo.commit_and_push('Update config', sanity_check=sanity_check)
def error(self):
ei = sys.exc_info()
exc_msg = ''.join(format_exception(*ei)).rstrip('\n')
self.log_err(
'DHCP build failed.\nOriginal exception: ' + exc_msg,
to_stderr=False)
raise
def check_syntax(self):
out, err, ret = run_command("{0} -t -cf {1}".format(
self.dhcpd, os.path.join(self.stage_dir, self.check_file)
))
if ret != 0:
log_msg = 'DHCP build failed due to a syntax error'
exception_msg = log_msg + ('\n{0} said:\n{1}'
.format(self.dhcpd, err))
self.log_err(log_msg, to_stderr=False)
raise Exception(exception_msg)
def _lock_failure(self, pid):
self.log_err(
'Failed to acquire lock on {0}. Process {1} currently '
'has it.'.format(self.lock_file, pid),
to_stderr=False)
fail_mail(
'An attempt was made to start the DHCP build script while an '
'instance of the script was already running. The attempt was '
'denied.',
subject="Concurrent DHCP builds attempted.")
super(DHCPBuilder, self)._lock_failure(pid)
|
endlessm/chromium-browser | third_party/chromite/scripts/cros_env_whitelist.py | Python | bsd-3-clause | 469 | 0.002132 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Print the environment whitelist."""
from __future__ import print_function
import sys
from chromite.lib import constants
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def main(_argv):
print(' '.join(constants.CHROOT_ENVIRONMENT_WHITELIST))
|
divio/cmsplugin-blog | cmsplugin_blog/migrations/0015_auto__add_field_latestentriesplug.py | Python | bsd-3-clause | 7,566 | 0.008062 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'LatestEntriesPlugin.tagged'
db.add_column('cmsplugin_latestentriesplugin', 'tagged', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True), keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'EntryTitle', fields ['slug', 'language']
db.delete_unique('cmsplugin_blog_entrytitle', ['slug', 'language'])
# Deleting field 'LatestEntriesPlugin.tagged'
db.delete_column('cmsplugin_latestentriesplugin', 'tagged')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_blog.entry': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Entry'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'placeholders': ('djangocms_utils.fields.M2MPlaceholderField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {})
},
'cmsplugin_blog.entrytitle': {
'Meta': {'unique_together': "(('language', 'slug'),)", 'object_name': 'EntryTitle'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_blog.Entry']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cmsplugin_blog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', 'db_table': "'cmsplugin_latestentriesplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'current_language_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'limit': ('django.db.models.fields.PositiveIntegerField', [], {}),
'tagged': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cmsplugin_blog']
|
adityahase/frappe | frappe/boot.py | Python | mit | 10,547 | 0.025505 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six import iteritems, text_type
"""
bootstrap client session
"""
import frappe
import frappe.defaults
import frappe.desk.desk_page
from frappe.desk.form.load import get_meta_bundle
from frappe.utils.change_log import get_versions
from frappe.translate import get_lang_dict
from frappe.email.inbox import get_email_accounts
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
from frappe.website.doctype.web_page_view.web_page_view import is_tracking_enabled
from frappe.social.doctype.energy_point_log.energy_point_log import get_energy_points
from frappe.model.base_document import get_controller
from frappe.social.doctype.post.post import frequently_visited_links
from frappe.core.doctype.navbar_settings.navbar_settings import get_navbar_settings
def get_bootinfo():
"""build and return boot info"""
frappe.set_user_lang(frappe.session.user)
bootinfo = frappe._dict()
hooks = frappe.get_hooks()
doclist = []
# user
get_user(bootinfo)
# system info
bootinfo.sitename = frappe.local.site
bootinfo.sysdefaults = frappe.defaults.get_defaults()
bootinfo.server_date = frappe.utils.nowdate()
if frappe.session['user'] != 'Guest':
bootinfo.user_info = get_fullnames()
bootinfo.sid = frappe.session['sid']
bootinfo.modules = {}
bootinfo.module_list = []
load_desktop_data(bootinfo)
bootinfo.letter_heads = get_letter_heads()
bootinfo.active_domains = frappe.get_active_domains()
bootinfo.all_domains = [d.get("name") for d in frappe.get_all("Domain")]
bootinfo.module_app = frappe.local.module_app
bootinfo.single_types = [d.name for d in frappe.get_all('DocType', {'issingle': 1})]
bootinfo.nested_set_doctypes = [d.parent for d in frappe.get_all('DocField', {'fieldname': 'lft'}, ['parent'])]
add_home_page(bootinfo, doclist)
bootinfo.page_info = get_allowed_pages()
load_translations(bootinfo)
add_timezone_info(bootinfo)
load_conf_settings(bootinfo)
load_print(bootinfo, doclist)
doclist.extend(get_meta_bundle("Page"))
bootinfo.home_folder = frappe.db.get_value("File", {"is_home_folder": 1})
bootinfo.navbar_settings = get_navbar_settings()
# ipinfo
if frappe.session.data.get('ipinfo'):
bootinfo.ipinfo = frappe.session['data']['ipinfo']
# add docs
bootinfo.docs = doclist
for method in hooks.boot_session or []:
frappe.get_attr(method)(bootinfo)
if bootinfo.lang:
bootinfo.lang = text_type(bootinfo.lang)
bootinfo.versions = {k: v['version'] for k, v in get_versions().items()}
bootinfo.error_report_email = frappe.conf.error_report_email
bootinfo.calendars = sorted(frappe.get_hooks("calendars"))
bootinfo.treeviews = frappe.get_hooks("treeviews") or []
bootinfo.lang_dict = get_lang_dict()
bootinfo.success_action = get_success_action()
bootinfo.update(get_email_accounts(user=frappe.session.user))
bootinfo.energy_points_enabled = is_energy_point_enabled()
bootinfo.website_tracking_enabled = is_tracking_enabled()
bootinfo.points = get_energy_points(frappe.session.user)
bootinfo.frequently_visited_links = frequently_visited_links()
bootinfo.link_preview_doctypes = get_link_preview_doctypes()
bootinfo.additional_filters_config = get_additional_filters_from_hooks()
return bootinfo
def get_letter_heads():
letter_heads = {}
for letter_head in frappe.get_all("Letter Head", fields = ["name", "content", "footer"]):
letter_heads.setdefault(letter_head.name,
{'header': letter_head.content, 'footer': letter_head.footer})
return letter_heads
def load_conf_settings(bootinfo):
from frappe import conf
bootinfo.max_file_size = conf.get('max_file_size') or 10485760
for key in ('developer_mode', 'socketio_port', 'file_watcher_port'):
if key in conf: bootinfo[key] = conf.get(key)
def load_desktop_data(bootinfo):
from frappe.config import get_modules_from_all_apps_for_user
from frappe.desk.desktop import get_desk_sidebar_items
bootinfo.allowed_modules = get_modules_from_all_apps_for_user()
bootinfo.allowed_workspaces = get_desk_sidebar_items(flatten=True, cache=False)
bootinfo.module_page_map = get_controller("Desk Page").get_module_page_map()
bootinfo.dashboards = frappe.get_all("Dashboard")
def get_allowed_pages(cache=False):
return get_user_pages_or_reports('Page', cache=cache)
def get_allowed_reports(cache=False):
return get_user_pages_or_reports('Report', cache=cache)
def get_user_pages_or_reports(parent, cache=False):
_cache = frappe.cache()
if cache:
has_role = _cache.get_value('has_role:' + parent, user=frappe.session.user)
if has_role:
return has_role
roles = frappe.get_roles()
has_role = {}
column = get_column(parent)
# get pages or reports set on custom role
pages_with_custom_roles = frappe.db.sql("""
select
`tabCustom Role`.{field} as name,
`tabCustom Role`.modified,
`tabCustom Role`.ref_doctype,
{column}
from `tabCustom Role`, `tabHas Role`, `tab{parent}`
where
`tabHas Role`.parent = `tabCustom Role`.name
and `tab{parent}`.name = `tabCustom Role`.{field}
and `tabCustom Role`.{field} is not null
and `tabHas Role`.role in ({roles})
""".format(field=parent.lower(), parent=parent, column=column,
roles = ', '.join(['%s']*len(roles))), roles, as_dict=1)
for p in pages_with_custom_roles:
has_role[p.name] = {"modified":p.modified, "title": p.title, "ref_doctype": p.ref_doctype}
pages_with_standard_roles = frappe.db.sql("""
select distinct
`tab{parent}`.name as name,
`tab{parent}`.modified,
{column}
from `tabHas Role`, `tab{parent}`
where
`tabHas Role`.role in ({roles})
and `tabHas Role`.parent = `tab{parent}`.name
and `tab{parent}`.`name` not in (
select `tabCustom Role`.{field} from `tabCustom Role`
where `tabCustom Role`.{field} is not null)
{condition}
""".format(parent=parent, column=column, roles = ', '.join(['%s']*len(roles)),
field=parent.lower(), condition="and `tabReport`.disabled=0" if parent == "Report" else ""),
roles, as_dict=True)
for p in pages_with_standard_roles:
if p.name not in has_role:
has_role[p.name] = {"modified":p.modified, "title": p.title}
if parent == "Report":
has_role[p.name].update({'ref_doctype': p.ref_doctype})
# pages with no role are allowed
if parent =="Page":
pages_with_no_roles = frappe.db.sql("""
select
`tab{parent}`.name, `tab{parent}`.modified, {column}
from `tab{parent}`
where
(select count(*) from `tabHas Role`
where `tabHas Role`.parent=`tab{parent}`.`name`) = 0
""".format(parent=parent, column=column), as_dict=1)
for p in pages_with_no_roles:
if p.name not in has_role:
has_role[p.name] = {"modified": p.modified, "title": p.title}
elif parent == "Report":
reports = frappe.get_all("Report",
fields=["name", "report_type"],
filters={"name": ("in", has_role.keys())},
ignore_ifnull=True
)
for report in reports:
has_role[report.name]["report_type"] = report.report_type
# Expire every six hours
_cache.set_value('has_role:' + parent, has_role, frappe.session.user, 21600)
	return has_role
def get_column(doctype):
column = "`tabPage`.title as title"
if doctype == "Report":
column = "`tabReport`.`name` as title, `tabReport`.ref_doctype, `tabReport`.report_type"
return column
def load_translations(bootinfo):
messages = frappe.get_lang_dict("boot")
bootinfo["lang"] = frappe.lang
# load translated report names
for name in bootinfo.user.all_reports:
messages[name] = frappe._(name)
# only untranslated
	messages = {k: v for k, v in iteritems(messages) if k != v}
bootinfo["__messages"] = messages
def get_fullnames():
"""map of user fullnames"""
ret = frappe.db.sql("""select `name`, full_name as fullname,
user_image as image, gender, email, username, bio, location, interest, banner_image, allowed_in_mentions
from tabUser where enabled=1 and user_type!='Website User'""", as_dict=1)
d = {}
for r in ret:
# if not r.image:
# r.image = get_gravatar(r.name)
d[r.name] = r
return d
def get_user(bootinfo):
"""get user info"""
bootinfo.user = frappe.get_user().load |
Ryszard-Ps/rsr-calculator | rsr_calculator/version.py | Python | gpl-3.0 | 77 | 0 | # -*- coding: utf-8 -*-
| """#Versión de la calculadora."""
version = '1.0.0'
|
killpanda/Ailurus | ailurus/fedora/apps_eclipse.py | Python | gpl-2.0 | 7,840 | 0.008291 | #coding: utf-8
#
# Ailurus - a simple application installer and GNOME tweaker
#
# Copyright (C) 2009-2010, Ailurus developers and Ailurus contributors
# Copyright (C) 2007-2010, Trusted Digital Technology Laboratory, Shanghai Jiao Tong University, China.
#
# Ailurus is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Ailurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ailurus; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import with_statement
import sys, os
from lib import *
from libapp import *
class Eclipse(_rpm_install):
__doc__ = _('Eclipse (basic development environment)')
detail = _('You can install Language pack according to the instructions on the page http://www.eclipse.org/babel/downloads.php')
category = 'ide'
license = EPL + ' http://www.eclipse.org/org/documents/epl-v10.php'
pkgs = 'eclipse-platform' # Eclipse without any plugin
def make_sure_installed():
if not RPM.installed('eclipse-platform'): RPM.install('eclipse-platform')
class CDT(_rpm_install):
__doc__ = _('CDT: C/C++ development')
category = 'eclipse_extension'
license = EPL + ' http://www.eclipse.org/legal/'
pkgs = 'eclipse-cdt'
class Pydev(_rpm_install):
__doc__ = _('Pydev: Python development')
category = 'eclipse_extension'
license = EPL + ' http://pydev.org/about.html'
pkgs = 'eclipse-pydev'
class Aptana(I):
__doc__ = _('Aptana: Web application development')
detail = _('Due to limitation of the authors\' programming ability, Aptana cannot be removed by Ailurus.\n'
'In order to remove Aptana, please launch Eclipse, and go to "Help" -> "About Eclipse SDK" -> "Installation Details"')
download_url = 'http://www.aptana.org/studio/plugin'
how_to_install = 'http://download.aptana.org/tools/studio/plugin/install/studio'
category = 'eclipse_extension'
license = DUAL_LICENSE(APL, GPL)
def installed(self):
import glob
List = glob.glob('/usr/lib/eclipse/plugins/com.aptana.ide.*')
return bool(List)
def install(self):
make_sure_installed()
import StringIO
msg = StringIO.StringIO()
print >>msg, _('Please launch Eclipse, and go to "Help" -> "Install New Software".')
print >>msg
print >>msg, _('Click the "Add" button. Then type <b>%s</b> in "Location".')%'http://download.aptana.org/tools/studio/plugin/install/studio'
print >>msg
print >>msg, _('Then click the "Next" button and agree the license.')
install_eclipse_extension_message( _('Installing Aptana'), msg )
def remove(self):
remove_eclipse_extesion_message(self.__class__.__name__)
class RadRails(I):
__doc__ = _('RadRails: Ruby development')
    detail = _('In the past, RadRails was called "RDT".')
how_to_install = 'http://download.aptana.com/tools/radrails/plugin/install/radrails-bundle'
category = 'eclipse_extension'
license = DUAL_LICENSE(APL, GPL)
def installed(self):
import glob
List = glob.glob('/usr/lib/eclipse/plugins/com.aptana.radrails.*')
return bool(List)
def install(self):
make_sure_installed()
import StringIO
msg = StringIO.StringIO()
print >>msg, _('Please launch Eclipse, and go to "Help" -> "Install New Software".')
print >>msg
print >>msg, _('Click the "Add" button. Then type <b>%s</b> in "Location".')%'http://download.aptana.com/tools/radrails/plugin/install/radrails-bundle'
print >>msg
print >>msg, _('Then click the "Next" button and agree the license.')
install_eclipse_extension_message( _('Installing RadRails\n'), msg )
def remove(self):
remove_eclipse_extesion_message(self.__class__.__name__)
class PDT(I):
__doc__ = _('PDT: PHP development')
download_url = 'http://www.eclipse.org/pdt/downloads/'
category = 'eclipse_extension'
license = EPL + ' http://www.eclipse.org/legal/'
def installed(self):
import glob
List = glob.glob('/usr/lib/eclipse/plugins/org.eclipse.php.*')
return bool(List)
def install(self):
if not RPM.installed('eclipse-dltk-sdk'):
RPM.install('eclipse-dltk-sdk')
import StringIO
msg = StringIO.StringIO()
print >>msg, _('Please launch Eclipse, and go to "Help" -> "Install New Software".')
print >>msg
print >>msg, _('Click the "Add" button. Then type <b>%s</b> in "Location".')%'http://www.eclipse.org/pdt/downloads/'
print >>msg
print >>msg, _('Then click the "Next" button and agree the license.')
install_eclipse_extension_message( _('Installing PDT\n'), msg )
def remove(self):
remove_eclipse_extesion_message(self.__class__.__name__)
class PHPEclipse(_rpm_install):
__doc__ = _('PHPEclipse: PHP development')
category = 'eclipse_extension'
pkgs = 'eclipse-phpeclipse'
class Subversive(I):
__doc__ = _('Subversive: Use SVN in Eclipse')
how_to_install = 'http://download.eclipse.org/technology/subversive/0.7/update-site/'
category = 'eclipse_extension'
license = EPL
def installed(self):
import glob
List = glob.glob('/usr/lib/eclipse/plugins/org.eclipse.team.svn.*')
return bool(List)
def install(self):
make_sure_installed()
import StringIO
msg = StringIO.StringIO()
print >>msg, _('Please launch Eclipse, and go to "Help" -> "Install New Software".')
print >>msg
print >>msg, _('Click the "Add" button. Then type <b>%s</b> in "Location".')%'http://download.eclipse.org/technology/subversive/0.7/update-site/'
print >>msg
print >>msg, _('Then click the "Next" button and agree the license.')
install_eclipse_extension_message( _('Installing Subversive\n'), msg )
def remove(self):
remove_eclipse_extesion_message(self.__class__.__name__)
class Subclipse(_rpm_install):
__doc__ = _('Subclipse: Use SVN in Eclipse')
category = 'eclipse_extension'
pkgs = 'eclipse-subclipse'
class VEditor(_rpm_install):
__doc__ = _('VEditor: Verilog and VHDL editor')
category = 'eclipse_extension'
pkgs = 'eclipse-veditor'
class Mylyn(_rpm_install):
__doc__ = _('Mylyn: Task-focused UI for Eclipse')
    category = 'eclipse_extension'
pkgs = 'eclipse-mylyn'
class Photran(_rpm_install):
__doc__ = _('Photran: Fortran development')
    category = 'eclipse_extension'
pkgs = 'eclipse-photran'
class Texlipse(_rpm_install):
__doc__ = _('Texlipse: Edit LaTeX in Eclipse')
    category = 'eclipse_extension'
pkgs = 'eclipse-texlipse'
class MTJ(_path_lists):
__doc__ = _('MTJ: J2ME development')
download_url = 'http://download.eclipse.org/dsdp/mtj/downloads/drops/R-1.0.1-200909181641/'
category = 'eclipse_extension'
license = DUAL_LICENSE(EPL, GPL)
def __init__(self):
        self.path = '/usr/lib/eclipse/dropins/MTJ/'
        self.paths = [self.path]
def install(self):
make_sure_installed()
path = A+'/support/MTJ_urls'
with open(path) as f:
urls = f.readlines()
urls = [u.strip() for u in urls]
f = R(urls).download()
run_as_root('mkdir -p '+self.path)
run_as_root("unzip -qo %s -d %s"%(f, self.path))
|
osspeak/osspeak | osspeak/pyautogui/__init__.py | Python | mit | 36,647 | 0.002483 | # PyAutoGUI: Cross-platform GUI automation for human beings.
# BSD license
# Al Sweigart al@inventwithpython.com (Send me feedback & suggestions!)
"""
IMPORTANT NOTE!
To use this module on Mac OS X, you need the PyObjC module installed.
For Python 3, run:
sudo pip3 install pyobjc-core
sudo pip3 install pyobjc
For Python 2, run:
sudo pip install pyobjc-core
sudo pip install pyobjc
(There's some bug with their installer, so install pyobjc-core first or else
the install takes forever.)
To use this module on Linux, you need Xlib module installed.
For Python 3, run:
sudo pip3 install python3-Xlib
For Python 2, run:
sudo pip install Xlib
To use this module on Windows, you do not need anything else.
You will need PIL/Pillow to use the screenshot features.
"""
from __future__ import absolute_import, division, print_function
__version__ = '0.9.33'
import collections
import sys
import time
KEY_NAMES = ['\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(',
')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`',
'a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~',
'accept', 'add', 'alt', 'altleft', 'altright', 'apps', 'back', 'backspace',
'browserback', 'browserfavorites', 'browserforward', 'browserhome',
'browserrefresh', 'browsersearch', 'browserstop', 'capslock', 'clear',
'convert', 'ctrl', 'ctrlleft', 'ctrlright', 'decimal', 'del', 'delete',
'divide', 'down', 'end', 'enter', 'esc', 'escape', 'execute', 'f1', 'f10',
'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f2', 'f20',
'f21', 'f22', 'f23', 'f24', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',
'final', 'fn', 'hanguel', 'hangul', 'hanja', 'help', 'home', 'insert', 'junja',
'kana', 'kanji', 'launchapp1', 'launchapp2', 'launchmail',
'launchmediaselect', 'left', 'modechange', 'multiply', 'nexttrack',
'nonconvert', 'num0', 'num1', 'num2', 'num3', 'num4', 'num5', 'num6',
'num7', 'num8', 'num9', 'numlock', 'pagedown', 'pageup', 'pause', 'pgdn',
'pgup', 'playpause', 'prevtrack', 'print', 'printscreen', 'prntscrn',
'prtsc', 'prtscr', 'return', 'right', 'scrolllock', 'select', 'separator',
'shift', 'shiftleft', 'shiftright', 'sleep', 'stop', 'subtract', 'tab',
'up', 'volumedown', 'volumemute', 'volumeup', 'win', 'winleft', 'winright', 'yen',
'command', 'option', 'optionleft', 'optionright']
KEYBOARD_KEYS = KEY_NAMES # keeping old KEYBOARD_KEYS for backwards compatibility
def isShiftCharacter(character):
"""Returns True if the key character is uppercase or shifted."""
return character.isupper() or character in '~!@#$%^&*()_+{}|:"<>?'
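# Examples (sketch): isShiftCharacter('A') -> True (uppercase),
# isShiftCharacter('!') -> True (shifted symbol), isShiftCharacter('a') -> False.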
# The platformModule is where we reference the platform-specific functions.
if sys.platform.startswith('java'):
    #from . import _pyautogui_java as platformModule
raise NotImplementedError('Jython is not yet supported by PyAutoGUI.')
elif sys.platform == 'darwin':
from . import _pyautogui_osx as platformModule
elif sys.platform == 'win32':
from . import _pyautogui_win as platformModule
else:
from . import _pyautogui_x11 as platformModule
# TODO: Having module-wide user-writable global variables is bad. It makes
# restructuring the code very difficult. For instance, what if we decide to
# move the mouse-related functions to a separate file (a submodule)? How that
# file will access this module vars? It will probably lead to a circular
# import.
# In seconds. Any duration less than this is rounded to 0.0 to instantly move
# the mouse.
MINIMUM_DURATION = 0.1
# If sleep_amount is too short, time.sleep() will be a no-op and the mouse
# cursor moves there instantly.
# TODO: This value should vary with the platform. http://stackoverflow.com/q/1133857
MINIMUM_SLEEP = 0.05
PAUSE = 0.1 # The number of seconds to pause after EVERY public function call. Useful for debugging.
FAILSAFE = True
# General Functions
# =================
def getPointOnLine(x1, y1, x2, y2, n):
"""Returns the (x, y) tuple of the point that has progressed a proportion
n along the line defined by the two x, y coordinates.
Copied from pytweening module.
"""
x = ((x2 - x1) * n) + x1
y = ((y2 - y1) * n) + y1
return (x, y)
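# Worked example (sketch): halfway (n=0.5) along the line from (0, 0) to
# (100, 200):
#
#     getPointOnLine(0, 0, 100, 200, 0.5)  # -> (50.0, 100.0)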
def linear(n):
"""Trivial linear tweening function.
Copied from pytweening module.
"""
if not 0.0 <= n <= 1.0:
raise ValueError('Argument must be between 0.0 and 1.0.')
return n
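# Example (sketch): linear(0.25) returns 0.25 unchanged, while values outside
# the closed interval [0.0, 1.0], e.g. linear(1.5), raise ValueError.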
def _autoPause(pause, _pause):
if _pause:
if pause is not None:
time.sleep(pause)
elif PAUSE != 0:
time.sleep(PAUSE)
def _unpackXY(x, y):
"""If x is a sequence and y is None, returns x[0], y[0]. Else, returns x, y.
On functions that receive a pair of x,y coordinates, they can be passed as
separate arguments, or as a single two-element sequence.
"""
if isinstance(x, collections.Sequence):
if len(x) == 2:
if y is None:
x, y = x
else:
raise ValueError('When passing a sequence at the x argument, the y argument must not be passed (received {0}).'.format(repr(y)))
else:
raise ValueError('The supplied sequence must have exactly 2 elements ({0} were received).'.format(len(x)))
else:
pass
return x, y
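# Illustration (sketch): both call styles resolve to the same coordinate pair:
#
#     _unpackXY(10, 20)          # -> (10, 20)
#     _unpackXY((10, 20), None)  # -> (10, 20)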
def position(x=None, y=None):
"""Returns the current xy coordinates of the mouse cursor as a two-integer
tuple.
Args:
x (int, None, optional) - If not None, this argument overrides the x in
the return value.
y (int, None, optional) - If not None, this argument overrides the y in
the return value.
Returns:
(x, y) tuple of the current xy coordinates of the mouse cursor.
"""
posx, posy = platformModule._position()
posx = int(posx)
posy = int(posy)
if x is not None:
posx = int(x)
if y is not None:
posy = int(y)
return posx, posy
def size():
"""Returns the width and height of the screen as a two-integer tuple.
Returns:
(width, height) tuple of the screen size, in pixels.
"""
return platformModule._size()
def onScreen(x, y=None):
"""Returns whether the given xy coordinates are on the screen or not.
Args:
Either the arguments are two separate values, first arg for x and second
for y, or there is a single argument of a sequence with two values, the
first x and the second y.
Example: onScreen(x, y) or onScreen([x, y])
Returns:
bool: True if the xy coordinates are on the screen at its current
resolution, otherwise False.
"""
x, y = _unpackXY(x, y)
x = int(x)
y = int(y)
width, height = platformModule._size()
return 0 <= x < width and 0 <= y < height
# Mouse Functions
# ===============
def mouseDown(x=None, y=None, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs pressing a mouse button down (but not up).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse down happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
mouse down happens. None by default.
button (str, int, optional): The mouse button pressed down. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s" |
web-izmerenie/avto-lux161 | avto-lux/app/core/routes/testroute.py | Python | agpl-3.0 | 162 | 0.012346 | # -*- coding: utf-8 -*-
from .base import BaseHandler
class TestRoute(BaseHandler):
def get(self, file):
        return self.render(str(file) + '.jade', show_h1=1)
|
ezeakeal/educube_client | educube/client.py | Python | gpl-3.0 | 2,664 | 0.003378 | #!/usr/bin/env python
import os
import sys
import json
import click
import serial
import pkg_resources
import serial.tools.list_ports
import logging.config
from educube.web import server as webserver
import logging
logger = logging.getLogger(__name__)
plugin_folder = os.path.join(os.path.dirname(__file__), 'commands')
def configure_logging(verbose):
loglevels = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}
logging.basicConfig(level=loglevels[verbose])
def verify_serial_connection(port, baud):
try:
ser = serial.Serial(port, baud, timeout=1)
a = ser.read()
if a:
logger.debug('Serial open: %s' % port)
else:
logger.debug('Serial exists but is not readable (permissions?): %s' % port)
ser.close()
    except serial.serialutil.SerialException as e:
raise click.BadParameter("Serial not readable: %s" % e)
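# Usage sketch (device name is illustrative): returns silently when the port
# can be opened, otherwise raises click.BadParameter.
#
#     verify_serial_connection('/dev/ttyUSB0', 115200)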
##############################
# COMMANDS
##############################
def get_serial():
ports = serial.tools.list_ports.comports()
suggested_educube_port = ports[-1]
return suggested_educube_port.device
def get_baud():
ports = serial.tools.list_ports.comports()
suggested_educube_port = ports[-1]
if suggested_educube_port.description == 'BASE':
return 9600
else:
return 115200
@click.group()
@click.option('-v', '--verbose', count=True)
@click.pass_context
def cli(ctx, verbose):
"""Educube Client"""
configure_logging(verbose)
@cli.command()
def version():
"""Prints the EduCube client version"""
print(pkg_resources.require("educube")[0].version)
@cli.command()
@click.option('-s', '--serial', default=get_serial, prompt=True)
@click.option('-b', '--baud', default=get_baud, prompt=True)
@click.option('-e', '--board', default='CDH')
@click.option('--fake', is_flag=True, default=False, help="Fake the serial")
@click.option('--json', is_flag=True, default=False, help="Outputs mostly JSON instead")
@click.pass_context
def start(ctx, serial, baud, board, fake, json):
"""Starts the EduCube web interface"""
logger.debug("""Running with settings:
Serial: %s
Baudrate: %s
EduCube board: %s
""" % (serial, baud, board))
ctx.obj['connection'] = {
"type": "serial",
"port": serial,
"baud": baud,
"board": board,
"fake": fake,
}
if not fake:
verify_serial_connection(serial, baud)
webserver.start_webserver(
connection=ctx.obj.get('connection')
)
def main():
cli(obj={})
if __name__ == '__main__':
main() |
fbarreir/panda-server | pandaserver/test/deleteJobs.py | Python | apache-2.0 | 5,897 | 0.010005 | import os
import re
import sys
import time
import fcntl
import types
import shelve
import random
import datetime
import commands
import threading
import userinterface.Client as Client
from dataservice.DDM import ddm
from dataservice.DDM import dashBorad
from taskbuffer.OraDBProxy import DBProxy
from taskbuffer.TaskBuffer import taskBuffer
from pandalogger.PandaLogger import PandaLogger
from jobdispatcher.Watcher import Watcher
from brokerage.SiteMapper import SiteMapper
from dataservice.Adder import Adder
from dataservice.Finisher import Finisher
from dataservice.MailUtils import MailUtils
from taskbuffer import ProcessGroups
import brokerage.broker_util
import brokerage.broker
import taskbuffer.ErrorCode
import dataservice.DDM
# password
from config import panda_config
passwd = panda_config.dbpasswd
# logger
_logger = PandaLogger().getLogger('deleteJobs')
_logger.debug("===================== start =====================")
# memory checker
def _memoryCheck(str):
try:
proc_status = '/proc/%d/status' % os.getpid()
procfile = open(proc_status)
name = ""
vmSize = ""
vmRSS = ""
# extract Name,VmSize,VmRSS
for line in procfile:
if line.startswith("Name:"):
name = line.split()[-1]
continue
if line.startswith("VmSize:"):
vmSize = ""
for item in line.split()[1:]:
vmSize += item
continue
if line.startswith("VmRSS:"):
vmRSS = ""
for item in line.split()[1:]:
vmRSS += item
continue
procfile.close()
_logger.debug('MemCheck - %s Name=%s VSZ=%s RSS=%s : %s' % (os.getpid(),name,vmSize,vmRSS,str))
except:
type, value, traceBack = sys.exc_info()
_logger.error("memoryCheck() : %s %s" % (type,value))
_logger.debug('MemCheck - %s unknown : %s' % (os.getpid(),str))
return
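# The parser above expects /proc/<pid>/status lines of this shape
# (values are illustrative):
#
#     Name:   python
#     VmSize:   123456 kB
#     VmRSS:     65432 kB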
_memoryCheck("start")
# kill old process
try:
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# get process list
scriptName = sys.argv[0]
out = commands.getoutput('ps axo user,pid,lstart,args | grep %s' % scriptName)
for line in out.split('\n'):
items = line.split()
# owned process
if not items[0] in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
continue
# look for python
if re.search('python',line) == None:
continue
# PID
pid = items[1]
# start time
timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# kill old process
if startTime < timeLimit:
_logger.debug("old process : %s %s" % (pid,startTime))
_logger.debug(line)
commands.getoutput('kill -9 %s' % pid)
except:
type, value, traceBack = sys.exc_info()
_logger.error("kill process : %s %s" % (type,value))
# instantiate TB
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# table names
jobATableName = "ATLAS_PANDAARCH.jobsArchived"
filesATableName = "ATLAS_PANDAARCH.filesTable_ARCH"
paramATableName = "ATLAS_PANDAARCH.jobParamsTable_ARCH" |
metaATableName = "ATLAS_PANDAARCH.metaTable_ARCH"
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=3)
# delete
_logger.debug("get PandaIDs for Delete")
sql = "SELECT COUNT(*) FROM ATLAS_PANDA.j | obsArchived4 WHERE modificationTime<:modificationTime"
varMap = {}
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sql,varMap)
if res != None:
tmpTotal = res[0][0]
else:
tmpTotal = None
maxBunch = 1000
nBunch = 500
tmpIndex = 0
while True:
sql = "SELECT PandaID,modificationTime FROM ATLAS_PANDA.jobsArchived4 "
sql += "WHERE modificationTime<:modificationTime AND archivedFlag=:archivedFlag AND rownum<=:rowRange"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':archivedFlag'] = 1
varMap[':rowRange'] = maxBunch
status,res = taskBuffer.querySQLS(sql,varMap)
if res == None:
_logger.error("failed to get PandaIDs to be deleted")
break
else:
_logger.debug("got %s for deletion" % len(res))
if len(res) == 0:
_logger.debug("no jobs left for for deletion")
break
else:
maxBunch = len(res)
random.shuffle(res)
res = res[:nBunch]
# loop over all jobs
for (id,srcEndTime) in res:
tmpIndex += 1
try:
# check
sql = "SELECT PandaID from %s WHERE PandaID=:PandaID" % jobATableName
varMap = {}
varMap[':PandaID'] = id
status,check = taskBuffer.querySQLS(sql,varMap)
if check == None or len(check) == 0:
# no record in ArchivedDB
_logger.error("No backup for %s" % id)
else:
# delete
_logger.debug("DEL %s : endTime %s" % (id,srcEndTime))
proxyS = taskBuffer.proxyPool.getProxy()
proxyS.deleteJobSimple(id)
taskBuffer.proxyPool.putProxy(proxyS)
if tmpIndex % 1000 == 1:
_logger.debug(" deleted %s/%s" % (tmpIndex,tmpTotal))
except:
pass
# terminate
if maxBunch < nBunch:
break
_logger.debug("===================== end =====================")
|
lelit/doit | tests/test_cmdparse.py | Python | mit | 5,264 | 0.009878 | import pickle
import pytest
from doit.cmdparse import DefaultUpdate, CmdParseError, CmdOption, CmdParse
class TestDefaultUpdate(object):
def test(self):
du = DefaultUpdate()
du.set_default('a', 0)
du.set_default('b', 0)
assert 0 == du['a']
assert 0 == du['b']
du['b'] = 1
du.update_defaults({'a':2, 'b':2})
assert 2 == du['a']
asser | t 1 == du['b']
def test_add_defaults(self):
du = DefaultUpdate()
        du.add_defaults({'a': 0, 'b': 1})
du['c'] = 5
du.add_defaults({'a':2, 'c':2})
assert 0 == du['a']
assert 1 == du['b']
assert 5 == du['c']
# http://bugs.python.org/issue826897
def test_pickle(self):
du = DefaultUpdate()
du.set_default('x', 0)
dump = pickle.dumps(du,2)
pickle.loads(dump)
class TestCmdOption(object):
def test_repr(self):
opt = CmdOption({'name':'opt1', 'default':'',
'short':'o', 'long':'other'})
assert "CmdOption(" in repr(opt)
assert "'name':'opt1'" in repr(opt)
assert "'short':'o'" in repr(opt)
assert "'long':'other'" in repr(opt)
def test_non_required_fields(self):
opt1 = CmdOption({'name':'op1', 'default':''})
assert '' == opt1.long
def test_invalid_field(self):
opt_dict = {'name':'op1', 'default':'', 'non_existent':''}
pytest.raises(CmdParseError, CmdOption, opt_dict)
def test_missing_field(self):
opt_dict = {'name':'op1', 'long':'abc'}
pytest.raises(CmdParseError, CmdOption, opt_dict)
class TestCmdOption_help_param(object):
def test_bool_param(self):
opt1 = CmdOption({'name':'op1', 'default':'', 'type':bool,
'short':'b', 'long': 'bobo'})
assert '-b, --bobo' == opt1.help_param()
def test_non_bool_param(self):
opt1 = CmdOption({'name':'op1', 'default':'', 'type':str,
'short':'s', 'long': 'susu'})
assert '-s ARG, --susu=ARG' == opt1.help_param()
def test_no_long(self):
opt1 = CmdOption({'name':'op1', 'default':'', 'type':str,
'short':'s'})
assert '-s ARG' == opt1.help_param()
opt_bool = {'name': 'flag',
'short':'f',
'long': 'flag',
'inverse':'no-flag',
'type': bool,
'default': False,
'help': 'help for opt1'}
opt_rare = {'name': 'rare',
'long': 'rare-bool',
'type': bool,
'default': False,
'help': 'help for opt2'}
opt_int = {'name': 'num',
'short':'n',
'long': 'number',
'type': int,
'default': 5,
'help': 'help for opt3'}
opt_no = {'name': 'no',
'short':'',
'long': '',
'type': int,
'default': 5,
'help': 'user cant modify me'}
class TestCmdOption_help_doc(object):
def test_param(self):
opt1 = CmdOption(opt_bool)
got = opt1.help_doc()
assert '-f, --flag' in got[0]
assert 'help for opt1' in got[0]
assert '--no-flag' in got[1]
assert 2 == len(got)
def test_no_doc_param(self):
opt1 = CmdOption(opt_no)
assert 0 == len(opt1.help_doc())
class TestCommand(object):
@pytest.fixture
def cmd(self, request):
opt_list = (opt_bool, opt_rare, opt_int, opt_no)
options = [CmdOption(o) for o in opt_list]
cmd = CmdParse(options)
return cmd
def test_short(self, cmd):
assert "fn:" == cmd.get_short(), cmd.get_short()
def test_long(self, cmd):
assert ["flag", "no-flag", "rare-bool", "number="] == cmd.get_long()
def test_getOption(self, cmd):
# short
opt, is_inverse = cmd.get_option('-f')
assert (opt_bool['name'], False) == (opt.name, is_inverse)
# long
opt, is_inverse = cmd.get_option('--rare-bool')
assert (opt_rare['name'], False) == (opt.name, is_inverse)
# inverse
opt, is_inverse = cmd.get_option('--no-flag')
assert (opt_bool['name'], True) == (opt.name, is_inverse)
# not found
opt, is_inverse = cmd.get_option('not-there')
assert (None, None) == (opt, is_inverse)
def test_parseDefaults(self, cmd):
params, args = cmd.parse([])
assert False == params['flag']
assert 5 == params['num']
def test_parseShortValues(self, cmd):
params, args = cmd.parse(['-n','89','-f'])
assert True == params['flag']
assert 89 == params['num']
def test_parseLongValues(self, cmd):
params, args = cmd.parse(['--rare-bool','--num','89', '--no-flag'])
assert True == params['rare']
assert False == params['flag']
assert 89 == params['num']
def test_parsePositionalArgs(self, cmd):
params, args = cmd.parse(['-f','p1','p2','--sub-arg'])
assert ['p1','p2','--sub-arg'] == args
def test_parseError(self, cmd):
pytest.raises(CmdParseError, cmd.parse, ['--not-exist-param'])
def test_parseWrongType(self, cmd):
pytest.raises(CmdParseError, cmd.parse, ['--num','oi'])
|
TSDBBench/Overlord | vagrant_files/generator/files/databases/opentsdb_cl5_rf1.py | Python | apache-2.0 | 6,981 | 0.010314 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
__author__ = 'Andreas Bader'
__version__ = "0.01"
# db_folders -> List of DB Folder (for space check)
# db_client -> name of ycsb client
# db_args -> special ycsb arguments for this db
# db_name -> name of this db (e.g. for workload file)
# db_desc -> more detailed name/description
# jvm_args -> special jvm_args for this db and ycsb
# prerun_once -> list of commands to run local once before ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# postrun_once -> list of commands to run local once after ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# prerun -> list of commands to run before ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# postrun -> list of commands to run after ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# prerun_master -> list of commands to run before ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_master -> list of commands to run after ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_slaves -> list of commands to run before ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_slaves -> list of commands to run after ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_dict -> list of commands to run before ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# postrun_dict -> list of commands to run after ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# check -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (systemctl start xyz often returns true even if start failed somehow. Check that here!)
# check_master -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (only on master(first=ID 0) vm or local))
# check_slaves -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (all without master(=ID 0)) vms or local))
# check_dict -> list of commands to run after prerun for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# basic -> True/False, if True this is a basic database, so no need to ssh for space checking
# sequence -> which vm should be provisioned first? (for all postrun/prerun dicts/lists. First number is considered master db vm, rest are slaves.)
# include -> which base modules should be imported and added to the dictionary (standard functions that are reusable). Warning: infinite import loop possible!
# the following variables are possible in prerun_once, postrun_once, prerun, prerun_master, prerun_slaves, check, check_master, check_slaves, postrun, postrun_master, postrun_slaves, prerun_dict, postrun_dict, check_dict, db_args:
# %%IP%% -> IP of (actual) db vm
# %%IPgen%% -> IP of (actual) generator vm (on which this script runs)
# %%IPn%% -> IP of db vm number n (e.g. %%IP2%%)
# %%IPall%% -> give String with IP of all vms)
# %%HN%% -> Hostname of (actual) db vm
# %%HNgen%% -> Hostname of (actual) generator vm (on which this script runs)
# %%HNn%% -> Hostname of db vm number n (e.g. %%HN2%%)
# %%HNall%% -> give String with Hostname of all vms)
# %%SSH%% -> if SSH should be used (set at the beginning)
# Order of Preruns/Postruns:
# 1. prerun/postrun/check, 2. prerun_master/postrun_master/check_master, 3. prerun_slaves/postrun_slaves/check_slaves, 4. prerun_dict/postrun_dict/check_dict
# General Order:
# prerun -> check -> ycsb -> postrun
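# Illustrative example (not part of the original file): with two db vms at
# 10.0.0.10 and 10.0.0.11, a command string such as "%%SSH%%ping %%IPall%%"
# might be expanded by the generator to "ping 10.0.0.10 10.0.0.11" before
# execution (assuming %%SSH%% resolves to an empty prefix when SSH is unused).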
def getDict():
dbConfig={}
dbConfig["db_folders"]=["/home/vagrant/hadoop"]
dbConfig["db_client"]="opentsdb"
dbConfig["db_args"]="-p ip=%%IP%% -p port=4242"
dbConfig["db_name"]="opentsdb_cl5_rf1"
dbConfig["db_desc"]="OpenTSDB with HBase and Hadoop on 5 VMs and Replication Factor 1."
dbConfig["jvm_args"]="-jvm-args='-Xmx4096m'"
dbConfig["prerun_once"]= []
dbConfig["postrun_once"]= []
dbConfig["prerun"]= []
dbConfig["postrun"]= []
dbConfig["prerun_master"]= []
dbConfig["postrun_master"]= []
dbConfig["prerun_slaves"]= []
dbConfig["postrun_slaves"]= []
dbConfig["prerun_dict"]= {
0 : ["%%SSH%%sudo -s bash -c '/home/vagrant/hadoop/bin/hdfs namenode -format test'",
"%%SSH%%sudo -s bash /home/vagrant/hadoop/sbin/start-dfs.sh",
#"%%SSH%%sudo -s bash /home/vagrant/hadoop/sbin/start-yarn.sh"
"%%SSH%%sudo -s bash /home/vagrant/hbase/bin/start-hbase.sh",
"%%SSH%%sudo -s bash -c 'sleep 10'",
"%%SSH%%sudo -s bash -c \"COMPRESSION=LZO HBASE_HOME=/home/vagrant/hbase /usr/share/opentsdb/tools/create_table.sh\"",
"%%SSH%%sudo -s bash -c 'systemctl start opentsdb.service'",
"%%SSH%%sudo -s bash -c '/usr/share/opentsdb/bin/tsdb mkmetric usermetric'"
],
}
dbConfig["postrun_dict"]= {}
dbConfig["check"]= []
dbConfig["check_master"]= ["%%SSH%%sudo -s bash -c 'exit $(systemctl status opentsdb.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status opentsdb.service | grep -c \"active (running)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hbase\" | grep -v \"grep hbase\" | wc -l)-5))'",
"%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hadoop\" | grep -v \"grep hadoop\" | wc -l)-5))'"]
dbConfig["check_slaves"]= []
dbConfig["check_dict"]= {
1 : ["%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hbase\" | grep -v \"grep hbase\" | wc -l)-6))'",
"%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hadoop\" | grep -v \"grep hadoop\" | wc -l)-4))'"],
2 : ["%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hbase\" | grep -v \"grep hbase\" | wc -l)-6))'",
"%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hadoop\" | grep -v \"grep hadoop\" | wc -l)-4))'"],
3 : ["%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hbase\" | grep -v \"grep hbase\" | wc -l)-4))'",
"%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hadoop\" | grep -v \"grep hadoop\" | wc -l)-3))'"],
4 : ["%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hbase\" | grep -v \"grep hbase\" | wc -l)-4))'",
"%%SSH%%sudo -s bash -c 'exit $(($(ps ax | grep \"hadoop\" | grep -v \"grep hadoop\" | wc -l)-3))'"],
}
dbConfig["basic"]= False
dbConfig["sequence"]=[1,2,3,4,0]
dbConfig["include"]=["hostsfile_5","hbase", "hadoop"]
    return dbConfig
Mirantis/pumphouse | pumphouse/tasks/node.py | Python | apache-2.0 | 19,990 | 0.00005 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from taskflow.patterns import graph_flow
from taskflow import task
from pumphouse import events
from pumphouse import exceptions
from pumphouse import flows
from pumphouse.tasks import service as service_tasks
from pumphouse import task as pump_task
from pumphouse import utils
LOG = logging.getLogger(__name__)
assignment = flows.register("assignment", default="fixed")
def extract_macs(info):
macs = set(i["mac"] for i in info["meta"]["interfaces"])
return tuple(macs)
class RetrieveAllEnvironments(task.Task):
def execute(self):
from pumphouse._vendor.fuelclient.objects import environment
envs = dict((env.data["name"], env.data)
for env in environment.Environment.get_all())
return envs
class RetrieveEnvironment(task.Task):
def execute(self, envs_infos, env_name):
return envs_infos[env_name]
class RetrieveEnvNodes(task.Task):
def execute(self, env_info):
from pumphouse._vendor.fuelclient.objects import environment
env = environment.Environment.init_with_data(env_info)
nodes = dict((node.data["fqdn"], node.data)
for node in env.get_all_nodes())
return nodes
class RetrieveNode(task.Task):
def execute(self, nodes_infos, hostname):
return nodes_infos[hostname]
class DeployChanges(pump_task.BaseCloudTask):
def execute(self, env_info, **nodes_info):
from pumphouse._vendor.fuelclient.objects import environment
env = environment.Environment.init_with_data(env_info)
task = env.deploy_changes()
unassigned = set(node_info["id"]
for node_info in nodes_info.itervalues()
if node_info["status"] == "discover")
watched_macs = set(extract_macs(node_info)
for node_info in nodes_info.itervalues())
for progress, nodes in task:
for node in nodes:
node_macs = extract_macs(node.data)
if node_macs in watched_macs:
if node.data["id"] in unassigned:
unassigned.discard(node.data["id"])
self.assign_event(node)
self.provisioning_event(progress, node)
env.update()
return env.data
def revert(self, env_info, result, flow_failures, **nodes_info):
LOG.error("Deploying of changes failed for env %r with result %r",
env_info, result)
def provisioning_event(self, progress, node):
LOG.debug("Waiting for deploy: %r, %r", progress, node)
events.emit("update", {
"id": node.data["fqdn"],
"type": "host",
"cloud": self.cloud.name,
"progress": node.data["progress"],
"data": {
"status": node.data["status"],
}
}, namespace="/events")
def assign_event(self, node):
hostname = node.data["fqdn"]
events.emit("create", {
"id": hostname,
"cloud": self.cloud.name,
"type": "host",
"action": "reassignment",
"data": {
"name": hostname,
}
}, namespace="/events")
class ChooseAnyComputeNode(task.Task):
def execute(self, nodes_infos, env_info):
        # XXX(akscram): The source of the configuration is the first
        # node with the `compute` role.
compute_nodes = [info
for info in nodes_infos.values()
if "compute" in info["roles"]]
if not compute_nodes:
raise exceptions.Conflict("There is no any compute nodes in "
"environment %r" % (env_info,))
compute_node = compute_nodes[0]
return compute_node
class ExtractRolesFromNode(task.Task):
def execute(self, node_info):
return node_info["roles"]
class ExtractDisksFromNode(task.Task):
def execute(self, node_info):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
disks = node.get_attribute("disks")
return [{
"name": d["name"],
"size": d["size"],
"volumes": d["volumes"],
} for d in disks]
class ExtractIfacesFromNode(task.Task):
def execute(self, node_info):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
ifaces = node.get_attribute("interfaces")
return [{
"name": i["name"],
"assigned_networks": i["assigned_networks"],
} for i in ifaces]
class ExtractNetworkDataFromEnv(task.Task):
def execute(self, env_info):
from pumphouse._vendor.fuelclient.objects import environment
env = environment.Environment.init_with_data(env_info)
network_data = env.get_network_data()
return network_data
class PopulateIfacesWithIDs(task.Task):
def execute(self, network_data, ifaces):
ifaces_ids = {n["name"]: n["id"] for n in network_data["networks"]}
ifaces_with_ids = [{
"name": i["name"],
"assigned_networks": [{
"id": ifaces_ids[a],
"name": a,
} for a in i["assigned_networks"]],
} for i in ifaces]
return ifaces_with_ids
class ApplyDisksAttributesFromNode(task.Task):
def execute(self, disks, node_info):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
node_disks = node.get_attribute("disks")
changed_disks = self.update_disks_attrs(disks, node_disks)
node.upload_node_attribute("disks", changed_disks)
node.update()
return node.data
def update_disks_attrs(self, disks1, disks2):
"""Updates geometries of partitions.
        Returns a new dict which is made from elements from disks2 with
        geometry of partitions from disks1.
"""
def to_dict(attrs):
return dict((attr["name"], attr) for attr in attrs)
attrs = []
disks_dict1 = to_dict(disks1)
for disk in disks2:
volumes = [{"name": v["name"],
"size": v["size"]}
for v in disks_dict1[disk["name"]]["volumes"]]
attrs.append({
"id": disk["id"],
"size": disk["size"],
"volumes": volumes,
})
return attrs
class ApplyNetAttributesFromNode(task.Task):
def execute(self, ifaces, node_info):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
node_ifaces = node.get_attribute("interfaces")
changed_ifaces = self.update_ifaces_attrs(ifaces, node_ifaces)
node.upload_node_attribute("interfaces", changed_ifaces)
node.update()
return node.data
def update_ifaces_attrs(self, ifaces1, ifaces2):
"""Updates configuration of network interfaces.
Returns a new dict which is made from elements from ifaces2
with assignments from ifaces1.
"""
def to_dict(attrs):
return dict((attr["name"], attr) for attr in attrs)
attrs = []
ifaces_dict1 = to_dict(ifaces1)
for iface in ifaces2:
attrs.append({
"id": iface["id"],
"type": iface["type"],
"assigned_networks":
ifaces_dict1[ |
titilambert/teeawards | old/jobs/suicides/suicides.py | Python | agpl-3.0 | 4,168 | 0.003599 | from datetime import datetime
from pymongo import DESCENDING
from libs.lib import tee_db
from libs.statisticator import Job
class SuicidesJob(Job):
def __init__(self):
""" Job to get player suicides
            Collection name: results_suicides
            Structure:
{'player': STR ,
'suicides': INT ,
'gametype': STR,
'last_event_date': DATE ,
}
Primary key : 'player'
"""
Job.__init__(self)
results_db_name = 'results_suicides'
self.results_db = tee_db[results_db_name]
self.dependencies = ('players', 'gametypes')
def get_dependencies(self):
return self.dependencies
def load_results_from_cache(self):
res = self.results_db.find(spec={
'player': self.player_name,
'gametype': self.gametype,
},
limit=1,
                               sort=[('date', DESCENDING)],
)
if res.count() > 0:
return res[0]
else:
return None
def get_results(self):
res = self.load_results_from_cache()
if res is None:
return []
else:
return res['suicides']
def save_results_to_cache(self):
# Save new line only when data changes
# Else update only the date
last_data = self.load_results_from_cache()
if last_data is not None and last_data['suicides'] == self.results['suicides']:
last_data['date'] = self.results['date']
self.results = last_data
self.results_db.save(self.results)
def process(self, player_name, gametype):
self.player_name = player_name
self.gametype = gametype
# Change status
self.status = 'processing'
# Get old data
self.results = self.load_results_from_cache()
# Set data if no history
if self.results is None:
self.results = {}
self.results['player'] = self.player_name
self.results['gametype'] = self.gametype
self.results['suicides'] = 0
self.results['last_event_date'] = datetime(1,1,1,0,0,0)
# Get new suicides
if self.gametype:
suicides = tee_db['kill'].find(spec={'$and': [
{'weapon': {'$in': ['-1', '0', '1', '2', '3', '4', '5']}},
{'killer': self.player_name},
{'victim': self.player_name},
{'gametype': self.gametype},
{'round': { "$ne": None}},
{'when': {'$gt': self.results['last_event_date']}},
]},
                                    sort=[('when', DESCENDING)],
)
else:
suicides = tee_db['kill'].find(spec={'$and': [
{'weapon': {'$in': ['-1', '0', '1', '2', '3', '4', '5']}},
{'killer': self.player_name},
{'victim': self.player_name},
{'round': { "$ne": None}},
{'when': {'$gt': self.results['last_event_date']}},
                ]},
                sort=[('when', DESCENDING)],
)
# Set new suicides
self.results['suicides'] += suicides.count()
        # Set last event date
if suicides.count() > 0:
self.results['last_event_date'] = suicides[0]['when']
self.results['date'] = datetime.now()
# Save to mongo
self.save_results_to_cache()
# Change status
self.status = 'done'
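# Illustrative usage sketch (assumes a reachable MongoDB behind libs.lib.tee_db;
# the player name and gametype below are hypothetical):
#   job = SuicidesJob()
#   job.process('some_player', 'ctf')
#   print job.get_results()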
LCAV/pyroomacoustics | pyroomacoustics/tests/test_issue_162.py | Python | mit | 663 | 0 | import numpy as np
import pyroomacoustics as pra
def compute_rir(order):
fromPos = np.zeros((3))
toPos = np.ones((3, 1))
roomSize = np.array([3, 3, 3])
room = pra.ShoeBox(roomSize, fs=1000, absorption=0.95, max_order=order)
room.add_source(fromPos)
mics = pra.MicrophoneArray(toPos, room.fs)
room.add_microphone_array(mics)
room.compute_rir()
def test_issue_162_max_order_15():
    compute_rir(15)
def test_issue_162_max_order_31():
compute_rir(31)
def test_issue_162_max_order_32():
compute_rir(32)
def test_issue_162_max_order_50():
compute_rir(50)
def test_issue_162_max_order_75():
compute_rir(75)
sdiehl/rpygtk | rpygtk/ui/about.py | Python | gpl-3.0 | 1,127 | 0.00976 | # Copyright 2009-2010 Stephen Diehl
#
# This file is part of RPyGTK and distributed under the terms
# of the GPLv3 license. See the file LICENSE in the RPyGTK
# distribution for full details.
import sys
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import config
from ui import window
class About(window.Window):
def __init__(self):
self.window = gtk.AboutDialog()
self.handler()
self.window.run()
self.window.destroy()
def handler(self):
self.window.set_name(config.__name__)
self.window.set_authors(config.__authors__ + config.__credits__)
self.window.set_version(config.__version__)
self.window.set_license(config.__license__)
self.window.set_website(config.__website__)
self.window.set_comments(config.__comment__)
        self.window.set_copyright("Copyright \302\251 2009-2010 Stephen Diehl")
self.window.set_logo(gtk.gdk.pixbuf_new_from_file('/usr/share/icons/gnome/scalable/categories/applications-utilities.svg')) |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/cred/portal.py | Python | bsd-3-clause | 5,185 | 0.001157 | # -*- test-case-name: twisted.test.test_newcred -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The point of integration of application and authentication.
"""
from twisted.internet import defer
from twisted.internet.defer import maybeDeferred
from twisted.python import failure, reflect
from twisted.cred import error
from zope.interface import providedBy, Interface
class IRealm(Interface):
"""
The realm connects application-specific objects to the
authentication system.
"""
def requestAvatar(avatarId, mind, *interfaces):
"""Return avatar implementing one of the given interfaces.
@param avatarId: a string that identifies an avatar, as returned by
L{ICredentialsChecker.requestAvatarId<twisted.cred.checkers.ICredentialsChecker.requestAvatarId>}
(via a Deferred). Alternatively, it may be
C{twisted.cred.checkers.ANONYMOUS}.
@param mind: usually None. See the description of mind in
L{Portal.login}.
@param interfaces: the interface(s) the returned avatar should
implement, e.g. C{IMailAccount}. See the description of
L{Portal.login}.
@returns: a deferred which will fire a tuple of (interface,
avatarAspect, logout), or the tuple itself. The interface will be
one of the interfaces passed in the 'interfaces' argument. The
'avatarAspect' will implement that interface. The 'logout' object
is a callable which will detach the mind from the avatar.
"""
class Portal:
"""A mediator between clients and a realm.
A portal is associated with one Realm and zero or more credentials checkers.
When a login is attempted, the portal finds the appropriate credentials
checker for the credentials given, invokes it, and if the credentials are
valid, retrieves the appropriate avatar from the Realm.
This class is not intended to be subclassed. Customization should be done
in the realm object and in the credentials checker objects.
"""
def __init__(self, realm, checkers=()):
"""Create a Portal to a L{IRealm}.
"""
self.realm = realm
self.checkers = {}
for checker in checkers:
self.registerChecker(checker)
def listCredentialsInterfaces(self):
"""Return list of credentials inter | faces that can be used to login."""
return self.checkers.keys()
def registerChecker(self, checker, *credentialInterfaces):
if not credentialInterfaces:
credentialInterfaces = checker.credentialInterfaces
for credentialInterface in credentialInterfaces:
self.checkers[credentialInterface] = checker
def login(self, credentials, mind, *interfaces):
"""
@param credentials: an implementor of
        twisted.cred.credentials.ICredentials
@param mind: an object which implements a client-side interface for
your particular realm. In many cases, this may be None, so if the word
'mind' confuses you, just ignore it.
@param interfaces: list of interfaces for the perspective that the mind
wishes to attach to. Usually, this will be only one interface, for
example IMailAccount. For highly dynamic protocols, however, this may
be a list like (IMailAccount, IUserChooser, IServiceInfo). To expand:
if we are speaking to the system over IMAP, any information that will
be relayed to the user MUST be returned as an IMailAccount implementor;
IMAP clients would not be able to understand anything else. Any
information about unusual status would have to be relayed as a single
mail message in an otherwise-empty mailbox. However, in a web-based
mail system, or a PB-based client, the ``mind'' object inside the web
server (implemented with a dynamic page-viewing mechanism such as
woven) or on the user's client program may be intelligent enough to
respond to several ``server''-side interfaces.
@return: A deferred which will fire a tuple of (interface,
avatarAspect, logout). The interface will be one of the interfaces
passed in the 'interfaces' argument. The 'avatarAspect' will implement
that interface. The 'logout' object is a callable which will detach
the mind from the avatar. It must be called when the user has
conceptually disconnected from the service. Although in some cases
this will not be in connectionLost (such as in a web-based session), it
will always be at the end of a user's interactive session.
"""
ifac = providedBy(credentials)
for i in ifac:
c = self.checkers.get(i)
if c is not None:
return maybeDeferred(c.requestAvatarId, credentials
).addCallback(self.realm.requestAvatar, mind, *interfaces
)
return defer.fail(failure.Failure(error.UnhandledCredentials(
"No checker for %s" % ', '.join(map(reflect.qual, ifac)))))
mark-me/Pi-Jukebox | venv/Lib/site-packages/mutagen/musepack.py | Python | agpl-3.0 | 9,468 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Lukas Lalinsky
# Copyright (C) 2012 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Musepack audio streams with APEv2 tags.
Musepack is an audio format originally based on the MPEG-1 Layer-2
algorithms. Stream versions 4 through 7 are supported.
For more information, see http://www.musepack.net/.
"""
__all__ = ["Musepack", "Open", "delete"]
import struct
from ._compat import endswith, xrange
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen.id3._util import BitPaddedInt
from mutagen._util import cdata, convert_error, intround
class MusepackHeaderError(error):
pass
RATES = [44100, 48000, 37800, 32000]
def _parse_sv8_int(fileobj, limit=9):
"""Reads (max limit) bytes from fileobj until the MSB is zero.
All 7 LSB will be merged to a big endian uint.
Raises ValueError in case not MSB is zero, or EOFError in
case the file ended before limit is reached.
Returns (parsed number, number of bytes read)
"""
num = 0
for i in xrange(limit):
c = fileobj.read(1)
if len(c) != 1:
raise EOFError
c = bytearray(c)
        num = (num << 7) | (c[0] & 0x7F)
if not c[0] & 0x80:
return num, i + 1
if limit > 0:
raise ValueError
return 0, 0
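# Worked example: the byte sequence 0x81 0x05 decodes as
#   0x81 -> MSB set, num = 0x01; 0x05 -> MSB clear, num = (0x01 << 7) | 0x05
# so _parse_sv8_int returns (133, 2).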
def _calc_sv8_gain(gain):
# 64.82 taken from mpcdec
return 64.82 - gain / 256.0
def _calc_sv8_peak(peak):
    return (10 ** (peak / (256.0 * 20.0)) / 65535.0)
class MusepackInfo(StreamInfo):
"""MusepackInfo()
Musepack stream information.
Attributes:
channels (`int`): number of audio channels
length (`float`): file length in seconds, as a float
sample_rate (`int`): audio sampling rate in Hz
bitrate (`int`): audio bitrate, in bits per second
version (`int`) Musepack stream version
Optional Attributes:
Attributes:
title_gain (`float`): Replay Gain for this song
title_peak (`float`): Peak data for this song
album_gain (`float`): Replay Gain for this album
album_peak (`float`): Peak data for this album
These attributes are only available in stream version 7/8. The
gains are a float, +/- some dB. The peaks are a percentage [0..1] of
the maximum amplitude. This means to get a number comparable to
VorbisGain, you must multiply the peak by 2.
"""
@convert_error(IOError, MusepackHeaderError)
def __init__(self, fileobj):
"""Raises MusepackHeaderError"""
header = fileobj.read(4)
if len(header) != 4:
raise MusepackHeaderError("not a Musepack file")
# Skip ID3v2 tags
if header[:3] == b"ID3":
header = fileobj.read(6)
if len(header) != 6:
raise MusepackHeaderError("not a Musepack file")
size = 10 + BitPaddedInt(header[2:6])
fileobj.seek(size)
header = fileobj.read(4)
if len(header) != 4:
raise MusepackHeaderError("not a Musepack file")
if header.startswith(b"MPCK"):
self.__parse_sv8(fileobj)
else:
self.__parse_sv467(fileobj)
if not self.bitrate and self.length != 0:
fileobj.seek(0, 2)
self.bitrate = intround(fileobj.tell() * 8 / self.length)
def __parse_sv8(self, fileobj):
# SV8 http://trac.musepack.net/trac/wiki/SV8Specification
key_size = 2
mandatory_packets = [b"SH", b"RG"]
def check_frame_key(key):
if ((len(frame_type) != key_size) or
(not b'AA' <= frame_type <= b'ZZ')):
raise MusepackHeaderError("Invalid frame key.")
frame_type = fileobj.read(key_size)
check_frame_key(frame_type)
while frame_type not in (b"AP", b"SE") and mandatory_packets:
try:
frame_size, slen = _parse_sv8_int(fileobj)
except (EOFError, ValueError):
raise MusepackHeaderError("Invalid packet size.")
data_size = frame_size - key_size - slen
# packets can be at maximum data_size big and are padded with zeros
if frame_type == b"SH":
mandatory_packets.remove(frame_type)
self.__parse_stream_header(fileobj, data_size)
elif frame_type == b"RG":
mandatory_packets.remove(frame_type)
self.__parse_replaygain_packet(fileobj, data_size)
else:
fileobj.seek(data_size, 1)
frame_type = fileobj.read(key_size)
check_frame_key(frame_type)
if mandatory_packets:
raise MusepackHeaderError("Missing mandatory packets: %s." %
", ".join(map(repr, mandatory_packets)))
self.length = float(self.samples) / self.sample_rate
self.bitrate = 0
def __parse_stream_header(self, fileobj, data_size):
# skip CRC
fileobj.seek(4, 1)
remaining_size = data_size - 4
try:
self.version = bytearray(fileobj.read(1))[0]
except (TypeError, IndexError):
raise MusepackHeaderError("SH packet ended unexpectedly.")
remaining_size -= 1
try:
samples, l1 = _parse_sv8_int(fileobj)
samples_skip, l2 = _parse_sv8_int(fileobj)
except (EOFError, ValueError):
raise MusepackHeaderError(
"SH packet: Invalid sample counts.")
self.samples = samples - samples_skip
remaining_size -= l1 + l2
data = fileobj.read(remaining_size)
if len(data) != remaining_size:
raise MusepackHeaderError("SH packet ended unexpectedly.")
self.sample_rate = RATES[bytearray(data)[0] >> 5]
self.channels = (bytearray(data)[1] >> 4) + 1
def __parse_replaygain_packet(self, fileobj, data_size):
data = fileobj.read(data_size)
if data_size < 9:
raise MusepackHeaderError("Invalid RG packet size.")
if len(data) != data_size:
raise MusepackHeaderError("RG packet ended unexpectedly.")
title_gain = cdata.short_be(data[1:3])
title_peak = cdata.short_be(data[3:5])
album_gain = cdata.short_be(data[5:7])
album_peak = cdata.short_be(data[7:9])
if title_gain:
self.title_gain = _calc_sv8_gain(title_gain)
if title_peak:
self.title_peak = _calc_sv8_peak(title_peak)
if album_gain:
self.album_gain = _calc_sv8_gain(album_gain)
if album_peak:
self.album_peak = _calc_sv8_peak(album_peak)
def __parse_sv467(self, fileobj):
fileobj.seek(-4, 1)
header = fileobj.read(32)
if len(header) != 32:
raise MusepackHeaderError("not a Musepack file")
# SV7
if header.startswith(b"MP+"):
self.version = bytearray(header)[3] & 0xF
if self.version < 7:
raise MusepackHeaderError("not a Musepack file")
frames = cdata.uint_le(header[4:8])
flags = cdata.uint_le(header[8:12])
self.title_peak, self.title_gain = struct.unpack(
"<Hh", header[12:16])
self.album_peak, self.album_gain = struct.unpack(
"<Hh", header[16:20])
self.title_gain /= 100.0
self.album_gain /= 100.0
self.title_peak /= 65535.0
self.album_peak /= 65535.0
self.sample_rate = RATES[(flags >> 16) & 0x0003]
self.bitrate = 0
# SV4-SV6
else:
header_dword = cdata.uint_le(header[0:4])
self.version = (header_dword >> 11) & 0x03FF
if self.version < 4 or self.version > 6:
raise MusepackHeaderError("not a Musepack file")
self.bitrate = (header_dword >> 23) & 0x01F |
hemmerling/codingdojo | src/game_of_life/python_coderetreat_berlin_2014-09/python_legacycrberlin03/gol03.py | Python | apache-2.0 | 554 | 0.01444 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Administrator
#
# Created: 08/10/2011
# Copyright: (c) Administrator 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
class Gol03:
def __init__(self):
pass
def main(self):
return
def set_griddata(self,griddata):
        self.griddata = griddata
if __name__ == '__main__':
pass |
weidel-p/nest-simulator | pynest/nest/tests/test_onetooneconnect.py | Python | gpl-2.0 | 3,758 | 0.000532 | # -*- coding: utf-8 -*-
#
# test_onetooneconnect.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
UnitTests for the PyNEST connect API.
"""
import unittest
import nest
@nest.ll_api.check_stack
class OneToOneConnectTestCase(unittest.TestCase):
"""Tests of Connect with OneToOne pattern"""
def setUp(self):
nest.ResetKernel()
def test_ConnectPrePost(self):
"""Connect pre to post"""
pre = nest.Create("iaf_psc_alpha", 2)
post = nest.Create("iaf_psc_alpha", 2)
nest.Connect(pre, post, "one_to_one")
connections = nest.GetConnections(pre)
targets = connections.get("target")
self.assertEqual(list(targets), post.tolist())
def test_ConnectPrePostParams(self):
"""Connect pre to post with a params dict"""
pre = nest.Create("iaf_psc_alpha", 2)
post = nest.Create("iaf_psc_alpha", 2)
nest.Connect(pre, post, "one_to_one", syn_spec={"weight": 2.0})
connections = nest.GetConnections(pre)
weights = connections.get("weight")
self.assertEqual(weights, [2.0, 2.0])
nest.ResetKernel()
pre = nest.Create("iaf_psc_alpha", 2)
post = nest.Create("iaf_psc_alpha", 2)
nest.Connect(pre, post, conn_spec={"rule": "one_to_one"},
syn_spec={"weight": [2.0, 3.0]})
        connections = nest.GetConnections(pre)
weights = connections.get("weight")
self.assertEqual(weights, [2.0, 3.0])
def test_ConnectPrePostWD(self):
"""Connect pre to post with a weight and delay"""
pre = nest.Create("iaf_psc_alpha", 2)
post = nest.Create("iaf_psc_alpha", 2)
        nest.Connect(pre, post, conn_spec={"rule": "one_to_one"},
syn_spec={"weight": 2.0, "delay": 2.0})
connections = nest.GetConnections(pre)
weights = connections.get("weight")
delays = connections.get("delay")
self.assertEqual(weights, [2.0, 2.0])
self.assertEqual(delays, [2.0, 2.0])
nest.ResetKernel()
pre = nest.Create("iaf_psc_alpha", 2)
post = nest.Create("iaf_psc_alpha", 2)
nest.Connect(pre, post, conn_spec={"rule": "one_to_one"},
syn_spec={"weight": [2.0, 3.0], "delay": [2.0, 3.0]})
connections = nest.GetConnections(pre)
weights = connections.get("weight")
delays = connections.get("delay")
self.assertEqual(weights, [2.0, 3.0])
self.assertEqual(delays, [2.0, 3.0])
def test_IllegalConnection(self):
"""Wrong Connections"""
n = nest.Create('iaf_psc_alpha')
vm = nest.Create('voltmeter')
sd = nest.Create('spike_detector')
self.assertRaisesRegex(nest.kernel.NESTError, "IllegalConnection", nest.Connect, n, vm)
self.assertRaisesRegex(nest.kernel.NESTError, "IllegalConnection", nest.Connect, sd, n)
def suite():
suite = unittest.makeSuite(OneToOneConnectTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
fhats/py_razor_client | py_razor_client/razor_client.py | Python | mit | 4,490 | 0.000668 | # -*- coding: utf-8 -*-
"""Contains a quick-and-dirty client for talking to a Razor server.
Rather than enumerating an API that has lots of warning labels and stickers
cautioning that the API might change frequently, I'm letting the razor server
drive. This class will, when given a host and port, ask razor for its API
capabilities. For each collection it finds (things like nodes, repos, etc), two
methods will be bound to this class: a lister (the plural noun) and a getter
(the singular noun). For each command it finds, a method with the same name
is bound to the class after a bit of sanitization (at this time, just changing
hyphens to underscores to make Python happy). Similar sanitization happens to
any conflicting argument names for commands.
Example usage:
client = RazorClient("example.com", 8080)
client.repos()
client.nodes()
client.nodes("node1")
client.create_repo(name="test_repo", iso_url="http://example.com/img.iso")
"""
from functools import partial
import json
import urlparse
import requests
class RazorClient(object):
# The below tranformation mapping is somewhat unfortunate, but ultimately
# necessary to fit in here, since arguments like iso-url can't be specified
# as python keywords.
ARG_TRANSFORMS = {
"broker_type": "broker-type",
"iso_url": "iso-url",
}
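    # e.g. a call like client.create_repo(name="r", iso_url="http://...") is
    # rewritten before POSTing so the JSON body carries "iso-url", matching
    # the server-side argument name.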
API_PATH = "/api" # It's less likely that this will change
def __init__(self, hostname, port, lazy_discovery=False):
self.hostname = hostname
self.port = str(port)
self._collection_urls = {}
self.collections = set()
self.commands = set()
if not lazy_discovery:
self.discover_methods()
def get_path(self, path, response_as_json=True):
url = self._coerce_to_full_url(path)
response = requests.get(url)
response.raise_for_status() # makes sure errors get propagated as exceptions
if response_as_json:
return response.json()
else:
return response.text
def post_data(self, path, **data):
url = self._coerce_to_full_url(path)
headers = {
"Content-Type": "application/json",
}
response = requests.post(url, headers=headers, data=json.dumps(data))
return response.json()
def discover_methods(self):
methods_data = self.get_path("/api")
for collection in methods_data['collections']:
self._bind_collection(collection)
for command in methods_data['commands']:
self._bind_command(command)
def sanitize_command_name(self, name):
return name.replace("-", "_")
def _coerce_to_full_url(self, maybe_path):
"""Turns what might be a relative path into an asbolute URL."""
if not maybe_path.startswith("http"):
url = self._make_razor_url(maybe_path)
else:
url = maybe_path
return url
def _make_netloc(self):
return ":".join((self.hostname, self.port))
def _make_razor_url(self, path):
netloc = self._make_netloc()
return urlparse.urlunsplit(("http", netloc, path, "", ""))
def _bind_collection(self, collection):
collection_name = collection['name']
collection_url = collection['id']
        self._bind_method(collection_name, lambda *args, **kwargs: self._get_collection(collection_url, *args, **kwargs))
self.collections.add(collection_name)
def _bind_command(self, command):
command_name = command['name']
command_url = command['id']
# Sanitize the command name so that it maps to something we can call
# as a python identifier
command_name = self.sanitize_command_name(command_name)
        self._bind_method(command_name, partial(self._execute_command, command_url))
self.commands.add(command_name)
def _bind_method(self, method_name, method):
setattr(self, method_name, method)
def _get_collection(self, url, *item):
if item:
item_path = '/'.join(item)
total_item_path = '/'.join((url, item_path))
else:
total_item_path = url
return self.get_path(total_item_path)
def _execute_command(self, url, **kwargs):
for key in kwargs.keys():
if key in self.ARG_TRANSFORMS:
kwargs[self.ARG_TRANSFORMS[key]] = kwargs[key]
del kwargs[key]
return self.post_data(url, **kwargs)
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_01_01/operations/_object_replication_policies_operations.py | Python | mit | 21,178 | 0.004675 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("accoun | t_name", account_name, 'str', max_length=24, min_length=3),
"subscr | iptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
object_replication_policy_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"objectReplicationPolicyId": _SERIALIZER.url("object_replication_policy_id", object_replication_policy_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
object_replication_policy_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"objectReplicationPolicyId": _SERIALIZER.url("object_replication_policy_id", object_replication_policy_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
object_replication_policy_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-01-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/objectReplicationPolicies/{objectReplicationPolicyId}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"objectReplicationPolicyId": _SERIALIZER.url("object_replication_policy_id", object_replication_policy_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ObjectReplicationPoliciesOperations(object):
"""ObjectReplicationPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this |
GaneshmKumar/Alertify | alertify/alertify.py | Python | mit | 3,659 | 0.004373 | # @Author: ganeshkumarm
# @Date: 2016-11-19T19:20:11+05:30
# @Last modified by: ganeshkumarm
# @Last modified time: 2016-11-19T19:20:45+05:30
#Built in modules
import os
import sys
import time
import subprocess
import datetime
import platform
from win10toast import ToastNotifier
#Used defined module
import exception
class Notify(object):
def __init__(self):
self.title = 'Alert From Alertify'
self.platform = platform.system()
self.toaster = ToastNotifier()
def counter(self, notify_time, message):
s = 00
m = notify_time
if self.platform == 'Linux':
os.system('clear')
elif self.platform == 'Windows':
os.system('cls');
print "Alertify"
print "Alerts in %d minutes %d seconds ..." % (m, s)
time.sleep(1)
s = 59
m -= 1
while s >= 00:
if m == -1:
print "Completed"
print "Bye"
return
if self.platform == 'Linux':
os.system('clear')
elif self.platform == 'Windows':
os.system('cls');
print "Alertify"
print "-------"
print message
print "-" * len(message)
print "Alerts in %d minutes %d seconds ..." % (m, s)
time.sleep(1)
s -= 1
            if s == 0:
s = 59
m -= 1
def sleep_time(self, notify_time):
try:
            time.sleep(notify_time * 60)
except Exception, e:
print e
def sendNotification(self, message, start_time):
try:
end_time = datetime.datetime.now()
diff_time_in_delta = end_time - start_time
diff_time_in_mins = divmod(diff_time_in_delta.days * 86400 + diff_time_in_delta.seconds, 60)
diff_time_msg = ' (Set ' + str(diff_time_in_mins[0]) + ' minutes ' + str(diff_time_in_mins[1]) + ' seconds ago)'
if self.platform == 'Linux':
os.system('notify-send "'+self.title+'" "'+message+'\r'+diff_time_msg+'"')
elif self.platform == 'Windows':
self.toaster.show_toast(self.title, message+'\n'+str(diff_time_msg), duration=300)
except Exception, e:
print e
def main():
try:
counter_flag = True
notify = Notify()
if len(sys.argv) <= 2:
try:
raise exception.PassArgument("Please pass Time and Message as arguments")
except exception.PassArgument, e:
print e.args
print "Exiting ...."
sys.exit()
notify_time = sys.argv[1]
if not notify_time.isdigit():
try:
raise exception.InvalidArgument("Time parameter must be a positive integer value")
except exception.InvalidArgument, e:
print e.args
print "Exiting ...."
sys.exit()
notify_time = int(sys.argv[1])
if sys.argv[len(sys.argv) - 1] == '--no-counter':
message = ' '.join([sys.argv[i] for i in range(2, len(sys.argv) - 1)])
counter_flag = False
else:
message = ' '.join([sys.argv[i] for i in range(2, len(sys.argv))])
start_time = datetime.datetime.now()
if counter_flag:
notify.counter(notify_time, message)
else:
notify.sleep_time(notify_time)
notify.sendNotification(message, start_time)
except KeyboardInterrupt:
print "\nQuitting ..."
print "Bye"
if __name__ == "__main__":
main()
googleapis/python-websecurityscanner | samples/generated_samples/websecurityscanner_v1beta_generated_web_security_scanner_get_finding_sync.py | Python | apache-2.0 | 1,523 | 0.001313 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetFinding
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-websecurityscanner
# [START websecurityscanner_v1beta_generated_WebSecurityScanner_GetFinding_sync]
from google.cloud import websecurityscanner_v1beta
def sample_get_finding():
# Create a client
client = websecurityscanner_v1beta.WebSecurityScannerClient()
# Initialize request argument(s)
request = websecurityscanner_v1beta.GetFinding | Request(
name="name_value",
)
# Make the request
response = client.get_finding(request=request)
# Handle the response
print(response)
# [END websecurityscanner_v1beta_generated_WebSecurityScanner_GetFinding_sync]
Johnzero/titanium-websocket | flask/InitTestWebSocketServer.py | Python | apache-2.0 | 5,551 | 0.014747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2013-06-25 21:56:55
# @Author : Xero
# @Link : https://github.com/Johnzero
# @Version : $Id$
import socket,threading,struct,sys,base64,hashlib
from time import sleep
# If flash Socket The policy that is sent to the clients.
POLICY = """<cross-domain-policy><allow-access-from domain="*" to-ports="*" /></cross-domain-policy>\0"""
# The string the client has to send in order to receive the policy.
POLICYREQUEST = "<policy-file-request/>"
clientpool = []
IP = "192.168.1.13"
# Start the WebSocket server
class InitWebSocketServer(object) :
def __init__(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP socket
try:
            sock.bind((IP,8080)) # bind to the local address
sock.listen(10)
except:
print("Server is already running,quit")
sys.exit()
        while 1: # loop forever, accepting clients
connection,address = sock.accept()
print "Connection from : ",address
if(self.handshake(connection) != False):
#如果握手失败,不启动任务
t = threading.Thread(target=self.DoRemoteCommand,args=(connection,))
t.start()
    # After a connection is accepted, reply to the client to complete the handshake
def handshake(self,client):
headers = {}
shake = client.recv(1024)
if not len(shake):
return False
if shake.startswith(POLICYREQUEST):
client.send(POLICY)
return True
header, data = shake.split('\r\n\r\n', 1)
for line in header.split("\r\n")[1:]:
key, value = line.split(": ", 1)
headers[key] = value
if(headers.has_key("Sec-WebSocket-Key") == False):
print("this socket is not websocket,close")
client.close()
return False
szKey = base64.b64encode(hashlib.sha1(headers["Sec-WebSocket-Key"] + '258EAFA5-E914-47DA-95CA-C5AB0DC85B11').digest())
szHost = headers["Host"]
our_handshake = "HTTP/1.1 101 Switching Protocols\r\n" \
"Upgrade:websocket\r\n"\
"Connection: Upgrade\r\n"\
"Sec-WebSocket-Accept:"+ szKey + "\r\n" \
"WebSocket-Origin:" + "localhost" + "\r\n" \
"WebSocket-Location: ws://" + szHost + "/WebManagerSocket\r\n" \
"WebSocket-Protocol:WebManagerSocket\r\n\r\n"
state = client.send(our_handshake)
if state:
clientpool.append(client)
# self.SendData("Welcome to WebSocket!\nThis messsage is from server!",client)
return True
    # Receive a message from the client and unpack the WebSocket frame
def RecvData(self,nNum,client):
try:
pData = client.recv(nNum)
fi = open(r"C:\Users\Administrator\Desktop\temp6.temp","wb")
fi.write(pData)
fi.close()
if not len(pData):
return False
except:
return False
else:
code_length = ord(pData[1]) & 127
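            # Per RFC 6455: a 7-bit length of 126 means the real length is in
            # the next 2 bytes, 127 means the next 8 bytes; the 4-byte client
            # masking key follows the length field, then the masked payload.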
if code_length == 126:
masks = pData[4:8]
data = pData[8:]
elif code_length == 127:
masks = pData[10:14]
data = pData[14:]
else:
masks = pData[2:6]
data = pData[6:]
raw_str = ""
i = 0
for d in data:
print ord(masks[i%4])
raw_str += chr(ord(d) ^ ord(masks[i%4]))
i += 1
return raw_str
    # Per-client thread: loop receiving and processing data
def DoRemoteCommand(self,connection):
while 1:
            szBuf = self.RecvData(65550,connection)
if(szBuf == False):
try :
clientpool.remove(connection)
for connect in clientpool:
                        self.SendData(str(connection.getpeername())+" quit!",connect)
except ValueError:pass
break
else:
head = '\x81'
if len(szBuf) < 126:
head += struct.pack('B', len(szBuf))
elif len(szBuf) <= 0xFFFF:
head += struct.pack('!BH', 126, len(szBuf))
else:
head += struct.pack('!BQ', 127, len(szBuf))
# while 1:
# for connect in clientpool:
# connect.sendall(head+szBuf)
# sleep(5)
for connect in clientpool:
connect.sendall(head+szBuf)
    # Pack and send data to the client
def SendData(self,pData,client):
if(pData == False):
return False
else:
pData = str(pData)
token = "\x81"
length = len(pData)
if length < 126:
token += struct.pack("B", length)
elif length <= 0xFFFF:
token += struct.pack("!BH", 126, length)
else:
token += struct.pack("!BQ", 127, length)
pData = '%s%s' % (token,pData)
client.send(pData)
return True
if __name__ == '__main__':
websocket = InitWebSocketServer() |
sebastian-software/jasy | jasy/vcs/Repository.py | Python | mit | 3,416 | 0.001756 | #
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
#
import os
import jasy.core.Console as Console
import jasy.core.Util as Util
import jasy.vcs.Git as Git
import jasy.vcs.Svn as Svn
def isUrl(url):
"""
Figures out whether the given string is a valid Git repository URL.
:param url: URL to the repository
:type url: string
"""
return Git.isUrl(url)
def getType(url):
"""
Returns repository type of the given URL.
:param url: URL to the repository
:type url: string
"""
if Git.isUrl(url):
return "git"
else:
return None
def getTargetFolder(url, version=None):
"""
Returns the target folder name based on the URL and version using SHA1 checksums.
:param url: URL to the repository
:type url: string
:param version: Version to use
    :type version: string
"""
if Git.isUrl(url):
version = Git.expandVersion(version)
folder = url[url.rindex("/") + 1:]
if folder.endswith(".git"):
folder = folder[:-4]
identifier = "%s@%s" % (url, version)
version = version[version.rindex("/") + 1:]
hash = Util.generateChecksum(identifier)
return "%s-%s-%s" % (folder, version, hash)
def update(url, version=None, path=None, update=True):
"""
    Clones the given repository URL (optionally with overriding/update features).
    :param url: URL to the repository
    :type url: string
    :param version: Version to clone
    :type version: string
    :param path: Destination path
    :type path: string
    :param update: Enable/disable update functionality
    :type update: bool
"""
revision = None
if Git.isUrl(url):
        version = Git.expandVersion(version)
revision = Git.update(url, version, path, update)
return revision
def getRevision(path=None):
"""Returns the current revision of the repository in the given path."""
old = os.getcwd()
revision = None
if path is not None:
os.chdir(path)
while True:
if os.path.exists(".git"):
revision = Git.getBranch(path) + "-" + Git.getShortRevision(path)
break
elif os.path.exists(".svn"):
revision = Svn.getBranch(path) + "-" + Svn.getRevision(path)
break
cur = os.getcwd()
os.chdir(os.pardir)
if cur == os.getcwd():
break
os.chdir(old)
return revision
def clean(path=None):
"""
Cleans repository from untracked files.
:param url: Path to the local repository
:type url: string
"""
old = os.getcwd()
Console.info("Cleaning repository (clean)...")
Console.indent()
if path:
os.chdir(path)
if os.path.exists(".git"):
Git.cleanRepository()
os.chdir(old)
Console.outdent()
def distclean(path=None):
"""
Cleans repository from untracked and ignored files. This method is pretty agressive in a way that it deletes all non
repository managed files e.g. external folder, uncommitted changes, unstaged files, etc.
:param url: Path to the local repository
:type url: string
"""
old = os.getcwd()
Console.info("Cleaning repository (distclean)...")
Console.indent()
if path:
os.chdir(path)
if os.path.exists(".git"):
Git.distcleanRepository()
os.chdir(old)
Console.outdent()
pombredanne/MOG | nova/conductor/manager.py | Python | apache-2.0 | 39,345 | 0.000686 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import copy
from nova.api.ec2 import ec2utils
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
from nova.image import glance
from nova import manager
from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Instead of having a huge list of arguments to instance_update(), we just
# accept a dict of fields to update and use this whitelist to validate it.
allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'power_state', 'access_ip_v4', 'access_ip_v6',
'launched_at', 'terminated_at', 'host', 'node',
'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
'default_swap_device', 'root_device_name',
'system_metadata', 'updated_at'
]
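# For example, an RPC call such as instance_update(ctxt, uuid,
# {'task_state': task_states.SPAWNING}) passes this whitelist, while an
# unexpected key like 'uuid' raises KeyError back to the caller.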
# Fields that we want to convert back into a datetime object.
datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
RPC_API_VERSION = '1.58'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self._network_api = None
self._compute_api = None
self.compute_task_mgr = ComputeTaskManager()
self.quotas = quota.QUOTAS
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def create_rpc_dispatcher(self, *args, **kwargs):
kwargs['additional_apis'] = [self.compute_task_mgr]
return super(ConductorManager, self).create_rpc_dispatcher(*args,
**kwargs)
@property
def network_api(self):
# NOTE(danms): We need to instantiate our network_api on first use
# to avoid the circular dependency that exists between our init
# and network_api's
if self._network_api is None:
self._network_api = network.API()
return self._network_api
@property
def compute_api(self):
if self._compute_api is None:
self._compute_api = compute_api.API()
return self._compute_api
def ping(self, context, arg):
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# now a part of the base rpc API.
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
@rpc_common.client_exceptions(KeyError, ValueError,
exception.InvalidUUID,
exception.InstanceNotFound,
exception.UnexpectedTaskStateError)
def instance_update(self, context, instance_uuid,
updates, service=None):
for key, value in updates.iteritems():
            if key not in allowed_updates:
                LOG.error(_("Instance update attempted for "
"'%(key)s' on %(instance_uuid)s"),
{'key': key, 'instance_uuid': instance_uuid})
raise KeyError("unexpected update keyword '%s'" % key)
if key in datetime_fields and isinstance(value, basestring):
updates[key] = timeutils.parse_strtime(value)
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance_uuid, updates)
notifications.send_update(context, old_ref, instance_ref, service)
return jsonutils.to_primitive(instance_ref)
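    # Illustrative call (hypothetical values): only whitelisted keys pass,
    # and datetime strings such as 'launched_at' are parsed back into
    # datetime objects before hitting the database:
    #
    #     conductor.instance_update(context, instance_uuid,
    #                               {'task_state': None,
    #                                'launched_at': '2013-06-01T00:00:00.000000'})
    #
    # A key outside the whitelist, e.g. 'hostname', raises KeyError instead.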
@rpc_common.client_exceptions(exception.InstanceNotFound)
def instance_get(self, context, instance_id):
return jsonutils.to_primitive(
self.db.instance_get(context, instance_id))
@rpc_common.client_exceptions(exception.InstanceNotFound)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
return jsonutils.to_primitive(
self.db.instance_get_by_uuid(context, instance_uuid,
columns_to_join))
# NOTE(hanlind): This method can be removed in v2.0 of the RPC API.
def instance_get_all(self, context):
return jsonutils.to_primitive(self.db.instance_get_all(context))
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
if node is not None:
result = self.db.instance_get_all_by_host_and_node(
context.elevated(), host, node)
else:
result = self.db.instance_get_all_by_host(context.elevated(), host,
columns_to_join)
return jsonutils.to_primitive(result)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
@rpc_common.client_exceptions(exception.MigrationNotFound)
def migration_get(self, context, migration_id):
migration_ref = self.db.migration_get(context.elevated(),
migration_id)
return jsonutils.to_primitive(migration_ref)
# NOTE(comstud): This method is now deprecated and can be removed in
# version v2.0 of the RPC API
def migration_get_unconfirmed_by_dest_compute(self, context,
confirm_window,
dest_compute):
migrations = self.db.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute)
return jsonutils.to_primitive(migrations)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
migrations = self.db.migration_get_in_progress_by_host_and_node(
context, host, node)
return jsonutils.to_primitive(migrations)
# NOTE(comstud): This method can |
ken0nek/Software2 | 140519/test-student.py | Python | mit | 393 | 0.007634 | #-*- coding:utf-8 -*-
class Student:
def __init__(self, name):
self.name = name
    def setGrade(self, grade):
        self.grade = grade
def main():
s1 = Student("yamada")
s2 = Student("suzuki")
s1.setGrade(1)
s2.setGrade(2)
format = "student name = {0} ({1})"
print format.format(s1.name, s1.grade)
print format.format(s2.name, s2.grade)
main()
|
HewlettPackard/python-proliant-sdk | examples/Redfish/ex15_set_uid_light.py | Python | apache-2.0 | 2,217 | 0.014885 | # Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _redfishobject import RedfishObject
from redfish.rest.v1 import ServerDownOrUnreachableError
def ex15_set_uid_light(redfishobj, uid):
sys.stdout.write("\nEXAMPLE 15: Set UID Light on or off\n")
instances = redfishobj.search_for_type("ComputerSystem.")
for instance in instances:
body = dict()
if uid:
body["IndicatorLED"] = "Lit"
else:
body["IndicatorLED"] = "Off"
response = redfishobj.redfish_patch(instance["@odata.id"], body)
redfishobj.error_handler(response)
if __name__ == "__main__":
# When running on the server locally use the following commented values
    # iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_https_url, iLO_account, iLO_password)
except ServerDownOrUnreachableError, excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
sys.exit()
except Exception, excp:
raise excp
ex15_set_uid_light(REDFISH_OBJ, True)
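    # Passing False instead switches the UID light off again, e.g.:
    #     ex15_set_uid_light(REDFISH_OBJ, False)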
|
phase-dev/phase | libphase/dialogs.py | Python | gpl-3.0 | 3,325 | 0.032782 | """
Copyright 2014
This file is part of Phase.
Phase is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Phase is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Phase. If not, see <http://www.gnu.org/licenses/>.
"""
from gi.repository import Gtk
def warning(title,message):
dialog= Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL| Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.WARNING,
Gtk.ButtonsType.NONE,
None)
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.add_button("OK",Gtk.ResponseType.OK)
dialog.set_position(Gtk.WindowPosition.CENTER)
dialog.set_title(title)
dialog.set_markup(message)
response=dialog.run()
dialog.destroy()
return response
def password_question(title,question):
dialog= Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL| Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.NONE,
None)
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.add_button("Cancel",Gtk.ResponseType.CANCEL)
dialog.add_button("OK",Gtk.ResponseType.OK)
dialog.set_position(Gtk.WindowPosition.CENTER)
dialog.set_markup(question)
dialog.set_title(title)
entry=Gtk.Entry()
entry.set_visibility(False)
label=Gtk.Label("Password:")
alignment=Gtk.Alignment()
alignment.set_padding(0,0,75,0)
box=Gtk.Box()
box.add(label)
box.add(entry)
alignment.add(box)
dialog.vbox.add(alignment)
dialog.vbox.show_all()
response=dialog.run()
password=entry.get_text()
dialog.destroy()
return response,password
def disclaimer():
dialog= Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL| Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.WARNING,
Gtk.ButtonsType.NONE,
None)
dialog.add_button("I Do Not Accept",Gtk.ResponseType.CANCEL)
dialog.add_button("I Accept",Gtk.ResponseType.OK)
dialog.set_position(Gtk.WindowPosition.CENTER)
dialog.set_title("Disclaimer")
dialog.set_markup("Phase is a penetration testing tool and therefore is to be used only against systems where the user has authorisation. Usage of Phase for attacking targets without prior mutual consent is illegal. It is the end user's responsibility to obey all applicable laws. The developers assume no liability and are not responsible for any misuse or damage caused b | y this software")
response=dialog.run()
dialog.destroy()
return response
def exit_warning():
dialog= Gtk.MessageDialog(
None,
Gtk.DialogFlags.MODAL| Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.WARNING,
Gtk.ButtonsType.NONE,
None)
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.add_button("Close Without Saving",0)
dialog.add_button("Cancel",1)
dialog.add_button("Save",2)
dialog.set_position(Gtk.WindowPosition.CENTER)
dialog.set_title("Save and Exit?")
    dialog.set_markup("<b>Save and Exit?</b>\n\nIf you don't save, all changes will be permanently lost.")
response=dialog.run()
dialog.destroy()
return response
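# Minimal smoke test (requires a running display; strings are illustrative):
if __name__ == "__main__":
    warning("Demo", "This is only a demonstration dialog.")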
|
lehmannro/translate | storage/poxliff.py | Python | gpl-2.0 | 14,996 | 0.001467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""XLIFF classes specifically suited for handling the PO representation in
XLIFF.
This way the API supports plurals as if it was a PO file, for example.
"""
from translate.storage import base, lisa, poheader, xliff
from translate.storage.placeables import general
from translate.misc.multistring import multistring
from lxml import etree
import re
def hasplurals(thing):
if not isinstance(thing, multistring):
return False
return len(thing.strings) > 1
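# For example (illustrative): hasplurals(u"file") is False, while
# hasplurals(multistring([u"%d file", u"%d files"])) is True.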
class PoXliffUnit(xliff.xliffunit):
"""A class to specifically handle the plural units created from a po file."""
rich_parsers = general.parsers
def __init__(self, source=None, empty=False, encoding="UTF-8"):
self._rich_source = None
self._rich_target = None
self.units = []
if empty:
return
if not hasplurals(source):
super(PoXliffUnit, self).__init__(source)
return
self.xmlelement = etree.Element(self.namespaced("group"))
self.xmlelement.set("restype", "x-gettext-plurals")
self.setsource(source)
def __eq__(self, other):
if isinstance(other, PoXliffUnit):
if len(self.units) != len(other.units):
return False
if not super(PoXliffUnit, self).__eq__(other):
return False
for i in range(len(self.units)-1):
if not self.units[i+1] == other.units[i+1]:
return False
return True
if len(self.units) <= 1:
if isinstance(other, lisa.LISAunit):
return super(PoXliffUnit, self).__eq__(other)
else:
return self.source == other.source and self.target == other.target
return False
#XXX: We don't return language nodes correctly at the moment
# def getlanguageNodes(self):
#        if not self.hasplural():
# return super(PoXliffUnit, self).getlanguageNodes()
# else:
# return self.units[0].getlanguageNodes()
def setsource(self, source, sourcelang="en"):
# TODO: consider changing from plural to singular, etc.
self._rich_source = None
if not hasplurals(source):
            super(PoXliffUnit, self).setsource(source, sourcelang)
        else:
target = self.target
for unit in self.units:
try:
self.xmlelement.remove(unit.xmlelement)
                except ValueError:  # lxml's remove() raises ValueError, not xml.dom.NotFoundErr
pass
self.units = []
for s in source.strings:
newunit = xliff.xliffunit(s)
# newunit.namespace = self.namespace #XXX?necessary?
self.units.append(newunit)
self.xmlelement.append(newunit.xmlelement)
self.target = target
# We don't support any rich strings yet
multistring_to_rich = base.TranslationUnit.multistring_to_rich
rich_to_multistring = base.TranslationUnit.rich_to_multistring
rich_source = base.TranslationUnit.rich_source
rich_target = base.TranslationUnit.rich_target
def getsource(self):
if not self.hasplural():
return super(PoXliffUnit, self).getsource()
else:
strings = []
strings.extend([unit.source for unit in self.units])
return multistring(strings)
source = property(getsource, setsource)
def settarget(self, text, lang='xx', append=False):
self._rich_target = None
if self.gettarget() == text:
return
if not self.hasplural():
super(PoXliffUnit, self).settarget(text, lang, append)
return
if not isinstance(text, multistring):
text = multistring(text)
source = self.source
sourcel = len(source.strings)
targetl = len(text.strings)
if sourcel < targetl:
sources = source.strings + [source.strings[-1]] * (targetl - sourcel)
targets = text.strings
id = self.getid()
self.source = multistring(sources)
self.setid(id)
elif targetl < sourcel:
targets = text.strings + [""] * (sourcel - targetl)
else:
targets = text.strings
for i in range(len(self.units)):
self.units[i].target = targets[i]
def gettarget(self):
if self.hasplural():
strings = [unit.target for unit in self.units]
if strings:
return multistring(strings)
else:
return None
else:
return super(PoXliffUnit, self).gettarget()
target = property(gettarget, settarget)
def addnote(self, text, origin=None, position="append"):
"""Add a note specifically in a "note" tag"""
if isinstance(text, str):
text = text.decode("utf-8")
note = etree.SubElement(self.xmlelement, self.namespaced("note"))
note.text = text
if origin:
note.set("from", origin)
for unit in self.units[1:]:
unit.addnote(text, origin)
def getnotes(self, origin=None):
#NOTE: We support both <context> and <note> tags in xliff files for comments
if origin == "translator":
notes = super(PoXliffUnit, self).getnotes("translator")
trancomments = self.gettranslatorcomments()
if notes == trancomments or trancomments.find(notes) >= 0:
notes = ""
elif notes.find(trancomments) >= 0:
trancomments = notes
notes = ""
trancomments = trancomments + notes
return trancomments
elif origin in ["programmer", "developer", "source code"]:
devcomments = super(PoXliffUnit, self).getnotes("developer")
autocomments = self.getautomaticcomments()
if devcomments == autocomments or autocomments.find(devcomments) >= 0:
devcomments = ""
elif devcomments.find(autocomments) >= 0:
autocomments = devcomments
devcomments = ""
return autocomments
else:
return super(PoXliffUnit, self).getnotes(origin)
def markfuzzy(self, value=True):
super(PoXliffUnit, self).markfuzzy(value)
for unit in self.units[1:]:
unit.markfuzzy(value)
def marktranslated(self):
super(PoXliffUnit, self).marktranslated()
for unit in self.units[1:]:
unit.marktranslated()
def setid(self, id):
self.xmlelement.set("id", id)
if len(self.units) > 1:
for i in range(len(self.units)):
self.units[i].setid("%s[%d]" % (id, i))
def getlocations(self):
"""Returns all the references (source locations)"""
groups = self.getcontextgroups("po-reference")
references = []
for group in groups:
sourcefile = ""
linenumber = ""
for (type, text) in group:
if type == "sourcefile":
sourcefile = text
elif type == "linenumber":
linenumber = text
assert sourcefile
if linenumber:
sourcefile = sourcefile + ":" + linenumber
references.append(sourcefile)
return references
def getautomaticcomments(self):
|
kedz/cuttsum | trec2015/sbin/cross-validation/best-feats.py | Python | apache-2.0 | 1,444 | 0.006233 | import sys
import os
import pandas as pd
from collections import defaultdict
import numpy as np
dirname = sys.argv[1]
path = os.path.join(dirname, "weights.tsv")
with open(path ,"r") as f:
df = pd.read_csv(f, sep="\t")
df = df[df["iter"] == 5]
fc2r = defaultdict(list)
features = set()
for event, event_df in df.groupby("event"):
pos_df = event_df[event_df["class"] == "SELECT"]
event_df.loc[event_df["class"] == "SELECT", "rank"] = pos_df["weight"].argsort()
for _, row in event_df.loc[event_df["class"] == "SELECT"][["name", "weight", "rank"]].iterrows():
clazz = "SELECT"
feature = row["name"]
rank = row["rank"]
fc2r[(feature, clazz)].append(rank)
features.add(feature)
neg_df = event_df[event_df["class"] == "NEXT"]
event_df.loc[event_df["class"] == "NEXT", "rank"] = neg_df["weight"].argsort()
for _, row in event_df.loc[event_df["class"] == "NEXT"] | [["name", "weight", "rank"]].iterrows():
clazz = "NEXT"
feature = row["name"]
rank = row["rank"]
fc2r[(feature, clazz)].append(rank)
features.add(feature)
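# Score each feature by the absolute gap between its mean weight-rank in the
# SELECT class and in the NEXT class; the 50 widest gaps are printed below.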
f2d = {}
for feature in features:
sel_u = np.mean(fc2r[(feature, "SELECT")])
next_u = np.mean(fc2r[(feature, "NEXT")])
    diff = max(sel_u, next_u) - min(sel_u, next_u)
f2d[feature] = diff
print
feat_diff = sorted(f2d.items(), key=lambda x: x[1])
for feat, diff in feat_diff[-50:]:
print feat
|
wettenhj/pyupdater-wx-demo | wxupdatedemo/tests/test_run_update_available.py | Python | apache-2.0 | 4,840 | 0.001653 | """
Test ability to run PyUpdaterWxDemo and confirm that an update is available.
We don't want this test to be dependent on having a
client_config.py (created by pyupdater init), so
we set the WXUPDATEDEMO_TESTING environment variable
before loading the wxupdatedemo.config module or
the run module.
"""
# pylint: disable=bad-continuation
# pylint: disable=line-too-long
import unittest
import os
import sys
import gzip
import json
import shutil
import tempfile
import ed25519
import six
import wx
from wxupdatedemo import __version__
APP_NAME = 'PyUpdaterWxDemo'
CURRENT_VERSION = '0.0.1'
UPDATE_VERSION = '0.0.2'
# PyUpdater version format is:
# Major.Minor.Patch.[Alpha|Beta|Stable].ReleaseNumber
# where Alpha=0, Beta=1 and Stable=2
UPDATE_VERSION_PYU_FORMAT = '%s.2.0' % UPDATE_VERSION
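# e.g. with UPDATE_VERSION = '0.0.2' this yields '0.0.2.2.0'
# (channel Stable=2, release number 0).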
VERSIONS = {
"updates": {
APP_NAME: {
UPDATE_VERSION_PYU_FORMAT: {
"mac": {
"file_hash": "bd4bc8824dfd8240d5bdb9e46f21a86af4d6d1cc1486a2a99cc4b9724a79492b",
"filename": "%s-mac-%s.tar.gz" % (APP_NAME, UPDATE_VERSION),
"file_size": 30253628
},
"win": {
"file_hash": "b1399df583bce4ca45665b3960fd918a316d86c997d6c33556eda1cc2b555e59",
"filename": "%s-win-%s.zip" % (APP_NAME, UPDATE_VERSION),
"file_size": 14132995
},
"nix32": {
"file_hash": "bd4bc8824dfd8240d5bdb9e46f21a86af4d6d1cc1486a2a99cc4b9724a79492b",
"filename": "%s-nix32-%s.tar.gz" % (APP_NAME, UPDATE_VERSION),
"file_size": 30253628
},
"nix64": {
"file_hash": "bd4bc8824dfd8240d5bdb9e46f21a86af4d6d1cc1486a2a99cc4b9724a79492b",
"filename": "%s-nix64-%s.tar.gz" % (APP_NAME, UPDATE_VERSION),
"file_size": 30253628
| }
}
}
},
"latest": {
APP_NAME: {
"stable": {
"mac": UPDATE_VERSION_PYU_FORMAT,
"win": UPDATE_VERS | ION_PYU_FORMAT,
"nix32": UPDATE_VERSION_PYU_FORMAT,
"nix64": UPDATE_VERSION_PYU_FORMAT
}
}
}
}
# Generated by "pyupdater keys -c":
# These keys are only used for automated testing!
# DO NOT SHARE YOUR PRODUCTION PRIVATE_KEY !!!
PUBLIC_KEY = "12y2oHGB2oroRQJkR73CJNaFeQy776oXsUrqWaAEiZU"
PRIVATE_KEY = "nHgoNwSmXSDNSMqQTtdAEmi/6otajiNYJEXESvAO8dc"
KEYS = {
"app_public": "MIBCEwFh7AcaxJrHKIgYqAmZ9YX16NXVHLi+EdDmtYc",
"signature": "1YTDuJauq7qVFUrKPHGMMESllJ4umo6u5r9pEgVmvlxgXi3qGXnKWo2LG94+oosN3KiO8DlxOmyfuwaaQKtFCw"
}
class RunTester(unittest.TestCase):
"""
Test ability to run PyUpdaterWxDemo and confirm that an update is available.
"""
def __init__(self, *args, **kwargs):
super(RunTester, self).__init__(*args, **kwargs)
self.app = None
self.fileServerDir = None
def setUp(self):
tempFile = tempfile.NamedTemporaryFile()
self.fileServerDir = tempFile.name
tempFile.close()
os.mkdir(self.fileServerDir)
os.environ['PYUPDATER_FILESERVER_DIR'] = self.fileServerDir
privateKey = ed25519.SigningKey(PRIVATE_KEY.encode('utf-8'),
encoding='base64')
signature = privateKey.sign(six.b(json.dumps(VERSIONS, sort_keys=True)),
encoding='base64').decode()
VERSIONS['signature'] = signature
keysFilePath = os.path.join(self.fileServerDir, 'keys.gz')
with gzip.open(keysFilePath, 'wb') as keysFile:
keysFile.write(json.dumps(KEYS, sort_keys=True))
versionsFilePath = os.path.join(self.fileServerDir, 'versions.gz')
with gzip.open(versionsFilePath, 'wb') as versionsFile:
versionsFile.write(json.dumps(VERSIONS, sort_keys=True))
os.environ['WXUPDATEDEMO_TESTING'] = 'True'
from wxupdatedemo.config import CLIENT_CONFIG
self.clientConfig = CLIENT_CONFIG
self.clientConfig.PUBLIC_KEY = PUBLIC_KEY
self.clientConfig.APP_NAME = APP_NAME
def test_run_update_available(self):
"""
Test ability to run PyUpdaterWxDemo and confirm that an update is available.
"""
self.assertEqual(__version__, CURRENT_VERSION)
from run import Run
self.app = Run(argv=['RunTester', '--debug'],
clientConfig=self.clientConfig)
self.assertEqual(self.app.statusBar.GetStatusText(),
"Update available but application is not frozen.")
sys.stderr.write("We can only restart a frozen app!\n")
def tearDown(self):
"""
Destroy the app
"""
if self.app:
self.app.frame.Hide()
self.app.OnCloseFrame(wx.PyEvent())
self.app.frame.Destroy()
del os.environ['PYUPDATER_FILESERVER_DIR']
del os.environ['WXUPDATEDEMO_TESTING']
shutil.rmtree(self.fileServerDir)
|
cwisecarver/osf.io | api/preprint_providers/serializers.py | Python | apache-2.0 | 3,317 | 0.000904 | from rest_framework import serializers as ser
from api.base.utils import absolute_reverse
from api.base.serializers import JSONAPISerializer, LinksField, RelationshipField, ShowIfVersion, DevOnly
class PreprintProviderSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'name',
'description',
'id',
'domain',
'domain_redirect_enabled'
])
name = ser.CharField(required=True)
description = ser.CharField(required=False)
id = ser.CharField(max_length=200, source='_id')
advisory_board = ser.CharField(required=False)
example = ser.CharField(required=False, allow_null=True)
domain = ser.CharField(required=False, allow_null=False)
domain_redirect_enabled = ser.BooleanField(required=True)
subjects_acceptable = ser.JSONField(required=False, allow_null=True)
footer_links = ser.CharField(required=False)
share_source = ser.CharField(read_only=True)
allow_submissions = DevOnly(ser.BooleanField(read_only=True))
additional_providers = DevOnly(ser.ListField(child=ser.CharField(), read_only=True))
preprints = RelationshipField(
related_view='preprint_providers:preprints-list',
related_view_kwargs={'provider_id': '<_id>'}
)
taxonomies = RelationshipField(
related_view='preprint_providers:taxonomy-list',
related_view_kwargs={'provider_id': '<_id>'}
)
licenses_acceptable = RelationshipField(
related_view='preprint_providers:license-list',
related_view_kwargs={'provider_id': '<_id>'}
)
links = LinksField({
'self': 'get_absolute_url',
'preprints': 'get_preprints_url',
'external_url': 'get_external_url'
})
# Deprecated fields
header_text = ShowIfVersion(
ser.CharField(required=False, default=''),
min_version='2.0', max_version='2.3'
)
banner_path = ShowIfVersion(
ser.CharField(required=False, default=''),
min_version='2.0', max_version='2.3'
)
logo_path = ShowIfVersion(
ser.CharField(required=False, default=''),
min_version='2.0', max_version='2.3'
)
email_contact = ShowIfVersion(
ser.CharField(required=False, allow_null=True),
min_version='2.0', max_version='2.3'
)
email_support = ShowIfVersion(
ser.CharField(required=False, allow_null=True),
min_version='2.0', max_version='2.3'
)
social_twitter = ShowIfVersion(
        ser.CharField(required=False, allow_null=True),
min_version='2.0', max_version='2.3'
)
social_facebook = ShowIfVersion(
ser.CharField(required=False, allow_null=True),
min_version='2.0', max_version='2.3'
)
    social_instagram = ShowIfVersion(
ser.CharField(required=False, allow_null=True),
min_version='2.0', max_version='2.3'
)
class Meta:
type_ = 'preprint_providers'
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
def get_preprints_url(self, obj):
return absolute_reverse('preprint_providers:preprints-list', kwargs={
'provider_id': obj._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_external_url(self, obj):
return obj.external_url
|
fengsp/flask-snippets | templatetricks/override_autoescaped.py | Python | bsd-3-clause | 570 | 0.005263 | # -*- coding: utf-8 -*-
"""
templatetricks.override_autoescaped
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Override which templates are autoescaped
http://flask.pocoo.org/snippets/41/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import Flask
class JHtmlEscapingFlask(Flask):
def select_jinja_autoescape(self, filename):
if filename.endswith('.jhtml'):
return True
        return Flask.select_jinja_autoescape(self, filename)
app = JHtmlEscapingFlask(__name__)
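# With this subclass, a template named e.g. "page.jhtml" is rendered with
# autoescaping enabled, while other extensions keep Flask's defaults.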
|
uogbuji/akara | test/test_pipeline.py | Python | apache-2.0 | 3,268 | 0.009486 | import hashlib
import urllib2
from test_services import GET
from akara import pipeline
def test_pipeline_missing_stages():
for stages in (None, [], ()):
try:
pipeline.register_pipeline("blah", stages=stages)
except TypeError:
pass
else:
            raise AssertionError("allowed missing stages: %r" % stages)
def test_flatten():
result = list(pipeline._flatten_kwargs_values(dict(a=["1","2","3"])))
assert result == [("a","1"), ("a","2"), ("a","3")], result
result = list(pipeline._flatten_kwargs_values(dict(a=["1","2","3"], b="9")))
result.sort()
assert result == [("a","1"), ("a","2"), ("a","3"), ("b","9")], result
def test_stage_query_args():
stage = pipeline.Stage("http://example.com", [("a", ["1", "2"]), ("b", "9")])
assert stage.query_string == "a=1&a=2&b=9", stage.query_string
def test_stage_kwargs():
stage = pipeline.Stage("http://example.com", a=["1", "2"], b="9")
assert (stage.query_string == "a=1&a=2&b=9" or
stage.query_string == "b=9&a=1&a=2"), stage.query_string
def test_stage_raw_query():
stage = pipeline.Stage("http://example.com", query_string="=j")
assert stage.query_string == "=j"
def test_stage_error_combinations():
# Not allowed to mix inputs
def t1():
pipeline.Stage("http://example.com", [("a", "b")], query_string="=j")
def t2():
pipeline.Stage("http://example.com", [("a", "b")], a=3)
def t3():
pipeline.Stage("http://example.com", query_string="=j", a=3)
for t in (t1, t2, t2):
try:
t()
except TypeError:
pass
else:
raise AssertionError("expected to fail")
def test_hash_encode():
result = GET("hash_encode", data="This is a test")
expected = hashlib.md5("secretThis is a test").digest().encode("base64")
assert result == expected, (result, expected)
def test_hash_encode_rot13():
result = GET("hash_encode_rot13", data="This is another test")
expected = hashlib.md5("secretThis is another test").digest().encode("base64").encode("rot13")
assert result == expected, (result, expected)
def test_get_hash():
result = GET("get_hash")
expected = hashlib.md5("Andrew").digest().encode("base64")
assert result == expected, (result, expected)
def test_get_hash2():
result = GET("get_hash", dict(text="Sara Marie"))
expected = hashlib.md5("Sara Marie").digest().encode("base64")
assert result == expected, (result, expected)
def test_broken_pipeline1():
try:
result = GET("broken_pipeline1")
raise AssertionError("should not get here")
except urllib2.HTTPError, err:
assert err.code == 500
msg = err.read()
assert "Broken internal pipeline" in msg, msg
def test_broken_pipeline2():
try:
result = GET("broken_pipeline2", data="feed the pipeline")
raise AssertionError("should not get here")
except urllib2.HTTPError, err:
assert err.code == 500, err.code
msg = err.read()
assert "Broken internal pipeline" in msg, msg
def test_registry_size():
result = GET("test_count_registry")
    assert int(result) > 30, "What?! Did you remove elements from the registry?"
|
atztogo/phonopy | phonopy/qha/core.py | Python | bsd-3-clause | 40,784 | 0.00076 | """Core routines for QHA."""
# Copyright (C) 2012 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from phonopy.qha.eos import fit_to_eos, get_eos
from phonopy.units import Avogadro, EVAngstromToGPa, EvTokJmol
class BulkModulus:
"""Bulk modulus class.
This class is used to calculate bulk modulus only from temperature
independent energy input.
"""
def __init__(self, volumes, energies, eos="vinet"):
"""Init method.
volumes : array_like
Unit cell volumes where energies are obtained.
shape=(volumes, ), dtype='double'.
energies : array_like
Energies obtained at volumes.
shape=(volumes, ), dtype='double'.
eos : str
Identifier of equation of states function.
"""
self._volumes = volumes
if np.array(energies).ndim == 1:
self._energies = energies
else:
self._energies = energies[0]
self._eos = get_eos(eos)
self._energy = None
self._bulk_modulus = None
self._b_prime = None
try:
(
self._energy,
self._bulk_modulus,
self._b_prime,
self._volume,
) = fit_to_eos(volumes, self._energies, self._eos)
except TypeError:
msg = ['Failed to fit to "%s" equation of states.' % eos]
if len(volumes) < 4:
msg += ["At least 4 volume points are needed for the fitting."]
msg += ["Careful choice of volume points is recommended."]
raise RuntimeError("\n".join(msg))
@property
def bulk_modulus(self):
"""Return bulk modulus."""
return self._bulk_modulus
def get_bulk_modulus(self):
"""Return bulk modulus."""
warnings.warn(
"BulkModulus.get_bulk_modulus() is deprecated."
"Use BulkModulus.bulk_modulus attribute.",
DeprecationWarning,
)
return self.bulk_modulus
@property
def equilibrium_volume(self):
"""Return volume at equilibrium."""
return self._volume
def get_equilibrium_volume(self):
"""Return volume at equilibrium."""
warnings.warn(
"BulkModulus.get_equilibrium_volume() is deprecated."
"Use BulkModulus.equilibrium_volume attribute.",
DeprecationWarning,
)
return self.equilibrium_volume
@property
def b_prime(self):
"""Return fitted parameter B'."""
return self._b_prime
def get_b_prime(self):
"""Return fitted parameter B'."""
warnings.warn(
"BulkModulus.get_b_prime() is deprecated."
"Use BulkModulus.b_prime attribute.",
DeprecationWarning,
)
return self._b_prime
@property
def energy(self):
"""Return fitted parameter of energy."""
return self._energy
def get_energy(self):
"""Return fitted parameter of energy."""
warnings.warn(
"BulkModulus.get_energy() is deprecated."
"Use BulkModulus.energy attribute.",
DeprecationWarning,
)
return self._energy
def get_parameters(self):
"""Return fitted parameters."""
return (self._energy, self._bulk_modulus, self._b_prime, self._volume)
def get_eos(self):
"""Return EOS function as a python method."""
warnings.warn("BulkModulus.get_eos() is deprecated.", DeprecationWarning)
return self._eos
def plot(self):
"""Plot fitted EOS curve."""
import matplotlib.pyplot as plt
ep = self.get_parameters()
vols = self._volumes
volume_points = np.linspace(min(vols), max(vols), 201)
fig, ax = plt.subplots()
ax.plot(volume_points, self._eos(volume_points, *ep), "r-")
ax.plot(vols, self._energies, "bo", markersize=4)
return plt
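# Hedged usage sketch with made-up numbers (not real calculation output).
# bulk_modulus is in the units of the fit (eV/angstrom^3 here), so the
# EVAngstromToGPa constant imported above converts it to GPa:
#
#     volumes = [60.0, 62.5, 65.0, 67.5, 70.0]             # angstrom^3
#     energies = [-10.20, -10.35, -10.40, -10.36, -10.24]  # eV
#     bm = BulkModulus(volumes, energies, eos="vinet")
#     print(bm.bulk_modulus * EVAngstromToGPa)             # B0 in GPa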
class QHA:
"""Quasi harmonic approximation class."""
def __init__(
self,
volumes, # angstrom^3
electronic_energies, # eV
temperatures, # K
cv, # J/K/mol
entropy, # J/K/mol
fe_phonon, # kJ/mol
eos="vinet",
t_max=None,
energy_plot_factor=None,
):
"""Init method.
Parameters
----------
volumes: array_like
Unit cell volumes (V) in angstrom^3.
dtype='double'
shape=(volumes,)
electronic_energies: array_like
Electronic energies (U_el) or electronic free energies (F_el) in eV.
            It is assumed to be the former if ndim==1 and the latter if ndim==2.
dtype='double'
shape=(volumes,) or (temperatuers, volumes)
temperatures: array_like
            Temperatures in ascending order (T) in K.
dtype='double'
shape=(temperatures,)
cv: array_like
Phonon Heat capacity at constant volume in J/K/mol.
dtype='double'
shape=(temperatuers, volumes)
entropy: array_like
Phonon entropy at constant volume (S_ph) in J/K/mol.
dtype='double'
shape=(temperatuers, volumes)
fe_phonon: array_like
Phonon Helmholtz free energy (F_ph) in kJ/mol.
dtype='double'
shape=(temperatuers, volumes)
        eos: str
            Equation of state used for fitting F vs V.
            'vinet', 'murnaghan' or 'birch_murnaghan'.
        t_max: float
            Maximum temperature to be calculated. This must not be greater
            than the third-from-last element of 'temperatures'. If t_max=None,
            the third-from-last temperature is used.
energy_plot_factor: float
This value is multiplied to energy like values only in plotting.
"""
self._volumes = np.array(volumes)
self._electronic_energies = np.array(electronic_energies)
self._all_temperatures = np.array(temperatures)
self._cv = np.array(cv)
self._entropy = np.array(entropy)
self._fe_phonon = np.array(fe_phonon) / EvTokJmol
self._eos = get_eos(eos)
self._t_max = t_max
self._energy_plot_factor = energy_plot_factor
self._temperatures = None
self._equiv_volumes = None
self._equiv_energies = None
self._equiv_bulk_modulus = None
self._equiv_parameters = None
self._free_energies = None
self._num_elems = None
self._thermal_expansions |
hpcloud-mon/monasca-events-api | monasca_events_api/v2/common/schemas/stream_definition_request_body_schema.py | Python | apache-2.0 | 1,965 | 0 | # Copyright 2015 Hewlett-Packard
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import voluptuous
from monasca_events_api.v2.common.schemas import exceptions
LOG = log.getLogger(__name__)
MILLISEC_PER_DAY = 86400000
MILLISEC_PER_WEEK = MILLISEC_PER_DAY * 7
stream_definition_schema = {
voluptuous.Required('name'): voluptuous.All(voluptuous.Any(str, unicode),
voluptuous.Length(max=140)),
voluptuous.Required('select'): voluptuous.All(
voluptuous.Any(list)),
voluptuous.Required('group_by'): voluptuous.All(
voluptuous.Any(list)),
voluptuous.Required('fire_criteria'): voluptuous.All(
voluptuous.Any(list)),
voluptuous.Required('expiration'): voluptuous.All(
voluptuous.Any(int), voluptuous.Range(min=0, max=MILLISEC_PER_WEEK)),
voluptuous.Optional('fire_actions'): voluptuous.All(
voluptuous.Any([str], [unicode]), voluptuous.Length(max=400)),
voluptuous.Optional('expire_actions'): voluptuous.All(
        voluptuous.Any([str], [unicode]), voluptuous.Length(max=400)),
    voluptuous.Optional('actions_enabled'): bool}
request_body_schema = voluptuous.Schema(stream_definition_schema,
required=True, extra=True)
def validate(msg):
try:
request_body_schema(msg)
except Exception as ex:
LOG.debug(ex)
raise exceptions.ValidationException(str(ex))
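if __name__ == '__main__':
    # Smoke test with a minimal, hypothetical stream definition that
    # satisfies every required key of the schema above.
    validate({'name': 'example-stream',
              'select': [{'traits': {'tenant_id': '406904'}}],
              'group_by': ['instance_id'],
              'fire_criteria': [{'event_type': 'compute.instance.create.end'}],
              'expiration': 3600000})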
|
memsharded/conan | conans/server/launcher.py | Python | mit | 2,666 | 0.002626 | #!/usr/bin/python
import os
from conans import SERVER_CAPABILITIES, REVISIONS
from conans.paths import conan_expand_user
from conans.server.conf import get_server_store
from conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager
from conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager
from conans.server.migrate import migrate_and_get_server_config
from conans.server.plugin_loader import load_authentication_plugin
from conans.server.rest.server import ConanServer
from conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator
class ServerLauncher(object):
def __init__(self, force_migration=False):
self.force_migration = force_migration
user_folder = conan_expand_user("~")
server_folder = os.path.join(user_folder, '.conan_server')
server_config = migrate_and_get_server_config(user_folder, self.force_migration)
custom_auth = server_config.custom_authenticator
if custom_auth:
authenticator = load_authentication_plugin(server_folder, custom_auth)
else:
authenticator = BasicAuthenticator(dict(server_config.users))
authorizer = BasicAuthorizer(server_config.read_permissions,
server_config.write_permissions)
credentials_manager = JWTCredentialsManager(server_config.jwt_secret,
server_config.jwt_expire_time)
updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret,
server_config.authorize_timeout)
        server_store = get_server_store(server_config.disk_storage_path,
                                        server_config.public_url,
                                        updown_auth_manager=updown_auth_manager)
server_capabilities = SERVER_CAPABILITIES
server_capabilities.append(REVISIONS)
        self.server = ConanServer(server_config.port, credentials_manager, updown_auth_manager,
authorizer, authenticator, server_store,
server_capabilities)
if not self.force_migration:
print("***********************")
print("Using config: %s" % server_config.config_filename)
print("Storage: %s" % server_config.disk_storage_path)
print("Public URL: %s" % server_config.public_url)
print("PORT: %s" % server_config.port)
print("***********************")
def launch(self):
if not self.force_migration:
self.server.run(host="0.0.0.0")
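# A typical entry point might look like this (sketch; Conan's own
# server_launcher module wires it up in essentially this way):
#
#     launcher = ServerLauncher()
#     launcher.launch()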
|
pcchenxi/baseline | baselines/test.py | Python | mit | 435 | 0.006897 | import tensorflow as tf
sess=tf.Session()
# First, load the meta graph and restore the weights
saver = tf.train.import_meta_graph('./baselines/ppo1/model/rl/model.cptk.meta')
saver.restore(sess, './baselines/ppo1/model/rl/model.cptk')
graph = tf.get_default_graph()
value_w = graph.get_tensor_by_name("pi/vffinal/w:0")
# print(sess.run('pi/vffinal/w:0'))
summary_writer = tf.summary.FileWriter('./baselines/ppo1/log', sess.graph)
|
dickrd/cla_tool | setup.py | Python | apache-2.0 | 492 | 0 | from distutils.core import setup
from setuptools import find_packages
setup(
name='cla_tool',
version='1.2.0',
    packages=find_packages(exclude=['res']),
url='https://github.com/dickrd/cla_tool',
license='Apache License 2.0',
author='DickRD',
    author_email='dickdata7@gmail.com',
description='Chinese language analyze tools.',
scripts=['bin/cla'],
install_requires=['gensim', 'jieba', 'sklearn'],
tests_require=['pytest'],
test_suite="pytest"
)
|
alexforencich/python-ivi | ivi/agilent/agilentBase8590A.py | Python | mit | 2,338 | 0.005988 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590 import *
class agilentBase8590A(agilentBase8590):
"Agilent 8590A series IVI spectrum analyzer driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilentBase8590A, self).__init__(*args, **kwargs)
        self._identity_description = "Agilent 8590 series IVI spectrum analyzer driver"
        self._identity_supported_instrument_models = ['8590A', '8590B', '8591A', '8592A', '8592B',
'8593A', '8594A', '8595A']
def _display_fetch_screenshot(self, format='bmp', invert=False):
if self._driver_operation_simulate:
return b''
#if format not in ScreenshotImageFormatMapping:
# raise ivi.ValueNotSupportedException()
#format = ScreenshotImageFormatMapping[format]
self._write("PRINT 1")
rtl = io.BytesIO(self._read_raw())
img = hprtl.parse_hprtl(rtl)
        # rescale to get white background
        # presuming background of (90, 88, 85); float ratios avoid Python 2
        # integer division truncating the scale factors to 2, 2 and 3
        img[:,:,0] *= 255.0 / 90
        img[:,:,1] *= 255.0 / 88
        img[:,:,2] *= 255.0 / 85
bmp = hprtl.generate_bmp(img)
return bmp
|
siosio/intellij-community | python/helpers/tests/generator3_tests/data/SkeletonGeneration/skeleton_not_regenerated_for_failed_module_on_same_generator_version/failing.py | Python | apache-2.0 | 97 | 0.010309 | # Actually a correct module to check that we don't even attempt to regenerate a skeleton for it.
|
wingtk/gvsbuild | gvsbuild/projects/grpc.py | Python | gpl-2.0 | 2,081 | 0.000481 | # Copyright (C) 2016 - Yevgen Muntyan
# Copyright (C) 2016 - Ignacio Casal Quinteiro
# Copyright (C) 2016 - Arnavion
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from gvsbuild.utils.base_builders import CmakeProject
from gvsbuild.utils.base_expanders import GitRepo
from gvsbuild.utils.base_project import Project, project_add
@project_add
class Grpc(GitRepo, CmakeProject):
    def __init__(self):
        Project.__init__(
self,
"grpc",
repo_url="https://github.com/grpc/grpc.git",
fetch_submodules=True,
tag="v1.12.0",
dependencies=["go", "nuget", "protobuf", "perl", "zlib", "nasm"],
patches=["0001-removing-extra-plugins.patch"],
)
def build(self):
CmakeProject.build(
self,
cmake_params="-DgRPC_ZLIB_PROVIDER=package -DgRPC_PROTOBUF_PROVIDER=package",
use_ninja=True,
out_of_source=False,
)
self.install(r".\third_party\boringssl\ssl\ssl.lib lib")
self.install(r".\third_party\boringssl\crypto\crypto.lib lib")
self.install(r".\gpr.lib lib")
self.install(r".\grpc.lib lib")
self.install(r".\grpc++.lib lib")
self.install(r".\grpc_cpp_plugin.exe bin")
self.install(r".\grpc_cpp_plugin.pdb bin")
self.install(r".\grpc_csharp_plugin.exe bin")
self.install(r".\grpc_csharp_plugin.pdb bin")
self.install(r".\LICENSE share\doc\grpc")
|
spacy-io/spaCy | spacy/lang/ru/lemmatizer.py | Python | mit | 6,165 | 0.000974 | from typing import Optional, List, Dict, Tuple
from thinc.api import Model
from ...pipeline import Lemmatizer
from ...symbols import POS
from ...tokens import Token
from ...vocab import Vocab
PUNCT_RULES = {"«": '"', "»": '"'}
class RussianLemmatizer(Lemmatizer):
_morph = None
def __init__(
self,
vocab: Vocab,
model: Optional[Model],
name: str = "lemmatizer",
*,
mode: str = "pymorphy2",
overwrite: bool = False,
) -> None:
super().__init__(vocab, model, name, mode=mode, overwrite=overwrite)
try:
from pymorphy2 import MorphAnalyzer
except ImportError:
raise ImportError(
"The Russian lemmatizer requires the pymorphy2 library: "
'try to fix it with "pip install pymorphy2"'
) from None
if RussianLemmatizer._morph is None:
RussianLemmatizer._morph = MorphAnalyzer()
def pymorphy2_lemmatize(self, token: Token) -> List[str]:
string = token.text
univ_pos = token.pos_
morphology = token.morph.to_dict()
if univ_pos == "PUNCT":
return [PUNCT_RULES.get(string, string)]
if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"):
# Skip unchangeable pos
return [string.lower()]
analyses = self._morph.parse(string)
filtered_analyses = []
for analysis in analyses:
if not analysis.is_known:
# Skip suggested parse variant for unknown word for pymorphy
continue
analysis_pos, _ = oc2ud(str(analysis.tag))
if analysis_pos == univ_pos or (
analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN")
):
filtered_analyses.append(analysis)
if not len(filtered_analyses):
return [string.lower()]
if morphology is None or (len(morphology) == 1 and POS in morphology):
return list(set([analysis.normal_form for analysis in filtered_analyses]))
if univ_pos in ("ADJ", "DET", "NOUN", "PROPN"):
features_to_compare = ["Case", "Number", "Gender"]
elif univ_pos == "NUM":
features_to_compare = ["Case", "Gender"]
elif univ_pos == "PRON":
features_to_compare = ["Case", "Number", "Gender", "Person"]
else: # VERB
features_to_compare = [
"Aspect",
"Gender",
"Mood",
"Number",
"Tense",
"VerbForm",
"Voice",
]
analyses, filtered_analyses = filtered_analyses, []
for analysis in analyses:
_, analysis_morph = oc2ud(str(analysis.tag))
for feature in features_to_compare:
if (
feature in morphology
and feature in analysis_morph
and morphology[feature].lower() != analysis_morph[feature].lower()
):
break
else:
filtered_analyses.append(analysis)
if not len(filtered_analyses):
return [string.lower()]
return list(set([analysis.normal_form for analysis in filtered_analyses]))
def lookup_lemmatize(self, token: Token) -> List[str]:
string = token.text
analyses = self._morph.parse(string)
        if len(analyses) == 1:
            return [analyses[0].normal_form]
        return [string]
def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:
gram_map = {
"_POS": {
"ADJF": "ADJ",
"ADJS": "ADJ",
"ADVB": "ADV",
"Apro": "DET",
"COMP": "ADJ", # Can also be an ADV - unchangeable
"CONJ": "CCONJ", # Can also be a SCONJ - both unchangeable ones
"GRND": "VERB",
"INFN": "VERB | ",
"INTJ": "INTJ",
"NOUN": "NOUN",
"NPRO": "PRON",
"NUMR": "NUM",
"NUMB": "NUM",
"PNCT": "PUNCT",
"PRCL": "PART",
"PREP": "ADP",
"PRTF": "VERB",
"PRTS": "VERB",
"VERB": "VERB",
},
"Animacy": {"anim": "Anim", "inan": "Inan"},
"Aspect": {"impf": "Imp", "perf": "Perf"},
"Case": {
"ablt": "Ins",
"accs": "Acc",
"datv": "Dat",
"gen1": "Gen",
"gen2": "Gen",
"gent": "Gen",
"loc2": "Loc",
"loct": "Loc",
"nomn": "Nom",
"voct": "Voc",
},
"Degree": {"COMP": "Cmp", "Supr": "Sup"},
"Gender": {"femn": "Fem", "masc": "Masc", "neut": "Neut"},
"Mood": {"impr": "Imp", "indc": "Ind"},
"Number": {"plur": "Plur", "sing": "Sing"},
"NumForm": {"NUMB": "Digit"},
"Person": {"1per": "1", "2per": "2", "3per": "3", "excl": "2", "incl": "1"},
"Tense": {"futr": "Fut", "past": "Past", "pres": "Pres"},
"Variant": {"ADJS": "Brev", "PRTS": "Brev"},
"VerbForm": {
"GRND": "Conv",
"INFN": "Inf",
"PRTF": "Part",
"PRTS": "Part",
"VERB": "Fin",
},
"Voice": {"actv": "Act", "pssv": "Pass"},
"Abbr": {"Abbr": "Yes"},
}
pos = "X"
morphology = dict()
unmatched = set()
grams = oc_tag.replace(" ", ",").split(",")
for gram in grams:
match = False
for categ, gmap in sorted(gram_map.items()):
if gram in gmap:
match = True
if categ == "_POS":
pos = gmap[gram]
else:
morphology[categ] = gmap[gram]
if not match:
unmatched.add(gram)
while len(unmatched) > 0:
gram = unmatched.pop()
if gram in ("Name", "Patr", "Surn", "Geox", "Orgn"):
pos = "PROPN"
elif gram == "Auxt":
pos = "AUX"
elif gram == "Pltm":
morphology["Number"] = "Ptan"
return pos, morphology
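# Illustrative conversion (hypothetical OpenCorpora tag string):
#
#     oc2ud("NOUN,anim,masc sing,nomn")
#     # -> ("NOUN", {"Animacy": "Anim", "Gender": "Masc",
#     #              "Number": "Sing", "Case": "Nom"})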
|
SPIhub/hummingbird | src/backend/lcls.py | Python | bsd-2-clause | 28,140 | 0.00398 | # --------------------------------------------------------------------------------------
# Copyright 2016, Benedikt J. Daurer, Filipe R.N.C. Maia, Max F. Hantke, Carl Nettelblad
# Hummingbird is distributed under the terms of the Simplified BSD License.
# -------------------------------------------------------------------------
"""Translates between LCLS events and Hummingbird ones"""
from __future__ import print_function # Compatibility with python 2 and 3
import os
import logging
from backend.event_translator import EventTranslator
from backend.record import Record, add_record
import psana
import numpy
import datetime
from pytz import timezone
from . import ureg
from backend import Worker
import ipc
from hummingbird import parse_cmdline_args
_argparser = None
def add_cmdline_args():
global _argparser
from utils.cmdline_args import argparser
_argparser = argparser
group = _argparser.add_argument_group('LCLS', 'Options for the LCLS event translator')
group.add_argument('--lcls-run-number', metavar='lcls_run_number', nargs='?',
help="run number",
type=int)
group.add_argument('--lcls-number-of-frames', metavar='lcls_number_of_frames', nargs='?',
help="number of frames to be processed",
type=int)
# ADUthreshold for offline analysis
#group.add_argument('--ADUthreshold', metavar='ADUthreshold', nargs='?',
# help="ADU threshold",
# type=int)
# Hitscore threshold for offline analysis
#group.add_argument('--hitscore-thr', metavar='hitscore_thr', nargs='?',
# help="Hitscore threshold",
# type=int)
# Output directory for offline analysis
#group.add_argument('--out-dir', metavar='out_dir', nargs='?',
# help="Output directory",
# type=str)
# Reduce output from offline analysis
#group.add_argument('--reduced-output',
# help="Write only very few data to output file",
# action='store_true')
PNCCD_IDS = ['pnccdFront', 'pnccdBack']
ACQ_IDS = [('ACQ%i' % i) for i in range(1,4+1)]
class LCLSTranslator(object):
"""Translate between LCLS events and Hummingbird ones"""
def __init__(self, state):
self.timestamps = None
self.library = 'psana'
config_file = None
if('LCLS/PsanaConf' in state):
config_file = os.path.abspath(state['LCLS/PsanaConf'])
elif('LCLS' in state and 'PsanaConf' in state['LCLS']):
config_file = os.path.abspath(state['LCLS']['PsanaConf'])
if(config_file is not None):
if(not os.path.isfile(config_file)):
raise RuntimeError("Could not find [LCLS][PsanaConf]: %s" %
(config_file))
logging.info("Info: Found configuration file %s.", config_file)
psana.setConfigFile(config_file)
if 'LCLS/CalibDir' in state:
calibdir = state['LCLS/CalibDir']
logging.info("Setting calib-dir to %s" % calibdir)
psana.setOption('psana.calib-dir', calibdir)
elif('LCLS' in state and 'CalibDir' in state['LCLS']):
calibdir = state['LCLS']['CalibDir']
logging.info("Setting calib-dir to %s" % calibdir)
psana.setOption('psana.calib-dir', calibdir)
if('LCLS/DataSource' in state):
dsrc = state['LCLS/DataSource']
elif('LCLS' in state and 'DataSource' in state['LCLS']):
dsrc = state['LCLS']['DataSource']
else:
raise ValueError("You need to set the '[LCLS][DataSource]'"
" in the configuration")
cmdline_args = _argparser.parse_args()
self.N = cmdline_args.lcls_number_of_frames
if cmdline_args.lcls_run_number is not None:
dsrc += ":run=%i" % cmdline_args.lcls_run_number
# Cache times of events that shall be extracted from XTC (does not work for stream)
self.event_slice = slice(0,None,1)
if 'times' in state or 'fiducials' in state:
if not ('times' in state and 'fiducials' in state):
raise ValueError("Times or fiducials missing in state."
" Extraction of selected events expects both event identifiers")
if dsrc[:len('exp=')] != 'exp=':
raise ValueError("Extraction of events with given times and fiducials"
" only works when reading from XTC with index files")
if dsrc[-len(':idx'):] != ':idx':
dsrc += ':idx'
self.times = state['times']
self.fiducials = state['fiducials']
self.i = 0
self.data_source = psana.DataSource(dsrc)
self.run = self.data_source.runs().next()
elif 'indexing' in state:
if dsrc[-len(':idx'):] != ':idx':
dsrc += ':idx'
if 'index_offset' in state:
self.i = state['index_offset'] / ipc.mpi.nr_event_readers()
else:
self.i = 0
            self.data_source = psana.DataSource(dsrc)
self.run = self.data_source.runs().next()
self.timestamps = self.run.times()
if self.N is not None:
                self.timestamps = self.timestamps[:self.N]
self.timestamps = self.timestamps[ipc.mpi.event_reader_rank()::ipc.mpi.nr_event_readers()]
else:
self.times = None
self.fiducials = None
self.i = 0
if not dsrc.startswith('shmem='):
self.event_slice = slice(ipc.mpi.event_reader_rank(), None, ipc.mpi.nr_event_readers())
self.data_source = psana.DataSource(dsrc)
self.run = None
# Define how to translate between LCLS types and Hummingbird ones
self._n2c = {}
self._n2c[psana.Bld.BldDataFEEGasDetEnergy] = 'pulseEnergies'
self._n2c[psana.Bld.BldDataFEEGasDetEnergyV1] = 'pulseEnergies'
self._n2c[psana.Lusi.IpmFexV1] = 'pulseEnergies'
self._n2c[psana.Camera.FrameV1] = 'camera'
# Guard against old(er) psana versions
try:
self._n2c[psana.Bld.BldDataEBeamV1] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV2] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV3] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV4] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV5] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV6] = 'photonEnergies'
self._n2c[psana.Bld.BldDataEBeamV7] = 'photonEnergies'
except AttributeError:
pass
# CXI (CsPad)
self._n2c[psana.CsPad.DataV2] = 'photonPixelDetectors'
self._n2c[psana.CsPad2x2.ElementV1] = 'photonPixelDetectors'
# CXI (OffAxis Cam)
#self._n2c[psana.Camera.FrameV1] = 'photonPixelDetectors'
# AMO (pnCCD)
self._n2c[psana.PNCCD.FullFrameV1] = 'photonPixelDetectors'
self._n2c[psana.PNCCD.FramesV1] = 'photonPixelDetectors'
# --
self._n2c[psana.Acqiris.DataDescV1] = 'ionTOFs'
self._n2c[psana.EventId] = 'eventID'
# Guard against old(er) psana versions
try:
self._n2c[psana.EvrData.DataV3] = 'eventCodes'
self._n2c[psana.EvrData.DataV4] = 'eventCodes'
except AttributeError:
pass
# Calculate the inverse mapping
self._c2n = {}
for k, v in self._n2c.iteritems():
self._c2n[v] = self._c2n.get(v, [])
self._c2n[v].append(k)
# Define how to translate between LCLS sources and Hummingbird ones
self._s2c = {}
# CXI (OnAxis Cam)
self._s2c['DetInfo(CxiEndstation.0:Opal4000.1)'] = 'Sc2Questar'
# CXI (OffAxis Cam)
self._s2c['DetInfo(CxiEndstation.0.Opal11000.0)'] = 'Sc2Offaxis'
# CXI (CsPad)
self._s2 |
Jackson-Y/Machine-Learning | text/feature_extraction.py | Python | mit | 828 | 0.002778 | # -*- coding: utf-8 -*-
'''
Description:
Extract the feature from the text in English.
Version | :
python3
'''
from sklearn.feature_extraction.text import CountVectorizer
VECTORIZER = CountVectorizer(min_df=1)
# The commented-out line below configures the extraction differently:
# 1-2 word n-grams as the sliding window, spaces as the word delimiter,
# and a minimum document frequency of 1.
# See the API documentation for details:
# http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction
# VECTORIZER = CountVectorizer(ngram_range=(1,2), token_pattern=r'\b\w+\b', min_df=1)
CORPUS = [
'This is the first document.',
    'This is the second second document.',
'And the third one.',
'Is this the first document?'
]
X = VECTORIZER.fit_transform(CORPUS)
FEATURE_NAMES = VECTORIZER.get_feature_names()
print(FEATURE_NAMES)
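# With scikit-learn's default tokenizer this prints the sorted vocabulary:
# ['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', 'this']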
|
julianwang/cinder | cinder/openstack/common/scheduler/filters/__init__.py | Python | apache-2.0 | 1,389 | 0 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler host filters
"""
from cinder.openstack.common.scheduler import base_filter
class BaseHostFilter(base_filter.BaseFilter):
"""Base class for host fi | lters."""
def _filter_one(self, obj, filter_properties):
"""Return True if the object passes the filter, otherwise False."""
return self.host_passes(obj, filter_properties)
def host_passes(self, host_state, filter_pr | operties):
"""Return True if the HostState passes the filter, otherwise False.
Override this in a subclass.
"""
raise NotImplementedError()
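# Illustrative only (not from cinder): the docstring above asks subclasses to
# override host_passes(); _filter_one() then delegates to it. A toy filter
# might look like this ("free_capacity_gb" is a hypothetical attribute):
class ExampleCapacityFilter(BaseHostFilter):
    """Pass only hosts that report some free capacity."""
    def host_passes(self, host_state, filter_properties):
        return getattr(host_state, 'free_capacity_gb', 0) > 0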
class HostFilterHandler(base_filter.BaseFilterHandler):
def __init__(self, namespace):
super(HostFilterHandler, self).__init__(BaseHostFilter, namespace)
|
jordanemedlock/psychtruths | temboo/core/Library/GitHub/GistsAPI/Gists/__init__.py | Python | apache-2.0 | 1,595 | 0.00627 | from temboo.Library.GitHub.GistsAPI.Gists.CheckGist import CheckGist, CheckGistInputSet, CheckGistResultSet | , CheckGistChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.CreateGist import CreateGist, CreateGistInputSet, CreateGistResultSet, CreateG | istChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.DeleteGist import DeleteGist, DeleteGistInputSet, DeleteGistResultSet, DeleteGistChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.GetGist import GetGist, GetGistInputSet, GetGistResultSet, GetGistChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.ListGistsByUser import ListGistsByUser, ListGistsByUserInputSet, ListGistsByUserResultSet, ListGistsByUserChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.ListGistsForAuthenticatedUser import ListGistsForAuthenticatedUser, ListGistsForAuthenticatedUserInputSet, ListGistsForAuthenticatedUserResultSet, ListGistsForAuthenticatedUserChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.ListPublicGists import ListPublicGists, ListPublicGistsInputSet, ListPublicGistsResultSet, ListPublicGistsChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.ListStarredGists import ListStarredGists, ListStarredGistsInputSet, ListStarredGistsResultSet, ListStarredGistsChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.StarGist import StarGist, StarGistInputSet, StarGistResultSet, StarGistChoreographyExecution
from temboo.Library.GitHub.GistsAPI.Gists.UnstarGist import UnstarGist, UnstarGistInputSet, UnstarGistResultSet, UnstarGistChoreographyExecution
|
sergecodd/FireFox-OS | B2G/gecko/testing/marionette/client/marionette/tests/unit/test_cookies.py | Python | apache-2.0 | 3,426 | 0.005546 | import calendar
import time
import random
from marionette_test import MarionetteTestCase
class CookieTest(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
test_url = self.marionette.absolute_url('test.html')
self.marionette.navigate(test_url)
self.COOKIE_A = {"name": "foo",
"value": "bar",
"path": "/",
"secure": False}
def tearDown(self):
self.marionette.delete_all_cookies()
MarionetteTestCase.tearDown(self)
def testAddCookie(self):
self.marionette.add_cookie(self.COOKIE_A)
cookie_returned = str(self.marionette.execute_script("return document.cookie"))
self.assertTrue(self.COOKIE_A["name"] in cookie_returned)
def testAddingACookieThatExpiredInThePast(self):
cookie = self.COOKIE_A.copy()
cookie["expiry"] = calendar.timegm(time.gmtime()) - 1
self.marionette.add_cookie(cookie)
cookies = self.marionette.get_cookies()
self.assertEquals(0, len(cookies))
def testDeleteAllCookie(self):
| self.marionette.add_cookie(self.COOKIE_A)
cookie_returned = str(self.marionette.execute_script("return document.cookie"))
print cookie_returned
self.a | ssertTrue(self.COOKIE_A["name"] in cookie_returned)
self.marionette.delete_all_cookies()
self.assertFalse(self.marionette.get_cookies())
def testDeleteCookie(self):
self.marionette.add_cookie(self.COOKIE_A)
cookie_returned = str(self.marionette.execute_script("return document.cookie"))
self.assertTrue(self.COOKIE_A["name"] in cookie_returned)
self.marionette.delete_cookie("foo")
cookie_returned = str(self.marionette.execute_script("return document.cookie"))
self.assertFalse(self.COOKIE_A["name"] in cookie_returned)
def testShouldGetCookieByName(self):
key = "key_%d" % int(random.random()*10000000)
self.marionette.execute_script("document.cookie = arguments[0] + '=set';", [key])
cookie = self.marionette.get_cookie(key)
self.assertEquals("set", cookie["value"])
def testGetAllCookies(self):
key1 = "key_%d" % int(random.random()*10000000)
key2 = "key_%d" % int(random.random()*10000000)
cookies = self.marionette.get_cookies()
count = len(cookies)
one = {"name" :key1,
"value": "value"}
two = {"name":key2,
"value": "value"}
self.marionette.add_cookie(one)
self.marionette.add_cookie(two)
test_url = self.marionette.absolute_url('test.html')
self.marionette.navigate(test_url)
cookies = self.marionette.get_cookies()
self.assertEquals(count + 2, len(cookies))
def testShouldNotDeleteCookiesWithASimilarName(self):
cookieOneName = "fish"
cookie1 = {"name" :cookieOneName,
"value":"cod"}
cookie2 = {"name" :cookieOneName + "x",
"value": "earth"}
self.marionette.add_cookie(cookie1)
self.marionette.add_cookie(cookie2)
self.marionette.delete_cookie(cookieOneName)
cookies = self.marionette.get_cookies()
self.assertFalse(cookie1["name"] == cookies[0]["name"], msg=str(cookies))
self.assertEquals(cookie2["name"] , cookies[0]["name"], msg=str(cookies))
|
beeftornado/sentry | src/sentry/migrations/0072_alert_rules_query_changes.py | Python | bsd-3-clause | 4,530 | 0.003311 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-01 23:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import sentry.db.models.fields.bounded
import sentry.db.models.fields.foreignkey
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = False
# This flag is used to decide whether to run this migration in a transaction or not.
# By default we prefer to run in a transaction, but for migrations where you want
# to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
# want to create an index concurrently when adding one to an existing table.
atomic = True
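    # Illustrative only (not part of this migration): when an index must be
    # created concurrently, the comment above says to disable the transaction,
    # so a hypothetical migration would instead set:
    #
    #     atomic = False
    #     operations = [
    #         migrations.RunSQL('CREATE INDEX CONCURRENTLY ...'),  # placeholder SQL
    #     ]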
dependencies = [
('sentry', '0071_add_default_fields_model_subclass'),
]
operations = [
migrations.CreateModel(
name='SnubaQuery',
fields=[
('id', sentry.db.models.fields.bounded.BoundedBigAutoField(primary_key=True, serialize=False)),
('dataset', models.TextField()),
('query', models.TextField()),
('aggregate', models.TextField()),
('time_window', models.IntegerField()),
('resolution', models.IntegerField()),
('date_added', models.DateTimeField(default=django.utils.timezone.now)),
('environment', sentry.db.models.fields.foreignkey.FlexibleForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='sentry.Environment')),
],
options={
'db_table': 'sentry_snubaquery',
},
),
migrations.AlterField(
model_name='alertrule',
name='aggregation',
field=models.IntegerField(default=0, null=True),
),
| migrations.AlterField(
model_name='alertrule',
name='dataset',
field=models.TextField(null=True),
),
migrations.AlterField(
model_name='alertrule',
name='query',
field=models.TextField(null=True),
),
migrations.AlterField(
model_name='alertrule',
name='resolution',
field=models.IntegerField(null=True),
),
migrations.AlterField(
| model_name='alertrule',
name='time_window',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='querysubscription',
name='aggregation',
field=models.IntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='querysubscription',
name='dataset',
field=models.TextField(null=True),
),
migrations.AlterField(
model_name='querysubscription',
name='query',
field=models.TextField(null=True),
),
migrations.AlterField(
model_name='querysubscription',
name='resolution',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='querysubscription',
name='time_window',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='alertrule',
name='snuba_query',
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='sentry.SnubaQuery', unique=True),
),
migrations.AddField(
model_name='querysubscription',
name='snuba_query',
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subscriptions', to='sentry.SnubaQuery'),
),
]
|
apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/puls4.py | Python | unlicense | 3,103 | 0.001289 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
int_or_none,
)
class Puls4IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?puls4\.com/video/[^/]+/play/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.puls4.com/video/pro-und-contra/play/2716816',
'md5': '49f6a6629747eeec43cef6a46b5df81d',
'info_dict': {
'id': '2716816',
'ext': 'mp4',
'title': 'Pro und Contra vom 23.02.2015',
'description': 'md5:293e44634d9477a67122489994675db6',
'duration': 2989,
'upload_date': '20150224',
'uploader': 'PULS_4',
},
'skip': 'Only works from Germany',
}, {
'url': 'http://www.puls4.com/video/kult-spielfilme/play/1298106',
'md5': '6a48316c8903ece8dab9b9a7bf7a59ec',
'info_dict': {
'id': '1298106',
'ext': 'mp4',
'title': 'Lucky Fritz',
},
'skip': 'Only works from Germany',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
error_message = self._html_search_regex(
r'<div class="message-error">(.+?)</div>',
webpage, 'error message', default=None)
if error_message:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error_message), expected=True)
real_url = self._html_search_regex(
r'\"fsk-button\".+?href=\"([^"]+)',
webpage, 'fsk_button', default=None)
if real_url:
webpage = self._download_webpage(real_url, video_id)
player = self._search_regex(
r'p4_video_player(?:_iframe)?\("video_\d+_container"\s*,(.+?)\);\s*\}',
webpage, 'player')
player_json = self._parse_json(
'[%s]' % player, video_id,
transform_source=lambda s: s.replace('undefined,', ''))
formats = None
result = None
for v in player_json:
if isinstance(v, list) and not formats:
formats = [{
'url': f['url'],
'format': 'hd' if f.get('hd') else 'sd',
'width': int_or_none(f.get('size_x')),
'height': int_or_none(f.get('size_y')),
'tbr': int_or_none(f.get('bitrate')),
} for f in v]
self._sort_formats(formats)
elif isinstance(v, dict) and not result:
result = {
'id': video_id,
'title': v['videopartname'].strip(),
| 'description': v.get('videotitle'),
'duration': int_or_none(v.get('videoduration') or v.get('episoded | uration')),
'upload_date': unified_strdate(v.get('clipreleasetime')),
'uploader': v.get('channel'),
}
result['formats'] = formats
return result
|
JanX2/LaunchAtLoginHelper | setup.py | Python | mit | 738 | 0.00542 | import sys, os
import plistlib
urlScheme = sys.argv[1]
bundleIdentifier = sys.argv[2]
directory = os.path.dirname(os.path.abspath(__file__))
stringsOutput = os.path.join(directory, 'LLStrings.h')
infoPlistOutput = os.path.join(directory, 'LaunchAtLoginHelper/LaunchAtLoginHelper-Info.plist')
infoPlist = plistlib.readPlist(os.path.join(directory, 'LaunchAtLoginHelper/LaunchAtLoginHelper-InfoBase.plist'))
with open(stringsOutput, 'w | ') as strings:
strings.write("""// strings used by LLManager and LaunchAtLoginHelper
//
#define LLURLScheme @"%(urlScheme)s"
#define LLHelperBundleIdentifier @"%(bundleIdentifier)s"
"""%locals())
infoPlist[' | CFBundleIdentifier'] = bundleIdentifier
plistlib.writePlist(infoPlist, infoPlistOutput)
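# Usage sketch, inferred from the sys.argv reads above (both arguments are
# placeholder values):
#
#     python setup.py my-url-scheme com.example.LaunchAtLoginHelper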
|
intuition-io/intuition | intuition/__init__.py | Python | apache-2.0 | 541 | 0 | '''
Intuition
----
Intuition is an engine, some building bricks and a set of tools meant to let
yo | u efficiently and intuitively make your own automated quantitative trading
system. It is designed to let traders, developers and scientists explore,
improve and deploy market technical hacks.
:copyright (c) 2014 Xavier Bruhiere
:license: Apache 2.0, see LICENSE for more details.
'''
__project__ = 'intuition'
__author__ = 'Xavier Bruhiere'
__copyright__ = 'Xavier Bru | hiere'
__licence__ = 'Apache 2.0'
__version__ = '0.4.0'
|
PhilHarnish/forge | src/puzzle/examples/mim/p1_3.py | Python | mit | 1,874 | 0.001067 | from puzzle.puzzlepedia import puzzle
def get():
return puzzle.Puzzle('Puzzle 1.3: The Missing Painting', SOURCE)
SOURCE = """
name in {Beth, Charles, David, Frank, Jessica, Karen, Taylor}
job in {attorney, banker, composer, de | corator, entrepreneur, filmmaker, gerontologist}
start in {boathouse, cottage, garden, lighthouse, mansion, pond, windmill}
status in {crime, innocent}
# Setup: only one crime was committed.
sum([n.crime for n in name]) == 1
#2
boathouse.crime or cottage.crime or lighthouse.crime or windmill.crime
#4
if entrepreneur.innocent:
not mansion.c | rime
#5
not gerontologist.crime
#6
Karen == decorator
#7
entrepreneur == cottage or entrepreneur == mansion or entrepreneur == pond
filmmaker == cottage or filmmaker == mansion or filmmaker == pond
gerontologist == cottage or gerontologist == mansion or gerontologist == pond
#8
if charles.crime:
not cottage.crime
#9
if beth.innocent:
beth == banker
beth == windmill
#10
if charles.innocent:
charles == gerontologist
not charles.mansion
not charles.pond
#11
if david.innocent:
not boathouse.crime
#12
if frank.innocent:
frank != entrepreneur
entrepreneur != mansion
#13
if jessica.innocent:
mansion.innocent
pond.innocent
jessica != mansion
jessica != pond
#14
if karen.innocent:
karen == lighthouse
boathouse == innocent
mansion == innocent
#15
if taylor.innocent:
  taylor == attorney
taylor == garden
if windmill.crime:
jessica.crime
"""
SOLUTION = """
name | job | start | status
Beth | banker | windmill | innocent
Charles | gerontologist | cottage | innocent
David | entrepreneur | pond | innocent
Frank | filmmaker | mansion | innocent
Jessica | composer | boathouse | innocent
Karen | decorator | lighthouse | crime
Taylor | attorney | garden | innocent
"""
|
ArielInfante/HappyHour-Flask | app/users/forms.py | Python | mit | 3,344 | 0.002691 | from flask_wtf import Form
from wtforms import StringField, PasswordField, SubmitField, BooleanField, RadioField, TextAreaField, FileField
from wtforms.validators import InputRequired, Length, Email, Regexp
from flask_wtf.file import FileRequired, FileAllowed
class SignUpForm(Form):
username = StringField('Username', validators=[
InputRequired(message="Username is required"),
Regexp('^[A-Za-z][A-Za-z0-9_]*[A-Za-z0-9]$', 0, "Usernames must only have letters, numbers or underscores")
])
name = StringField('Name', validators=[
InputRequired(message="Name id required"),
| Regexp('^[A-Za-z][A-Za-z ]*[A-Za-z]$', 0, "Your name must only have letters")
])
email = StringField('Email', validators=[
InputRequired(message="Email is required"),
Email(message="Th | is is not a valid email")
])
password = PasswordField('Password', validators=[
InputRequired(message="Password is required"),
Length(min=6, message="The password is not long enough")
])
accept_tos = BooleanField("Accept Terms of Service", validators=[
InputRequired(message="You have to accept the Terms of Service in order to use this site")
], default=False)
submit = SubmitField('Signup')
class LoginForm(Form):
username_email = StringField('Username or Email', validators=[
InputRequired(message="Need to insert either username or email")
])
password = PasswordField('Password', validators=[
InputRequired(message="Insert your password")
])
remember_me = BooleanField('Keep me logged in', default=False)
submit = SubmitField('Login')
class EditProfileForm(Form):
privacy_option = RadioField('Private Account', validators=[
InputRequired(message="Need to have an option chosen")
], choices=[
(True, "Yes"), (False, "No")
], default=True)
name = StringField('Name', validators=[
InputRequired(message="Name id required"),
Regexp('^[A-Za-z][A-Za-z]*[A-Za-z]$', 0, "Your name must only have letters")
])
username = StringField('Username', validators=[
InputRequired(message="Username is required"),
Regexp('^[A-Za-z][A-Za-z0-9_]*[A-Za-z0-9]$', 0, "Usernames must only have letters, numbers or underscores")
])
email = StringField('Email', validators=[
InputRequired(message="Email is required"),
Email(message="This is not a valid email")
])
password = PasswordField('Password', validators=[
InputRequired(message="Password is required"),
Length(min=6, message="The password is not long enough")
])
bio = TextAreaField("Bio")
submit = SubmitField('Save Changes')
class AvatarForm(Form):
photo = FileField('Upload Avatar', validators=[
FileRequired('A picture is required'),
FileAllowed(['jpg', 'png'], "images only")
])
submit = SubmitField("Crop & Save")
class ChangePasswordForm(Form):
submit = SubmitField('Change Password')
class ChangeEmailForm(Form):
change_email = StringField('New Email', validators=[
InputRequired(message="You need to insert a new email"),
Email(message="This is not a valid email")
])
submit = SubmitField('Change Email')
class ChangeUsernameForm(Form):
submit = SubmitField('Change Username')
|
franklingu/leetcode-solutions | questions/convert-a-number-to-hexadecimal/Solution.py | Python | mit | 1,481 | 0.003381 | """
Given an integer, write an algorithm to convert it to hexadecimal. For negative integer, two’s complement method is used.
Note:
All letters in hexadecimal (a-f) must be in lowercase.
The hexadecimal string must not contain extra leading 0s. If the number is zero, it is represented by a single zero character '0'; otherwise, the first character in the hexadecimal string will not be the zero character.
The given number is guaranteed to fit within the range of a 32-bit signed integer.
You must not use any method provided by the library which converts/formats the number to hex directly.
Example 1:
Input:
26
Output:
"1a"
Exa | mple 2:
Input:
-1
Output:
"ffffffff"
"""
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
curr = []
ret = []
for i in xrange(32):
curr.append(str(num & 0x01))
num = num >> 1
if len(curr) == 4:
n = int(''.join(reversed(curr)), 2)
if n < 10:
ret.append(str(n))
else:
| ret.append(chr(ord('a') + n - 10))
curr = []
cleaned = []
is_ok = False
for i, n in enumerate(reversed(ret)):
if n != '0':
is_ok = True
if is_ok:
cleaned.append(n)
if not cleaned:
return '0'
return ''.join(cleaned) |
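# Quick check (illustrative, not part of the original file):
#   Solution().toHex(26)  -> '1a'
#   Solution().toHex(-1)  -> 'ffffffff'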