repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
MrgInfo/PiCam | upload.py | Python | gpl-2.0 | 5,086 | 0.000984 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Dropbox upload daemon.
"""
from fnmatch import fnmatch
from operator import itemgetter
from os import listdir, path, mknod, stat
from time import strptime, sleep, time
from dropbox.client import DropboxClient, DropboxOAuth2FlowNoRedirect
from dropbox.rest import ErrorResponse
from urllib3.exceptions import MaxRetryError
from utils import settings
from utils.daemons import DaemonBase, init
from utils.database import DatabaseConnection
__author__ = "wavezone"
__copyright__ = "Copyright 2016, MRG-Infó Bt."
__credits__ = ["Groma István (wavezone)"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Groma István"
__email__ = "wavezone@mrginfo.com"
class UploadDaemon(DaemonBase):
    """Daemon that mirrors a local directory to Dropbox and rotates old remote files."""

    # Set when an interactive OAuth flow just ran; the first run is skipped then.
    first_time = False
    # Remote storage budget: 10 GiB.
    max_size = 10 * (1024 ** 3)
    # OAuth2 token cached in the configuration.
    access_token = settings.config.access_token

    def __init__(self, directory: str):
        """Remember the watched directory and obtain an access token if missing."""
        super().__init__()
        self.directory = directory
        if self.access_token is None or self.access_token == '':
            # noinspection SpellCheckingInspection
            flow = DropboxOAuth2FlowNoRedirect('m9cijknmu1po39d', 'bi8dlhif9215qg3')
            print("OAuth 2 authorization process")
            print("1. Go to: {}".format(flow.start()))
            print("2. Click Allow (you might have to log in first).")
            print("3. Copy the authorization code.")
            auth_code = input("4. Enter the authorization code here: ").strip()
            self.access_token, _ = flow.finish(auth_code)
            settings.config.access_token = self.access_token
            self.first_time = True

    @staticmethod
    def _get(client: DropboxClient) -> list:
        """Return descriptors of the remote files, or None when Dropbox is unreachable."""
        try:
            listing = client.metadata('/')
        except (MaxRetryError, ErrorResponse):
            return None
        descriptors = []
        for entry in listing['contents']:
            if entry['is_dir']:
                continue
            descriptors.append({
                'file': entry['path'],
                'modified': strptime(entry['modified'], '%a, %d %b %Y %H:%M:%S %z'),
                'size': entry['bytes'],
            })
        return descriptors

    def _upload(self, client: DropboxClient):
        """Push every settled local file that has no .upl marker yet."""
        cutoff = time() - 60
        for entry in listdir(self.directory):
            if fnmatch(entry, '*.upl'):
                # Marker files themselves are never uploaded.
                continue
            remote_name = '/' + entry
            local_path = path.join(self.directory, entry)
            marker_path = "{}.upl".format(local_path)
            # Skip files already marked as uploaded or modified less than a
            # minute ago (possibly still being written).
            if path.isfile(marker_path) or stat(local_path).st_mtime >= cutoff:
                continue
            with open(local_path, 'rb') as stream:
                try:
                    client.put_file(remote_name, stream)
                    share = client.share(remote_name)
                except (MaxRetryError, ErrorResponse):
                    continue
            with DatabaseConnection() as db:
                # NOTE(review): URL/path are interpolated into the SQL string;
                # if DatabaseConnection supports bind parameters, prefer them.
                update = """
                    UPDATE events
                    SET url = '{}',
                        uploaded = current_timestamp
                    WHERE file = '{}'
                """.format(share['url'], local_path)
                db.dml(update)
            try:
                mknod(marker_path)
            except FileExistsError:
                pass
            print("{} was uploaded to Dropbox.".format(entry))

    def _rotate(self, client: DropboxClient, files: list):
        """Delete the oldest remote files until usage drops below max_size."""
        remaining = sum(item['size'] for item in files)
        for descriptor in sorted(files, key=itemgetter('modified')):
            if remaining < self.max_size:
                break
            try:
                client.file_delete(descriptor['file'])
            except (MaxRetryError, ErrorResponse):
                continue
            print("{} was deleted from Dropbox.".format(descriptor['file']))
            remaining -= descriptor['size']

    def run(self):
        """Main loop: upload new local files, then rotate remote storage, forever."""
        if self.first_time:
            # The token was just created interactively; do nothing this run.
            return
        print("Uploading from {} to Dropbox.".format(self.directory), flush=True)
        try:
            client = DropboxClient(self.access_token)
            while True:
                self._upload(client)
                remote_files = self._get(client)
                if remote_files is not None:
                    self._rotate(client, remote_files)
                print("Going idle...", end='', flush=True)
                sleep(2 * 60)
                print("DONE", flush=True)
        except KeyboardInterrupt:
            print()
        except SystemExit:
            pass
        finally:
            print("No longer uploading from {} to Dropbox.".format(self.directory), flush=True)
if __name__ == '__main__':
    # Build the daemon for the configured working directory and hand it to
    # the shared daemon bootstrapper (utils.daemons.init).
    my_daemon = UploadDaemon(settings.config.working_dir)
    init(my_daemon)
|
ecederstrand/exchangelib | exchangelib/services/find_item.py | Python | bsd-2-clause | 4,353 | 0.002068 | from ..errors import InvalidEnumValue
from ..folders.base import BaseFolder
from ..items import ID_ONLY, ITEM_TRAVERSAL_CHOICES, SHAPE_CHOICES, Item
from ..util import MNS, TNS, create_element, set_xml_value
from .common import EWSPagingService, folder_ids_element, shape_element
class FindItem(EWSPagingService):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/finditem-operation"""

    SERVICE_NAME = "FindItem"
    element_container_name = f"{{{TNS}}}Items"
    paging_container_name = f"{{{MNS}}}RootFolder"
    supports_paging = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A hack to communicate parsing args to _elems_to_objs()
        self.additional_fields = None
        self.shape = None

    def call(
        self,
        folders,
        additional_fields,
        restriction,
        order_fields,
        shape,
        query_string,
        depth,
        calendar_view,
        max_items,
        offset,
    ):
        """Find items in an account.

        :param folders: the folders to act on
        :param additional_fields: the extra fields that should be returned with the item, as FieldPath objects
        :param restriction: a Restriction object for
        :param order_fields: the fields to sort the results by
        :param shape: The set of attributes to return
        :param query_string: a QueryString object
        :param depth: How deep in the folder structure to search for items
        :param calendar_view: If set, returns recurring calendar items unfolded
        :param max_items: the max number of items to return
        :param offset: the offset relative to the first item in the item collection. Usually 0.

        :return: XML elements for the matching items
        """
        # Validate enum-like arguments up front so we fail before any I/O.
        if shape not in SHAPE_CHOICES:
            raise InvalidEnumValue("shape", shape, SHAPE_CHOICES)
        if depth not in ITEM_TRAVERSAL_CHOICES:
            raise InvalidEnumValue("depth", depth, ITEM_TRAVERSAL_CHOICES)
        # Stash parsing context for _elem_to_obj().
        self.additional_fields = additional_fields
        self.shape = shape
        paged_elems = self._paged_call(
            payload_func=self.get_payload,
            max_items=max_items,
            folders=folders,
            additional_fields=additional_fields,
            restriction=restriction,
            order_fields=order_fields,
            query_string=query_string,
            shape=shape,
            depth=depth,
            calendar_view=calendar_view,
            page_size=self.page_size,
            offset=offset,
        )
        return self._elems_to_objs(paged_elems)

    def _elem_to_obj(self, elem):
        # With IdOnly and no extra fields requested, the response only
        # carries item IDs; otherwise parse the full item model.
        if self.shape == ID_ONLY and self.additional_fields is None:
            return Item.id_from_xml(elem)
        return BaseFolder.item_model_from_tag(elem.tag).from_xml(elem=elem, account=self.account)

    def get_payload(
        self,
        folders,
        additional_fields,
        restriction,
        order_fields,
        query_string,
        shape,
        depth,
        calendar_view,
        page_size,
        offset=0,
    ):
        """Build the FindItem request element (child order is mandated by the EWS schema)."""
        finditem = create_element(f"m:{self.SERVICE_NAME}", attrs=dict(Traversal=depth))
        finditem.append(
            shape_element(
                tag="m:ItemShape", shape=shape, additional_fields=additional_fields, version=self.account.version
            )
        )
        if calendar_view is not None:
            finditem.append(calendar_view.to_xml(version=self.account.version))
        else:
            finditem.append(
                create_element(
                    "m:IndexedPageItemView",
                    attrs=dict(MaxEntriesReturned=page_size, Offset=offset, BasePoint="Beginning"),
                )
            )
        if restriction:
            finditem.append(restriction.to_xml(version=self.account.version))
        if order_fields:
            finditem.append(set_xml_value(create_element("m:SortOrder"), order_fields, version=self.account.version))
        finditem.append(folder_ids_element(folders=folders, version=self.protocol.version, tag="m:ParentFolderIds"))
        if query_string:
            finditem.append(query_string.to_xml(version=self.account.version))
        return finditem
|
smartboyathome/Wonderland-Engine | tests/CheshireCatTests/test_team_checks_manual.py | Python | agpl-3.0 | 3,635 | 0.002476 | '''
Copyright (c) 2012 Alexander Abbott
This file is part of the Cheshire Cyber Defense Scoring Engine (henceforth
referred to as Cheshire).
Cheshire is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Cheshire is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
more details.
You should have received a copy of the GNU Affero General Public License
along with Cheshire. If not, see <http://www.gnu.org/licenses/>.
'''
import json
from CheshireCat.utils import convert_datetime_to_timestamp, convert_all_datetime_to_timestamp
from tests import show_difference_between_dicts
from tests.CheshireCatTests import FlaskTestCase
class TestRestTeamChecksManualsInterface(FlaskTestCase):
def test_get_all_manual_checks_for_specific_team(self):
self.login_user('admin', 'admin')
rest_result = self.app.get('/teams/1/checks/manual')
print rest_result.status_code, rest_result.data
assert rest_result.status_code == 200
expected_result = [obj for obj in self.data['completed_checks'] if obj['team_id'] == '1' and obj['type'] == 'manual']
json_result = json.loads(rest_result.data)
assert len(json_result) == len(expected_result)
for i in expected_result:
del i['team_id'], i['type']
convert_all_datetime_to_timestamp(i, ['timestamp', 'time_to_check'])
assert json_result == expected_result
def test_get_all_manual_checks_for_specific_team_with_params(self):
self.login_user('admin', 'admin')
query_data = {
"failure": "assured"
}
result_data = {
"type": "IllegalParameter",
"reason": "Parameters are not allowed for this interface."
}
result = self.app.get('/teams/1/checks/manual', data=json.dumps(query_data))
print result.data
assert result.status_code == 403
assert json.loads(result.data) == result_data
def test_get_specific_manual_check_for_specific_team(self):
self.login_user('admin', 'admin')
rest_result = self.app.get('/teams/1/checks/manual/BoardPresentation')
print rest_result.status_code, rest_result.data
assert rest_result.status_code == 200
expected_result = [obj for obj in self.data['completed_checks'] if obj['team_id'] == '1' and obj['type'] == 'manual' and obj['id'] == 'BoardPresentation']
json_result = json.loads(rest_result.data)
assert len(json_result) == len(expected_result)
for i in expected_result:
del i['team_id'], i['type'], i['id']
convert_all_datetime_to_timestamp(i, ['timestamp', 'time_to_check'])
assert json_result == expected_result
def test_get_spe | cific_manual_check_for_specific_team_with_params(self):
self.login_user('admin', 'admin')
query_data = {
"failure": "assured"
}
result_data = {
"type": "IllegalParameter",
"reason": "Parameters are not allowed for this interface."
}
result = self.app.get('/teams/1/checks/manual/BoardPresentation' | , data=json.dumps(query_data))
print result.data
assert result.status_code == 403
assert json.loads(result.data) == result_data |
gltn/stdm | stdm/third_party/sqlalchemy/orm/exc.py | Python | gpl-2.0 | 6,616 | 0 | # orm/exc.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQLAlchemy ORM exceptions."""
from .. import exc as sa_exc
from .. import util
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa_exc.SQLAlchemyError):
"""An operation encountered database state that is unaccounted for.
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
* A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
* A object is detached from its parent object, however
the object was previously attached to a different parent
identity which was garbage collected, and a decision
cannot be made if the new parent was really the most
recent "parent".
"""
ConcurrentModificationError = StaleDataError
class FlushError(sa_exc.SQLAlchemyError):
"""A invalid condition was detected during flush()."""
class UnmappedError(sa_exc.InvalidRequestError):
    """Base for exceptions that involve expected mappings not present."""
class ObjectDereferencedError(sa_exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage
collected.
"""
class DetachedInstanceError(sa_exc.SQLAlchemyError):
    """An attempt to access unloaded attributes on a
    mapped instance that is detached."""

    # Error code used by SQLAlchemy to link this exception to its
    # documentation page.
    code = "bhk3"
class UnmappedInstanceError(UnmappedError):
    """A mapping operation was requested for an unknown instance."""

    @util.dependencies("sqlalchemy.orm.base")
    def __init__(self, base, obj, msg=None):
        # ``base`` is injected by the @util.dependencies decorator;
        # callers only ever pass (obj, msg).
        if not msg:
            try:
                # If the *class* maps fine, the instance merely lacks
                # instrumentation (created before mapper configuration).
                base.class_mapper(type(obj))
                name = _safe_cls_name(type(obj))
                msg = (
                    "Class %r is mapped, but this instance lacks "
                    "instrumentation. This occurs when the instance "
                    "is created before sqlalchemy.orm.mapper(%s) "
                    "was called." % (name, name)
                )
            except UnmappedClassError:
                # The class itself is not mapped at all.
                msg = _default_unmapped(type(obj))
                if isinstance(obj, type):
                    msg += (
                        "; was a class (%s) supplied where an instance was "
                        "required?" % _safe_cls_name(obj)
                    )
        UnmappedError.__init__(self, msg)

    def __reduce__(self):
        # Pickle support: rebuild from the formatted message only.
        return self.__class__, (None, self.args[0])
class UnmappedClassError(UnmappedError):
    """A mapping operation was requested for an unknown class."""

    def __init__(self, cls, msg=None):
        if not msg:
            msg = _default_unmapped(cls)
        UnmappedError.__init__(self, msg)

    def __reduce__(self):
        # Pickle support: rebuild from the formatted message only.
        return self.__class__, (None, self.args[0])
class ObjectDeletedError(sa_exc.InvalidRequestError):
    """A refresh operation failed to retrieve the database
    row corresponding to an object's known primary key identity.

    A refresh operation proceeds when an expired attribute is
    accessed on an object, or when :meth:`_query.Query.get` is
    used to retrieve an object which is, upon retrieval, detected
    as expired. A SELECT is emitted for the target row
    based on primary key; if no row is returned, this
    exception is raised.

    The true meaning of this exception is simply that
    no row exists for the primary key identifier associated
    with a persistent object. The row may have been
    deleted, or in some cases the primary key updated
    to a new value, outside of the ORM's management of the target
    object.

    """

    @util.dependencies("sqlalchemy.orm.base")
    def __init__(self, base, state, msg=None):
        # ``base`` is injected by the @util.dependencies decorator;
        # callers only ever pass (state, msg).
        if not msg:
            msg = (
                "Instance '%s' has been deleted, or its "
                "row is otherwise not present." % base.state_str(state)
            )
        sa_exc.InvalidRequestError.__init__(self, msg)

    def __reduce__(self):
        # Pickle support: rebuild from the formatted message only.
        return self.__class__, (None, self.args[0])
class UnmappedColumnError(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
class NoResultFound(sa_exc.InvalidRequestError):
"""A database result was required but none was found."""
class MultipleResultsFound(sa_exc.InvalidRequestError):
"""A single database result was required but more than one were found."""
class LoaderStrategyException(sa_exc.InvalidRequestError):
    """A loader strategy for an attribute does not exist."""

    def __init__(
        self,
        applied_to_property_type,
        requesting_property,
        applies_to,
        actual_strategy_type,
        strategy_key,
    ):
        if actual_strategy_type is None:
            # No strategy at all is registered under ``strategy_key``.
            sa_exc.InvalidRequestError.__init__(
                self,
                "Can't find strategy %s for %s"
                % (strategy_key, requesting_property),
            )
        else:
            # A strategy exists, but it targets a different property type.
            sa_exc.InvalidRequestError.__init__(
                self,
                'Can\'t apply "%s" strategy to property "%s", '
                'which is a "%s"; this loader strategy is intended '
                'to be used with a "%s".'
                % (
                    util.clsname_as_plain_name(actual_strategy_type),
                    requesting_property,
                    util.clsname_as_plain_name(applied_to_property_type),
                    util.clsname_as_plain_name(applies_to),
                ),
            )
def _safe_cls_name(cls):
try:
cls_name = ".".join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, "__name__", None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
@util.dependencies("sqlalchemy.orm.base")
def _default_unmapped(base, cls):
    # ``base`` is injected by the @util.dependencies decorator; the
    # effective signature for callers is _default_unmapped(cls).
    try:
        mappers = base.manager_of_class(cls).mappers
    except NO_STATE:
        # Instrumentation lookup raised AttributeError/KeyError: treat
        # the class as unmapped.
        mappers = {}
    except TypeError:
        # e.g. ``cls`` is not a class at all.
        mappers = {}
    name = _safe_cls_name(cls)
    if not mappers:
        return "Class '%s' is not mapped" % name
    # Implicitly returns None when the class *is* mapped.
|
bloyl/mne-python | mne/io/eximia/eximia.py | Python | bsd-3-clause | 3,000 | 0 | # Authors: Eric Larson <larson.eric.d@gmail.com>
# Federico Raimondo <federaimondo@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
from ..base import BaseRaw
from ..utils import _read_segments_file, _file_size
from ..meas_info import create_info
from ...utils import logger, verbose, warn, fill_doc, _check_fname
@fill_doc
def read_raw_eximia(fname, preload=False, verbose=None):
"""Reader for an eXimia EEG file.
Parameters
----------
fname : str
Path to the eXimia data file (.nxe).
%(preload)s
%(verbose)s
Returns
----- | --
raw : instance of RawEximia
A Raw object containing eXimia data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawEximia(fname, preload, verbose)
@fill_doc
class RawEximia(BaseRaw):
"""Raw object from an Eximia EEG file.
Parameters
----------
fname : str
Path to the eXimia data file (.nx | e).
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, fname, preload=False, verbose=None):
fname = _check_fname(fname, 'read', True, 'fname')
data_name = op.basename(fname)
logger.info('Loading %s' % data_name)
# Create vhdr and vmrk files so that we can use mne_brain_vision2fiff
n_chan = 64
sfreq = 1450.
# data are multiplexed int16
ch_names = ['GateIn', 'Trig1', 'Trig2', 'EOG']
ch_types = ['stim', 'stim', 'stim', 'eog']
cals = [0.0015259021896696422, 0.0015259021896696422,
0.0015259021896696422, 0.3814755474174106]
ch_names += ('Fp1 Fpz Fp2 AF1 AFz AF2 '
'F7 F3 F1 Fz F2 F4 F8 '
'FT9 FT7 FC5 FC3 FC1 FCz FC2 FC4 FC6 FT8 FT10 '
'T7 C5 C3 C1 Cz C2 C4 C6 T8 '
'TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10 '
'P9 P7 P3 P1 Pz P2 P4 P8 '
'P10 PO3 POz PO4 O1 Oz O2 Iz'.split())
n_eeg = len(ch_names) - len(cals)
cals += [0.07629510948348212] * n_eeg
ch_types += ['eeg'] * n_eeg
assert len(ch_names) == n_chan
info = create_info(ch_names, sfreq, ch_types)
n_bytes = _file_size(fname)
n_samples, extra = divmod(n_bytes, (n_chan * 2))
if extra != 0:
warn('Incorrect number of samples in file (%s), the file is '
'likely truncated' % (n_samples,))
for ch, cal in zip(info['chs'], cals):
ch['cal'] = cal
super(RawEximia, self).__init__(
info, preload=preload, last_samps=(n_samples - 1,),
filenames=[fname], orig_format='short')
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data."""
_read_segments_file(
self, data, idx, fi, start, stop, cals, mult, dtype='<i2')
|
bitbyt3r/musicserver | start.py | Python | gpl-2.0 | 459 | 0.006536 | #!/usr/bin/python
import xmlrpc.client
from time import sleep
def remoteCall(func, *args):
    """Invoke *func* with *args*; on an XML-RPC fault, print it and
    return None instead of propagating."""
    try:
        return func(*args)
    except xmlrpc.client.Fault as fault:
        print(fault)
# Connect to the local music server, log in, then poll getfoo three
# times with increasing pauses between the calls.
server = xmlrpc.client.ServerProxy("https://localhost:8080", allow_none=True)
sid = remoteCall(server.login, "dj", "abc123")
print(remoteCall(server.getfoo, sid))
for pause in (3, 6):
    sleep(pause)
    print(remoteCall(server.getfoo, sid))
Gawen/pytun | pytun.py | Python | mit | 6,392 | 0.006414 | """ pytun
pytun is a tiny piece of code which gives you the ability to create and
manage tun/tap tunnels on Linux (for now).
"""
__author__ = "Gawen Arab"
__copyright__ = "Copyright 2012, Gawen Arab"
__credits__ = ["Gawen Arab", "Ben Lapid"]
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "Gawen Arab"
__email__ = "g@wenarab.com"
__status__ = "Beta"
import os
import fcntl
import socket
import struct
import logging
import functools
TUN_KO_PATH = "/dev/net/tun"
logger = logging.getLogger("pytun")
class Tunnel(object):
    """ tun/tap handler class (Python 2). """

    class AlreadyOpened(Exception):
        """ Raised when trying to open a tunnel that is already open. """
        pass

    class NotPermitted(Exception):
        """ Raised when the process lacks the privileges required to
            set up a new tunnel.
        """
        pass

    MODES = {
        "tun": 0x0001,
        "tap": 0x0002,
    }

    # No packet information flag
    IFF_NO_PI = 0x1000

    # ioctl call
    TUNSETIFF = 0x400454ca
    SIOCSIFHWADDR = 0x8924
    SIOCSIFADDR = 0x8916
    SIOCSIFFLAGS = 0x8914

    IFF_UP = 0x1
    IFF_POINTOPOINT = 0x10
    IFF_RUNNING = 0x40
    IFF_NOARP = 0x80
    IFF_MULTICAST = 0x1000

    def __init__(self, mode = None, pattern = None, auto_open = None, no_pi = False):
        """ Create a new tun/tap tunnel.

            'mode' is either a mode name ("tun"/"tap") or the raw system
            value.  'pattern' is the interface name template (e.g.
            "tun%d"); the kernel default applies when it is empty.  When
            'auto_open' is true (the default) the tunnel is created
            immediately.  When 'no_pi' is true the device is opened with
            IFF_NO_PI, i.e. without the 4 extra packet-information bytes
            (2 flag bytes + 2 protocol bytes) prepended to each frame.
        """
        if mode is None:
            mode = "tun"
        if pattern is None:
            pattern = ""
        if auto_open is None:
            auto_open = True

        super(Tunnel, self).__init__()

        self.pattern = pattern
        self.mode = mode
        self.no_pi = self.IFF_NO_PI if no_pi else 0x0000
        self.name = None
        self.fd = None

        # Translate a symbolic mode name into its system value.
        if isinstance(self.mode, basestring):
            self.mode = self.MODES.get(self.mode, None)

        assert self.mode is not None, "%r is not a valid tunnel type." % (self.mode, )

        if auto_open:
            self.open()

    def __del__(self):
        # Release the file descriptor together with the object.
        self.close()

    @property
    def mode_name(self):
        """ Returns the tunnel mode's name, for printing purpose. """
        for label, value in self.MODES.iteritems():
            if value == self.mode:
                return label

    def fileno(self):
        """ Standard function which makes this class 'select()' compatible. """
        return self.fd

    def open(self):
        """ Create the tunnel.

            Raises AlreadyOpened if the tunnel already exists, and
            NotPermitted if the operation requires more privileges.
        """
        if self.fd is not None:
            raise self.AlreadyOpened()

        logger.debug("Opening %s..." % (TUN_KO_PATH, ))
        self.fd = os.open(TUN_KO_PATH, os.O_RDWR)

        logger.debug("Opening %s tunnel '%s'..." % (self.mode_name.upper(), self.pattern, ))
        request = struct.pack("16sH", self.pattern, self.mode | self.no_pi)
        try:
            ret = fcntl.ioctl(self.fd, self.TUNSETIFF, request)
        except IOError as e:
            # errno 1 is EPERM: tun/tap creation needs CAP_NET_ADMIN.
            if e.errno == 1:
                logger.error("Cannot open a %s tunnel because the operation is not permitted." % (self.mode_name.upper(), ))
                raise self.NotPermitted()
            raise

        # The kernel echoes back the expanded, NUL-padded interface name.
        self.name = ret[:16].strip("\x00")
        logger.info("Tunnel '%s' opened." % (self.name, ))

    def close(self):
        """ Close the tunnel.  A no-op when it is not open. """
        if self.fd is None:
            return

        logger.debug("Closing tunnel '%s'..." % (self.name or "", ))
        os.close(self.fd)
        self.fd = None
        logger.info("Tunnel '%s' closed." % (self.name or "", ))

    def send(self, buf):
        """ Send the buffer 'buf'. """
        os.write(self.fd, buf)

    def recv(self, size = None):
        """ Receive a buffer.  The default size is 1500, the classical MTU. """
        return os.read(self.fd, size if size is not None else 1500)

    def set_mac(self, mac):
        """ Sets the MAC address of the device to 'mac', given as raw
            bytes.  Note: will fail for TUN devices.
        """
        octets = map(ord, mac)
        ifreq = struct.pack('16sH6B8', self.name, socket.AF_UNIX, *octets)
        fcntl.ioctl(self.fileno(), self.SIOCSIFHWADDR, ifreq)

    def set_ipv4(self, ip):
        """ Sets the IP address (ifr_addr) of the device, like ifconfig
            does.  'ip' is a dotted-quad string.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        packed_ip = socket.inet_aton(ip)
        ifreq = struct.pack('16sH2s4s8s', self.name, socket.AF_INET, '\x00'*2, packed_ip, '\x00'*8)
        fcntl.ioctl(sock, self.SIOCSIFADDR, ifreq)
        flags = self.IFF_UP|self.IFF_POINTOPOINT|self.IFF_RUNNING|self.IFF_MULTICAST
        ifreq = struct.pack('16sH', self.name, flags)
        fcntl.ioctl(sock, self.SIOCSIFFLAGS, ifreq)

    def __repr__(self):
        return "<%s tunnel '%s'>" % (self.mode_name.capitalize(), self.name, )
class TunTunnel(Tunnel):
    """ tun handler class. """

    def __init__(self, *kargs, **kwargs):
        # Force mode "tun"; all other arguments pass straight through.
        super(TunTunnel, self).__init__("tun", *kargs, **kwargs)
class TapTunnel(Tunnel):
    """ tap handler class. """

    def __init__(self, *kargs, **kwargs):
        # Bug fix: positional arguments were previously dropped (only
        # **kwargs was forwarded).  Forward both, mirroring TunTunnel,
        # so TapTunnel(pattern) behaves like TunTunnel(pattern).
        super(TapTunnel, self).__init__("tap", *kargs, **kwargs)
""" Convenient functions to open tunnels. """
tunnel = functools.partial(Tunnel, auto_open = True)
open = tunnel
|
mdmintz/SeleniumBase | seleniumbase/translate/italian.py | Python | mit | 21,139 | 0 | # Italian / Italiano - Translations - Python 3 Only!
from seleniumbase import BaseCase
from seleniumbase import MasterQA
class CasoDiProva(BaseCase):
def __init__(self, *args, **kwargs):
super(CasoDiProva, self).__init__(*args, **kwargs)
self._language = "Italian"
def apri(self, *args, **kwargs):
# open(url)
return self.open(*args, **kwargs)
def apri_url(self, *args, **kwargs):
# open_url(url)
return self.open_url(*args, **kwargs)
def fare_clic(self, *args, **kwargs):
# click(selector)
return self.click(*args, **kwargs)
def doppio_clic(self, *args, **kwargs):
# double_click(selector)
return self.double_click(*args, **kwargs)
def clic_lentamente(self, *args, **kwargs):
# slow_click(selector)
return self.slow_click(*args, **kwargs)
def clic_se_visto(self, *args, **kwargs):
# click_if_visible(selector, by=By.CSS_SELECTOR)
return self.click_if_visible(*args, **kwargs)
def clic_testo_del_collegamento(self, *args, **kwargs):
# click_link_text(link_text)
return self.click_link_text(*args, **kwargs)
def aggiornare_testo(self, *args, **kwargs):
# update_text(selector, text)
return self.update_text(*args, **kwargs)
def digitare(self, *args, **kwargs):
# type(selector, text) # Same as update_text()
return self.type(*args, **kwargs)
def aggiungi_testo(self, *args, **kwargs):
# add_text(selector, text)
return self.add_text(*args, **kwargs)
def ottenere_testo(self, *args, **kwargs):
# get_text(selector, text)
return self.get_text(*args, **kwargs)
def verificare_testo(self, *args, **kwargs):
# assert_text(text, selector)
return self.assert_text(*args, **kwargs)
def verificare_testo_esatto(self, *args, **kwargs):
# assert_exact_text(text, selector)
return self.assert_exact_text(*args, **kwargs)
def verificare_testo_del_collegamento(self, *args, **kwargs):
# assert_link_text(link_text)
return self.assert_link_text(*args, **kwargs)
def verificare_elemento(self, *args, **kwargs):
# assert_element(selector)
return self.assert_element(*args, **kwargs)
def verificare_elemento_visto(self, *args, **kwargs):
# assert_element_visible(selector) # Same as self.assert_element()
return self.assert_element_visible(*args, **kwargs)
def verificare_elemento_non_visto(self, *args, **kwargs):
# assert_element_not_visible(selector)
return self.assert_element_not_visible(*args, **kwargs)
def verificare_elemento_presente(self, *args, **kwargs):
# assert_element_present(selector)
return self.assert_element_present(*args, **kwargs)
def verificare_elemento_assente(self, *args, **kwargs):
# assert_element_absent(selector)
return self.assert_element_absent(*args, **kwargs)
def verificare_titolo(self, *args, **kwargs):
# assert_title(title)
return self.assert_title(*args, **kwargs)
def ottenere_titolo(self, *args, **kwargs):
# get_title()
return self.get_title(*args, **kwargs)
def verificare_vero(self, *args, **kwargs):
# assert_true(expr)
return self.assert_true(*args, **kwargs)
def verificare_falso(self, *args, **kwargs):
# assert_false(expr)
return self.assert_false(*args, **kwargs)
def verificare_uguale(self, *args, **kwargs):
# assert_equal(first, second)
return self.assert_equal(*args, **kwargs)
def verificare_non_uguale(self, *args, **kwargs):
# assert_not_equal(first, second)
return self.assert_not_equal(*args, **kwargs)
def aggiorna_la_pagina(self, *args, **kwargs):
# refresh_page()
return self.refresh_page(*args, **kwargs)
def ottenere_url_corrente(self, *args, **kwargs):
# get_current_url()
return self.get_current_url(*args, **kwargs)
def ottenere_la_pagina_html(self, *args, **kwargs):
# get_page_source()
return self.get_page_source(*args, **kwargs)
def indietro(self, *args, **kwargs):
# go_back()
return self.go_back(*args, **kwargs)
def avanti(self, *args, **kwargs):
# go_forward()
return self.go_forward(*args, **kwargs)
def è_testo_visto(self, *args, **kwargs): # noqa
# is_text_visible(text, selector="html")
return self.is_text_visible(*args, **kwargs)
def è_elemento_visto(self, *args, **kwargs):
# is_element_visible(selector)
return self.is_element_visible(*args, **kwargs)
def è_elemento_presente(self, *args, **kwargs):
# is_element_present(selector)
return self.is_element_present(*args, **kwargs)
def attendere_il_testo(self, *args, **kwargs):
# wait_for_text(text, selector)
return self.wait_for_text(*args, **kwargs)
def attendere_un_elemento(self, *args, **kwargs):
# wait_for_element(selector)
return self.wait_for_element(*args, **kwargs)
def attendere_un_elemento_visto(self, *args, **kwargs):
# wait_for_element_visible(selector) # Same as wait_for_element()
return self.wait_for_element_visible(*args, **kwargs)
def attendere_un_elemento_non_visto(self, *args, **kwargs):
# wait_for_element_not_visible(selector)
return self.wait_for_element_not_visible(*args, **kwargs)
def attendere_un_elemento_presente(self, *args, **kwargs):
# wait_for_element_present(selector)
return self.wait_for_element_present(*args, **kwargs)
def attendere_un_elemento_assente(self, *args, **kwargs):
# wait_for_element_absent(selector)
return self.wait_for_element_absent(*args, **kwargs)
def dormire(self, *args, **kwargs):
# sleep(seconds)
return self.sleep(*args, **kwargs)
def attendere(self, *args, **kwargs):
# wait(seconds) # Same as sleep(seconds)
return self.wait(*args, **kwargs)
def inviare(self, *args, **kwargs):
# submit(selector)
return self.submit(*args, **kwargs)
def cancellare(self, *args, **kwargs):
# clear(selector)
return self.clear(*args, **kwargs)
def js_fare_clic(self, *args, **kwargs):
# js_click(selector)
return | self.js_click(*args, **kwargs)
def js_aggiornare_testo(self, *args, **kwargs):
# js_update_text(selector, text)
return self.js_update_text(*args, **kwargs)
def js_digitare(self, *args, **kwargs) | :
# js_type(selector, text)
return self.js_type(*args, **kwargs)
def controlla_html(self, *args, **kwargs):
# inspect_html()
return self.inspect_html(*args, **kwargs)
def salva_screenshot(self, *args, **kwargs):
# save_screenshot(name)
return self.save_screenshot(*args, **kwargs)
def seleziona_file(self, *args, **kwargs):
# choose_file(selector, file_path)
return self.choose_file(*args, **kwargs)
def eseguire_script(self, *args, **kwargs):
# execute_script(script)
return self.execute_script(*args, **kwargs)
def eseguire_script_sicuro(self, *args, **kwargs):
# safe_execute_script(script)
return self.safe_execute_script(*args, **kwargs)
def attiva_jquery(self, *args, **kwargs):
# activate_jquery()
return self.activate_jquery(*args, **kwargs)
def bloccare_gli_annunci(self, *args, **kwargs):
# ad_block()
return self.ad_block(*args, **kwargs)
def saltare(self, *args, **kwargs):
# skip(reason="")
return self.skip(*args, **kwargs)
def verificare_i_collegamenti(self, *args, **kwargs):
# assert_no_404_errors()
return self.assert_no_404_errors(*args, **kwargs)
def controlla_errori_js(self, *args, **kwargs):
# assert_no_js_errors()
return self.assert_no_js_errors(*args, **kwargs)
def passa_al_frame(self, *args, **kwargs):
# switch_to_frame(frame)
return self.switch_to_frame(* |
boundary/pulse-api-cli | tests/unit/boundary/metric_create_batch_test.py | Python | apache-2.0 | 2,094 | 0.001433 | #!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import os
import json
from boundary import MetricCreateBatch
from boundary import MetricDelete
from metric_test import MetricTest
from boundary import MetricExport
from cli_test import CLITest
from cli_runner import CLIRunner
class MetricCreateBatchTest(TestCase):
def setUp(self):
self.cli = MetricCreateBatch()
self.filename = os.path.join(os.path.dirname(__file__), 'metric_import_data.json')
def test_get_description(self):
CLITest.check_description(self, self.cli)
def test_cli_help(self):
CLITest.check_cli_help(self, self.cli)
def test_create_metric_batch(self):
filename = os.path.join(os.path.dirname(__file__), 'metric_import_data.json')
print(filename)
runner_create = CLIRunner(MetricCreateBatch())
create = runner_create.get_output(['-f', filename])
runner_export = CLIRunner(MetricExport())
export = runner_export.get_output(['-p', 'TEST_METRIC_IMPORT'])
metrics = json.loads(export)
MetricTest.metric_assert(self,
metrics['TEST_METRIC_IMPORT_A'],
'My Number of Files',
'My Files',
'My Number Of Files',
'number',
'SUM',
2000,
False)
# runner_delete = CLIRunner(MetricDelete())
# delete = runner_delete.get_output(['-n', metric_name])
|
csiu/promi2 | code/promi2.py | Python | mit | 8,709 | 0.006889 | #!/usr/bin/env python
# Author: csiu
# Created: 2015-02-02
import argparse
from ConfigParser import SafeConfigParser
import sys
import os
from utils import get_value_from_keycolonvalue_list, ensure_dir, random_string
import features
import mirna_proximity
import correlation
import gff_unify_features
import promirna
import plots
usage = """- Runs promi2
EXAMPLE:
python2.7 promi2.py -i ../test/test.gff -o ../Testout-promi2
- When the features.gff file is already available, use the '-f' option
EXAMPLE:
python2.7 promi2.py -i ../test/test-features.gff -f -o ../Testout-promi2predict
- enable plotting with "-p"
"""
def _read_params(f_param):
params_dict = {}
with open(f_param) as f:
for l in f:
k,v = l.strip().split(':')
params_dict[k] = float(v)
mu1 = params_dict['mu_promoter']
mu2 = params_dict['mu_background']
lambda1 = params_dict['lambda_promoter']
lambda2 = params_dict['lambda_background']
betas = [i for i in params_dict.keys() if i.startswith('beta')]
betas.sort()
betas = [params_dict[b] for b in betas]
return (mu1, mu2, lambda1, lambda2, betas)
def _make_prediction(prior_prom, p_prom, p_back):
if str(prior_prom).endswith('*'):
note = '*'
else:
note = ''
if p_prom >= p_back:
prediction = 'prom'+note
else:
prediction = 'back'+note
return prediction
def promi2(f_param, listoffeatures, infile, outfile):
mu1, mu2, lambda1, lambda2, betas = _read_params(f_param)
if len(betas) != len(listoffeatures)+1:
sys.exit("ERROR: number of betas does not match number of features")
with open(outfile, 'w') as out:
with open(infile) as f:
for line in f:
line = line.strip()
l = line.split('\t')
x = float(l[5])
_features = l[7].split(';')
fvalues = []
for lof in listoffeatures:
try:
fvalues.append(float(get_value_from_keycolonvalue_list(lof, _features)))
except ValueError:
fvalues.append(0)
p_prom, p_back, prior_prom, prior_back = promirna.promirna(x, mu1, mu2, lambda1, lambda2,
betas, fvalues)
prediction = _make_prediction(prior_prom, p_prom, p_back)
#line = '\t'.join([line,
# ';'.join(['prior_prom:'+str(prior_prom), 'prior_back:'+str(prior_back),
# 'prob_prom:'+str(p_prom), 'prob_back:'+str(p_back)]),
# prediction]) + '\n'
line = line + '\t%s\t%s\t%s\t%s\t%s\n' % (prior_prom, prior_back, p_prom, p_back, prediction)
out.write(line)
return
def _cleanup_extra_positions(infile, outfile):
## cleanup of extra positions
## compare miRNA positions in PROX & CORR
with open(outfile, 'w') as out:
with open(infile) as f:
for line in f:
l = line.split('\t')
descript = l[8].split('@')
if (descript[1] != '') and (descript[2] != '\n'):
info_mprox = descript[1].split(';')
prox_start = get_value_from_keycolonvalue_list('mirna_start', info_mprox)
prox_stop = get_value_from_keycolonvalue_list('mirna_stop', info_mprox)
info_corr = descript[2].split(';')
corr_start = get_value_from_keycolonvalue_list('mirna_start', info_corr)
corr_stop = get_value_from_keycolonvalue_list('mirna_stop', info_corr)
if (prox_start == corr_start) and \
(prox_stop == prox_stop):
out.write(line)
else:
out.write(line)
return outfile
def main(f_config, gff_cage, is_gff, outdir, make_plots):
cparser = SafeConfigParser()
cparser.read(f_config)
in_bname = os.path.basename(gff_cage)
if outdir == None:
outdir = 'promi2_outdir_'+in_bname+'_'+random_string(6)
ensure_dir(outdir, False)
f_param = cparser.get('promi2','params')
listoffeatures = cparser.get('promi2','features')
listoffeatures = listoffeatures.split(',')
if 'corr' in listoffeatures:
is_consider_corr = True
corrmethod = cparser.get('correlation','corrmethod')
else:
is_consider_corr = False
## PART1: Feature extraction
if not is_gff:
## feature extraction: cpg, cons, tata (features.py)
outdir_seqfeatures = os.path.join(outdir, 'seqfeatures')
ensure_dir(outdir_seqfeatures, False)
gff_1kbfeatures = os.path.join(outdir_seqfeatures, 'features_1kbseq.gff')
f_fasta = cparser.get('genome','fasta')
f_chromsizes = cparser.get('genome','chromsizes')
d_phastcons = cparser.get('cons','phastcons')
TRAP = cparser.get('tata','trap')
f_psemmatrix = cparser.get('tata','psem')
features.main(gff_cage, outdir_seqfeatures,
f_fasta, f_chromsizes, d_phastcons, TRAP, f_psemmatrix,
gff_1kbfeatures)
## feature extraction: mirna_proximity (mirna_proximity.py)
outdir_mprox = os.path.join(outdir, 'mprox')
| ensure_dir(outdir_mprox, False)
gff_mirnaprox = os.path.join(outdir_mprox, 'features_mirnaprox.gff')
gff_mirna = cparser.get('mirbase','gff2')
mirna_proximity.main(gff_cage, gff_mirna, gff_mirnaprox)
## merge extracted features (gff_unify_features.py)
gff_features = os.path.join(outdir, 'Features.1kb.mprox.'+in_bname)
gff_unify_features.main(gff_1kbfeatures, gff_mirnaprox, 'mirna_prox', '0', gff_features)
if is_consider_corr:
## merge extracted features (gff_unify_features.py) after compute correlation
gff_features_corr = os.path.join(outdir,
'Features.1kb.mprox.%s.%s' % (corrmethod, in_bname))
outdir_corr = os.path.join(outdir, 'corr')
m_mirna = cparser.get('correlation', 'srnaseqmatrix')
m_tss = cparser.get('correlation', 'cageseqmatrix')
gff_corr = correlation.main(gff_mirna, m_mirna, m_tss, corrmethod, outdir_corr)
gff_unify_features.main(gff_features, gff_corr, 'corr', '0', gff_features_corr)
gff_allfeatures = gff_features_corr
else:
gff_allfeatures = gff_features
else:
gff_allfeatures = gff_cage
with open(gff_allfeatures) as f:
l = f.readline().split('\t')
if not (':' in l[7]):
sys.exit('ERROR: this is not a features.gff formatted file')
## PART2: extract parameters & run promirna
f_prediction = os.path.join(outdir, 'Predictions.'+in_bname+'.txt')
print 'COMPUTING: "%s"...' % f_prediction
promi2(f_param, listoffeatures, gff_allfeatures, f_prediction)
## PART3: plots
if make_plots:
plotdir = os.path.join(outdir, 'plots')
ensure_dir(plotdir, False)
plots.main(f_prediction, plotdir, f_config)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=usage,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--infile', dest='infile',
required=True,
help='''path to input gff input file.
Tab-separated columns should be like:
1. chrom
2. source
3. feature
4. start (+500)
5. stop (-500)
6. normalized tag count
7. strand
8. .
9. info
''')
parser.add_argument('-f', dest='is_gff',
action='store_true',
help='flag to specify that infile is already features.gff file')
parser.add_argument('-p', dest='make_plots',
action='store_true',
help='''Flag to enable plotting
This requires extra packages to b | |
bmaia/rext | core/updater.py | Python | gpl-3.0 | 3,055 | 0.008191 | #This file is part of REXT
#updater.py - script that handles updating of REXT and it's components
#Author: Ján Trenčanský
#License: GNU GPL v3
import subprocess
import time
import re
import os
import interface.utils
import core.globals
from interface.messages import print_blue
#Pull REXT from git repo
def update_rext():
subprocess.Popen("git pull", shell=True).wait()
time.sleep(4)
#Reset HEAD to discard local changes and pull
def update_rext_force():
subprocess.Popen("git reset --hard", shell=True).wait()
subprocess.Popen("git pull", shell=True).wait()
time.sleep(4)
#Download OUI file, and recreate DB
def update_oui():
if interface.utils.file_exists("./databases/oui.db") and core.globals.ouidb_conn is not None:
connection = core.globals.ouidb_conn
cursor = connection.cursor()
#Truncate database
print_blue("Truncating oui table")
cursor.execute("""DROP TABLE oui""")
cursor.execute("""CREATE TABLE oui (
id INTEGER PRIMARY KEY NOT NULL,
oui TEXT UNIQUE,
name TEXT)""")
print_blue("Downloading new OUI file")
interface.utils.wget("http://standards.ieee.org/regauth/oui/oui.txt", "./output/tmp_oui.txt")
file = open("./output/tmp_oui.txt", "r")
regex = re.compile(r"\(base 16\)")
for line in file:
| if regex.search(line) is not None:
line = "".join(line.split("\t"))
line = line.split("(")
oui = line[0].replace(" ", "")
company = line[1].split(")")[1]
company = company.replace("\n", "")
if company == " ":
| company = "Private"
try:
cursor.execute("INSERT INTO oui (oui, name) VALUES (?, ?)", [oui, company])
connection.commit()
except Exception as e:
#CONRAD CORP. and CERN + ROYAL MELBOURNE INST OF TECH share oui, this should be considered
#print(e)
#print(oui + " " + company)
#SELECT name FROM oui.oui WHERE oui = oui
#UPDATE oui.oui SET name = name+" OR "+company WHERE oui=oui
pass
#Add a few OUIs manually (from NMAP oui file)
cursor.execute("INSERT INTO oui (oui, name) VALUES ('525400', 'QEMU Virtual NIC')")
cursor.execute("INSERT INTO oui (oui, name) VALUES ('B0C420', 'Bochs Virtual NIC')")
cursor.execute("INSERT INTO oui (oui, name) VALUES ('DEADCA', 'PearPC Virtual NIC')")
cursor.execute("INSERT INTO oui (oui, name) VALUES ('00FFD1', 'Cooperative Linux virtual NIC')")
connection.commit()
try:
os.remove("./output/tmp_oui.txt")
except OSError:
pass
|
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/job_handler_broker.py | Python | apache-2.0 | 46,467 | 0.001894 | from ..broker import Broker
class JobHandlerBroker(Broker):
controller = "job_handlers"
def index(self, **kwargs):
"""Lists the available job handlers. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: No description is available for id.
:type id: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, created_by, unit_id, broker_user_name, broker_password_secure, broker_password_version, broker_exchange, broker_queue, broker_admin_queue, concurrent_limit, current_bid, status, last_status_at, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each JobHandler. Valid values are id, created_by, unit_id, broker_user_name, broker_password_secure, broker_password_version, broker_exchange, broker_queue, broker_admin_queue, concurrent_limit, current_bid, status, last_status_at, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return job_handlers: An array of the JobHandler objects that match the specified input criteria.
:rtype job_handlers: Array of JobHandler
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified job handler.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier of the job handler.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return job_handler: The job handler identified by the specified id.
:rtype job_handler: JobHandler
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
"""Lists the available job handlers matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param broker_admin_queue: No description is available for broker_admin_queue.
:type broker_admin_queue: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param broker_exchange: No description is available for broker_exchange.
:type broker_exchange: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param broker_password_secure: No description is available for broker_password_secure.
:type broker_password_secure: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param broker_password_version: No description is available for broker_password_version.
:type broker_password_version: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param broker_queue: No description is available for broker_queue.
:type broker_queue: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param broker_user_name: No description is available for broker_user_name.
:type broker_user_name: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param concurrent_limit: No description is available for concurrent_limit.
:type concurrent_limit: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param created_at: No description is available for created_at.
:type created_at: Array of String
| ``api version min:`` 2.6
             | ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_by: No description is available for created_by.
:type created_by: Array of String
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param current_bid: No description is available for current_bid.
:type current_bid: Array of String
| ``api version min:`` 2.6
| ``api |
thysol/CS4098 | backend/peos_notify.py | Python | gpl-2.0 | 701 | 0.011412 | #!/usr/bin/python
import os
import re
import sys
import json
import urllib
import socket
import subprocess
import cgi, cgitb
from os import listdir
from os.path | import isfile, join
#http://178.62.51.54:13930/event=CREATE&login_name=henrik&pathway_name=test_commit.pml
def peos_notify(patient_id):
EXECUTION_PATH = "../peos/os/kernel/"
#Error constants
ERROR_USER_NOT_EXIST = 1
ERROR_SCRIPT_FAIL = 2
os.chdir(os.path.dirname(os.path.realpath(__file__)))
os.chdir(EXECUTION_PATH)
process | = subprocess.Popen(["./peos", "-l", str(patient_id), "-u" ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
return output, error
|
fangxingli/hue | desktop/libs/notebook/src/notebook/models.py | Python | apache-2.0 | 6,176 | 0.009067 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import numbers
import uuid
from django.utils.html import escape
from desktop.lib.i18n import smart_unicode
from notebook.connectors.base import Notebook
# Materialize and HTML escape results
def escape_rows(rows, nulls_only=False):
data = []
for row in rows:
escaped_row = []
for field in row:
if isinstance(field, numbers.Number):
if math.isnan(field) or math.isinf(field):
escaped_field = json.dumps(field)
else:
escaped_field = field
elif field is None:
escaped_field = 'NULL'
| else:
escaped_field = smart_unicode(field, errors='replace') # Prevent error when gettin | g back non utf8 like charset=iso-8859-1
if not nulls_only:
escaped_field = escape(escaped_field).replace(' ', ' ')
escaped_row.append(escaped_field)
data.append(escaped_row)
return data
def make_notebook(name='Browse', description='', editor_type='hive', statement='', status='ready',
files=None, functions=None, settings=None, is_saved=False, database='default', snippet_properties=None):
from notebook.connectors.hiveserver2 import HS2Api
editor = Notebook()
if snippet_properties is None:
snippet_properties = {}
if editor_type == 'hive':
sessions_properties = HS2Api.get_properties(editor_type)
if files is not None:
_update_property_value(sessions_properties, 'files', files)
if functions is not None:
_update_property_value(sessions_properties, 'functions', functions)
if settings is not None:
_update_property_value(sessions_properties, 'settings', settings)
elif editor_type == 'impala':
sessions_properties = HS2Api.get_properties(editor_type)
if settings is not None:
_update_property_value(sessions_properties, 'files', files)
elif editor_type == 'java':
sessions_properties = [] # Java options
else:
sessions_properties = []
data = {
'name': name,
'uuid': str(uuid.uuid4()),
'description': description,
'sessions': [
{
'type': editor_type,
'properties': sessions_properties,
'id': None
}
],
'selectedSnippet': editor_type,
'type': 'query-%s' % editor_type,
'showHistory': True,
'isSaved': is_saved,
'snippets': [
{
'status': status,
'id': str(uuid.uuid4()),
'statement_raw': statement,
'statement': statement,
'type': editor_type,
'properties': {
'files': [] if files is None else files,
'functions': [] if functions is None else functions,
'settings': [] if settings is None else settings
},
'name': name,
'database': database,
'result': {}
}
]
}
if snippet_properties:
data['snippets'][0]['properties'].update(snippet_properties)
editor.data = json.dumps(data)
return editor
def make_notebook2(name='Browse', description='', is_saved=False, snippets=None):
from notebook.connectors.hiveserver2 import HS2Api
editor = Notebook()
_snippets = []
for snippet in snippets:
default_properties = {
'files': [],
'functions': [],
'settings': []
}
if snippet['type'] == 'hive':
pass
elif snippet['type'] == 'impala':
pass
elif snippet['type'] == 'java':
pass
_snippets.append(snippet)
print _snippets
data = {
'name': name,
'uuid': str(uuid.uuid4()),
'description': description,
'sessions': [
{
'type': _snippet['type'],
'properties': HS2Api.get_properties(snippet['type']),
'id': None
} for _snippet in _snippets # Non unique types currently
],
'selectedSnippet': _snippets[0]['type'],
'type': 'notebook',
'showHistory': False,
'isSaved': is_saved,
'snippets': [
{
'status': _snippet.get('status', 'ready'),
'id': str(uuid.uuid4()),
'statement_raw': _snippet.get('statement', ''),
'statement': _snippet.get('statement', ''),
'type': _snippet.get('type'),
'properties': _snippet.properties,
'name': name,
'database': _snippet.get('database'),
'result': {}
} for _snippet in _snippets
]
}
editor.data = json.dumps(data)
return editor
def import_saved_beeswax_query(bquery):
design = bquery.get_design()
return make_notebook(
name=bquery.name,
description=bquery.desc,
editor_type=_convert_type(bquery.type, bquery.data),
statement=design.hql_query,
status='ready',
files=design.file_resources,
functions=design.functions,
settings=design.settings,
is_saved=True,
database=design.database
)
def _convert_type(btype, bdata):
from beeswax.models import HQL, IMPALA, RDBMS, SPARK
if btype == HQL:
return 'hive'
elif btype == IMPALA:
return 'impala'
elif btype == RDBMS:
data = json.loads(bdata)
return data['query']['server']
elif btype == SPARK: # We should not import
return 'spark'
else:
return 'hive'
def _update_property_value(properties, key, value):
"""
Update property dict in list of properties where prop has "key": key, set "value": value
"""
for prop in properties:
if prop['key'] == key:
prop.update({'value': value})
|
AlexLitvino/i8080_simulator | microprocessor/register_pair.py | Python | apache-2.0 | 189 | 0 | class RegisterPair:
def __init__(self, name, register_high, reg | ister_low):
self.name = name
self.register_high = register_high
self.register_low = | register_low
|
AnanseGroup/map-of-innovation | mapofinnovation/controllers/uifunc.py | Python | mit | 2,555 | 0.019178 | import logging |
import json
import redis
import sys
import urllib
import os
from urlparse import urlparse
from pylons import request, response, session, tmpl_context as c, url
from pylons.d | ecorators import jsonify
from pylons.controllers.util import abort, redirect
from mapofinnovation.lib.base import BaseController, render
log = logging.getLogger(__name__)
class UifuncController(BaseController):
def index(self):
# Return a rendered front page template
markers = []
indices = {
"name": "name",
"city": "city",
"country": "country",
"website": "primary_website",
"primarytype": "primary_type",
"multitypes": "types_multiple",
"description": "description",
"latitude": "latitude",
"longitude":"longitude",
"services": "services"
}
if os.environ.get("REDIS_URL") :
redis_url = os.environ.get("REDIS_URL")
else:
redis_url = "localhost"
r = redis.from_url(redis_url)
i = 0
for key in r.scan_iter():
marker = {}
row = r.hgetall(key)
for header in indices.keys():
marker[header] = unicode(row[str(indices[header])], errors='replace')
markers.append(marker)
c.markers = json.dumps(markers)
return render('/makermap.html')
def wikipage(self,id=None):
#Return a wiki for the given space
if os.environ.get("REDIS_URL") :
redis_url = os.environ.get("REDIS_URL")
else:
redis_url = "localhost"
r = redis.from_url(redis_url)
if id is None :
return 'Provide a valid space id'
elif r.exists(id):
data = r.hgetall(id)
addresstext = str(data['street_address']).decode("ISO-8859-1")
websitetext = urllib.unquote(data['primary_website']).decode('utf8')
return render('/wikipage.html',extra_vars={'last_updated':str(data['last_updated']),'name':str(data['name']),'status':str(data['status']),'website_url':websitetext,'primarytype':str(data['primary_type']),'secondarytype':'','space_description':str(data['description']),'address':addresstext})
else :
return 'There is no space with this id. Please recheck and submit'
def about(self):
return render('/about.html')
def goals(self):
return render('/goals.html')
def userDocs(self):
return render('/user-documentation.html')
def devDocs(self):
return render('/developer-documentation.html') |
amiraliakbari/sharif-mabani-python | by-session/ta-922/j6/a1.py | Python | mit | 136 | 0.014706 | def b():
print("hooora!")
dic1 = {1: 2, 1: 4}
for i in dic | 1.items():
if 'a' = | = i:
b()
else:
break
print ":(" |
stonewell/pymterm | pymterm/cap/unknown_cap.py | Python | mit | 443 | 0.004515 | import logging
def handle(term, context, cap_turple):
cap_name, increase_params = cap_turple
if hasattr(term, cap_name):
if increase_params:
for idx in range(len(con | text.params)):
context.params[idx] -= 1 if context.params[idx] != 0 else 0
getattr(term, cap_name)(context)
else:
| logging.error('No module named:{}, params:{}'.format(cap_name, context.params))
|
Saethlin/astrotools | photometry.py | Python | mit | 5,356 | 0.020724 | #!/usr/bin/env python
import numpy as np
from astropy.io import fits
import scipy.ndimage
import scipy.fftpack
import scipy.optimize
def getcentroid(coordinates, values):
"""
Image centroid from image points im that match with a 2-d array pos, which
contains the locations of each point in an all-positive coordinate system.
"""
return np.sum(values*coordinates, axis=1) / np.sum(values)
def flatfunc(centroid, p0, p1, p2):
"""
Intended only for use with detrend().
"""
return p0*centroid[:, 0] + p1*centroid[:, 1] + p2
def detrend(flux,centroid):
"""
Detrend flux against centroid points. Returns normalized flux.
"""
for f in range(flux.shape[0]):
p, cov = scipy.optimize.curve_fit(flatfunc, centroid[f], flux[f])
flux[f] /= flatfunc(centroid[f], *p)
flux[f] /= np.median(flux[f])
return flux
def photometer(files, coords, obj, sky=None):
"""
Aperture photometery on images contained in files at initial star positions
near coords. Returns flux of each star with corresponding centroid locations.
"""
centroid = np.zeros((coords.shape[0],len(files),2))
flux = np.zeros((coords.shape[0],len(files)))
centroid[:,-1] = coords
if sky == None:
sky = obj
has_sky = sky != None
pos = np.mgrid[-sky:sky+1,-sky:sky+1]
dst = np.sqrt(np.sum(pos,0))
objap = dst <= obj
skyap = dst <= sky
objsize = np.sum(objap)
for f in range(len(files)):
im = fits.open(files[f])[0].data
if not has_sky:
skyval = np.median(im)*objsize
for c in range(coords.shape[0]):
#Could start new subprocess here
y,x = centroid[c,f-1]
if y > 0 and x > 0 and y < im.shape[0] and x < im.shape[1]:
y,x = seekmax(im,y,x)
y,x = getcentroid(*getstar(im,y,x))
if y > sky and x > sky and y < im.shape[0]-sky-1 and x < im.shape[1]-sky-1:
if has_sky:
skyval = np.median(im[y-sky:y+sky+1,x-sky:x+sky+1][skyap]) * objsize
flux[c,f] = np.sum(im[y-sky:y+sky+1,x-sky:x+sky+1][objap]) - skyval
centroid[c,f] = y,x
return flux,centroid
def find_stars(data):
#If passed a list, stack and median-combine first
if isinstance(data,list):
warps,aligned = astt.align(data)
aligned = np.asarray(aligned)
im = np.median(aligned,0)
else:
im = data
#Denoise the image with a fourier filter
fourier = np.fft.fft2(im)
fourier = np.fft.fftshift(fourier)
print(fourier.max())
fits.writeto('fourier.fits',abs(fourier),clobber=True)
exit()
#Compute the second derivative at every point
laplace = ndimage.laplace(smoothed)
#Image should be concave down where there are stars
stars = derivative < 0
#Stars should also be a local min in the laplacian
row_buffer = np.zeros(laplace.shape[0])
col_buffer = row_buffer[None,:]
above = np.vstack((laplace[1:],row_buffer[:]))
below = np.vstack((row_buffer[:,:],laplace[:-1]))
right = np.hstack((laplace[1:],row_buffer[:,:]))
stars = stars & (laplace < above) & (laplace < below) & (laplace < right)
#Denoise the image with a fourier filter
print(np.std(im))
fourier = scipy.fftpack.rfft(im)
fits.writeto('fft.fits',fourier,clobber=True)
fourier[0] = 0
fourier[-1] = 0
fourier[:,0] = 0
fourier[:,-1] = 0
test = scipy.fftpack.ifft(fourier).real
fits.writeto('ifft.fits',test,clobber=True)
print(np.std(test))
exit()
#Compute the second derivative at every point
laplace = ndimage.laplace(smoothed)
#Image should be concave down where there are stars
stars = derivative < 0
#Stars should also be a local min in the laplacian
row_buffer | = np.zeros(laplace.shape[0])
col_buffer = np.zeros(laplace.sha | pe[1][None,:])
above = np.vstack((laplace[1:],row_buffer[:]))
below = np.vstack((row_buffer[:,:],laplace[:-1]))
right = np.hstack((laplace[1:],row_buffer[:,:]))
stars = stars & (laplace < above) & (laplace < below) & (laplace < right) & (laplace < left)
#Pick a sky value
sky = np.median(im)
#Sigma threshold for sky level
signal = im > (sky + sky_sigma*np.sqrt(sky))
#Use binary erosion and propagation to remove isolated points of signal
eroded_signal = binary_erosion(signal)
signal = binary_propagation(eroded_signal,mask=signal)
#Stars are only where signal is significant
stars = stars & signal
return stars
"""
image = fits.open('test.fits')[0].data
find_stars(image)
from astropy.io import fits
im = fits.open('sample_data/test_data0.fits')[0].data
find_stars(im)
"""
"""
Simple aperture photometry on image files
"""
def do_photometry(files, program_stars):
#Find stars
#Remove program stars from list
#Determine optimal aperture (s)
#Photometer everything
#Detrend against position
#Detrend against temperature, maybe other things
#Find good standards and correct
#Return flux and time arrays
pass
|
veltzer/pdmt | pdmt/plugins/nodes/operations/debinstaller.py | Python | gpl-3.0 | 1,546 | 0.001294 | import pdmt.api
import pdmt.config
import pdmt.utils.subproc
'''
This module handles publishing and unpublishing a deb package from an apt repository
Mark Veltzer <mark@veltzer.net>
'''
class Operation(object):
def __init__(self):
super().__init__(
'debinstaller',
'install the package into the repository',
)
def run(self):
args = []
if pdmt.config.ns_apt.p_sudo:
args.append('sudo')
args.append('reprepro')
args.extend(['--basedir', pdmt.config.ns_apt.p_abs_dir])
args.extend(['--component', pdmt.config.ns_apt.p_component])
args.extend(['includedeb', pdmt.config.ns_apt.p_codename, pdmt.config.ns_apt.p_deb_file])
pdmt.utils.subproc.check_call(args)
def remove():
args = []
if pdmt.config.ns_apt.p_sudo:
args.append('sudo')
args.append('reprepro')
args.extend(['--basedir', pdmt.config.ns_apt.p_abs_dir])
args.extend(['--component', p | dmt.config.ns_apt.p_component])
args.extend(['remove', pdmt.config.ns_apt.p_codename, NAME])
pdmt.utils.subproc.check_call(args)
def command(name): |
args = []
if pdmt.config.ns_apt.p_sudo:
args.append('sudo')
args.append('reprepro')
args.append(name)
pdmt.utils.subproc.check_call(args)
def makerepo():
pass
def dumpunreferenced():
command('dumpunreferenced')
def deleteunreferenced():
command('deleteunreferenced')
|
denys-duchier/Scolar | config/softs/jaxml-3.01/test/test.py | Python | gpl-2.0 | 9,215 | 0.007162 | #! /usr/bin/env python
# Test program for jaxml
#
# (C) Jerome Alet <alet@librelogiciel.com> 2000
# You're welcome to redistribute this software under the
# terms of the GNU General Public Licence version 2.0
# or, at your option, any higher version.
#
# You can read the complete GNU GPL in the file COPYING
# which should come along with this software, or visit
# the Free Software Foundation's WEB site http://www.fsf.org
#
# $Id: test.py,v 1.13 2003/02/13 14:36:13 jerome Exp $
#
import sys
# import the jaxml module from the parent directory
sys.path.insert(0, "..")
import jaxml
print "\n\n==== TESTING XML ====\n"
# now we create an instance
# we may optionally pass a version and an encoding arguments.
x = jaxml.XML_document()
# first tag, with different attributes
# numeric values are automatically quoted
x.sometag(yes = "NO", some = "a bit", value = 5)
# this one, and till the end will be inside the previous one
x.anothertag("this tag and till the end will be inside the <sometag> ... </sometag>")
# here we save the current position
x._push()
# here some nested tags
x.whatever()
x.ilikepython()
x._text("Here we are inside <whatever><ilikepython> ... </ilikepython></whatever>")
# the following tag has nothing but attributes, we must save and restore it's
# position because it is followed by another tag (we doesn't want to enclose the following tag)
x._push()
x.someattributetag(attr = "Hey ! I'm the attribute !")
x._pop()
x.justatest("This is just a test", dummy="YES")
# here we want to continue our document
# at the same indentation level than <whatever>
x._pop()
x.dummytag("we have just escaped", value = "Fine !")
x.dummytwo("Since the previous tag and this one were called with an unnamed first parameter\nwe didn't need _push() nor _pop()")
# here we insert plain text
x._text("Near the end")
# here we insert some text just like:
# <mytag>Some dummy text</mytag>
x.mytag("Some dummy text, and no tag attributes")
# here some beautiful tag nesting
x.onetag(message="This is").anotherone("a beautiful").deeper(message = "tag nesting possibility")
# here the naming space notation for <Space:Tag>...</Space:Tag>
x.namingspace.onetag("This is how to use the naming space notation Space:Tag", wonderful="YES")
# here just a tag with attributes, but nothing in it
# we don't need to _push() and _pop() because it isn't followed by anything
x.attributetag(content = "I've got nothing enclosed in me", index = 9)
# here we save to a file
x._output("sampleXML.xml")
# but we may as well output it to the screen
print x
# test the new templating facility
# I urge you to read the following lines and look carefully at the result
# to see how this beautiful thing works !
x._text("Now we will replace some content with the new possibility of using a document as a mapping.")
x._text("This may be useful for templating without a template file, or replacing some chars with their equivalent SGML entities for example:")
x._text("Here are three accented characters, two of them which will be replaced\nwith their equivalent SGML entities: àéè")
x["nothing enclosed"] = "something enclosed"
x["SGML"] = "XML"
x["attributetag"] = "modifiedattributename"
x["é"] = "é";
x["è"] = "è";
x["à"] = "à";
# this is also available as readable attributes
sys.stderr.write('x["è"] = %s\n' % x["è"])
# and we can also delete them
del x["è"]
# or use the str() or repr() builtin functions
mydoc = "With str() or repr(), my modified document looks like:\n" + str(x) + "And that's all folks !"
print mydoc
# Now we want to test the HTML output
print "\n\n==== TESTING HTML ====\n"
page = jaxml.HTML_document()
# here we begin our html document
page.html()
# we must do a push and a pop in order for the <body> tags
# to not be enclosed between <head> and </head>
page._push()
# build the head of the document
page.head()
#
#
# Other meta tags should work fine
page._meta(name="GENERATOR", content="jaxml.py v2.24 from Jerome Alet - alet@librelogiciel.com")
page._meta(name="DESCRIPTION", content="A CGI document, to test jaxml.py")
page._meta(name="KEYWORDS", content="python, jaxml, linux")
page.title("A CGI test document")
# here we exit from the <head> ... </head>
page._pop()
# we begin the body
page.body(bgcolor="pink")
# here we insert a dumb text
page._text("A small text")
# we do a push to be able to exit from the <form> ... </form>
page._push()
page.form(action="/cgi-bin/jerome/ok.py", method="POST")
page.h1("Form's title")
# to be able to exit from <select> ... </select>
page._push()
page.select(name="choice", size="1", multiple="multiple")
page.option("Choice number 1")
page.option("Choice number 2", selected="selected")
page.option("Choice number 3")
# exit from <select> ... </select>
page._pop()
page.h3("Second part of the Form")
page._br()
page._textinput(name="dumbstring", size="50")
page._submit()
page._reset()
# here we exit from the <form> ... </form>
page._pop()
page._text("here we should be outside of the form")
page._text("and there we should be one the same line visually but on two different lines in the html file")
page.a("Click on Me", href="http://www.slashdot.org")
page.pre("Hello !!!\n\t\tBye Bye\n\n")
page._text("Here we should be outside of the PRE.../PRE tag")
# then we insert some text
page._text("Just below you will see some lines of text which are included from a template file, with variables substitution:")
page._br()
# then we include the template file
page._template("template.htt", font_color='red', link_to_my_homepage="<a href='http://www.librelogiciel.com/'>My website</a>", another_variable="<br /><center>Thank you for trying</center>")
# then some separation
page.hr(width="33%", noshade="noshade")
# here we do the output to the screen
page._output()
# and here we do the output to a file
page._output("sampleHTML.html")
# Now we want to test the CGI/HTML output
print "\n\n==== TESTING CGI ====\n"
# just some dummy values
page = jaxml.CGI_document(encoding = "utf-8", content_type="text/html", version = "3.0")
# to do a redirection, just do
# page.set_redirect("http://www.librelogiciel.com/")
# then just call page.output("")
# here again we can do that whenever we want (before output)
# text/html is the default for _set_content_type()
#page._set_content_type("application/pdf")
# to define a pragma, just use:
# page._set_pragma("pragma_name")
# we can do that whenever we want, (before output)
# to define an expiration date, just use:
# page._set_expires("expiration_date")
# we can do that whenever we want, (before output)
# Maybe this should be done by the class's __init__ function
# but I don't think so in order for us to have more control
page._default_header(title = 'a CGI document')
# we begin the body
page.body(bgcolor="pink")
# here we insert a dumb text
page._text("A small text")
# we do a push to be able to exit from the <form> ... </form>
page._push()
page.form(action="/cgi-bin/jerome/ok.py", method="POST")
page.h1("Form's title")
# | to be able to exit from <select> ... </select>
page._push()
page.select(name="choice", size="1")
page.option("Choice number 1")
page.option("Choice number 2")
page.option("Choice number 3", selected="selected")
# exit from <sel | ect> ... </select>
page._pop()
page.h3("Second part of the Form")
page._br()
page._textinput(name="dumbstring", size="50")
page._submit()
page._reset()
# here we exit from the <form> ... </form>
page._pop()
page._text("here we should be outside of the form")
page._text("and there we should be one the same line visually but on two different lines in the html file")
page.a("Click on Me", href="http://www.slashdot.org")
page.pre("Hello !!!\n\t\tBye Bye\n\n")
page._text("Here we should be outside of the PRE.../PRE tag")
# here we define a debug file which will receive the CGI output too
page._set_debug("CGI_debug.html")
# here we do the output
# for a CGI script, give an empty string (for stdout)
# or None, or nothing, unless you want to debug (give a filename) or a file object
page._output("")
# Now we want to test the arithmetic operations
print "\n\n==== TESTING ARITHMETIC ====\n"
pri |
bitesofcode/xqt | xqt/wrappers/pyside.py | Python | lgpl-3.0 | 9,830 | 0.007121 | """ Sets up the Qt environment to work with various Python Qt wrappers """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2012, Projex Software'
__license__ = 'LGPL'
# maintenance information
__maintainer__ = 'Projex Software'
__email__ = 'team@proj | exsoftware.com'
# requires at least the QtCore module
import PySide
import logging
import re
import sys
import xml.parsers.expat
from PySide import QtCore, QtGui, QtUiTools
from xml.etree import ElementT | ree
from ..lazyload import lazy_import
log = logging.getLogger(__name__)
class XThreadNone(object):
"""
PySide cannot handle emitting None across threads without crashing.
This variable can be used in place of None.
:usage |class A(QtCore.QObject):
| valueChanged = QtCore.Signal('QVariant')
| def setValue(self, value):
| self._value = value
| emitter = value if value is not None else QtCore.THREADSAFE_NONE
| self.valueChanged.emit(emitter)
|
|class B(QtCore.QObject):
| def __init__(self, a):
| super(B, self).__init__()
| a.valueChanged.connect(self.showValue)
| def showValue(self, value):
| if value == None:
| print 'value does equal none'
| if value is None:
| print 'value unfortunately not IS none'
|
|a = A()
|b = B()
|t = QtCore.QThread()
|a.moveToThread(t)
|t.start()
|a.setValue(None) # will crash if not using THREADSAFE_NONE
"""
def __nonzero__(self):
return False
def __repr__(self):
return 'None'
def __str__(self):
return 'None'
def __eq__(self, other):
return id(other) == id(self) or other is None
#----------------------------------------------------------------------
SIGNAL_BASE = QtCore.SIGNAL
def SIGNAL(signal):
match = re.match(r'^(?P<method>\w+)\(?(?P<args>[^\)]*)\)?$', str(signal))
if not match:
return SIGNAL_BASE(signal)
method = match.group('method')
args = match.group('args')
args = re.sub(r'\bPyQt_PyObject\b', 'QVariant', args)
args = re.sub(r'\bobject\b', 'QVariant', args)
new_signal = '%s(%s)' % (method, args)
return SIGNAL_BASE(new_signal)
#----------------------------------------------------------
class UiLoader(QtUiTools.QUiLoader):
def __init__(self, baseinstance):
super(UiLoader, self).__init__()
self.dynamicWidgets = {}
self._baseinstance = baseinstance
def createAction(self, parent=None, name=''):
"""
Overloads teh create action method to handle the proper base
instance information, similar to the PyQt4 loading system.
:param parent | <QWidget> || None
name | <str>
"""
action = super(UiLoader, self).createAction(parent, name)
if not action.parent():
action.setParent(self._baseinstance)
setattr(self._baseinstance, name, action)
return action
def createActionGroup(self, parent=None, name=''):
"""
Overloads teh create action method to handle the proper base
instance information, similar to the PyQt4 loading system.
:param parent | <QWidget> || None
name | <str>
"""
actionGroup = super(UiLoader, self).createActionGroup(parent, name)
if not actionGroup.parent():
actionGroup.setParent(self._baseinstance)
setattr(self._baseinstance, name, actionGroup)
return actionGroup
def createLayout(self, className, parent=None, name=''):
"""
Overloads teh create action method to handle the proper base
instance information, similar to the PyQt4 loading system.
:param className | <str>
parent | <QWidget> || None
name | <str>
"""
layout = super(UiLoader, self).createLayout(className, parent, name)
setattr(self._baseinstance, name, layout)
return layout
def createWidget(self, className, parent=None, name=''):
"""
Overloads the createWidget method to handle the proper base instance
information similar to the PyQt4 loading system.
:param className | <str>
parent | <QWidget> || None
name | <str>
:return <QWidget>
"""
className = str(className)
# create a widget off one of our dynamic classes
if className in self.dynamicWidgets:
widget = self.dynamicWidgets[className](parent)
if parent:
widget.setPalette(parent.palette())
widget.setObjectName(name)
# hack fix on a QWebView (will crash app otherwise)
# forces a URL to the QWebView before it finishes
if className == 'QWebView':
widget.setUrl(QtCore.QUrl('http://www.google.com'))
# create a widget from the default system
else:
widget = super(UiLoader, self).createWidget(className, parent, name)
if parent:
widget.setPalette(parent.palette())
if parent is None:
return self._baseinstance
else:
setattr(self._baseinstance, name, widget)
return widget
#----------------------------------------------------------
class Uic(object):
def compileUi(self, filename, file):
import pysideuic
pysideuic.compileUi(filename, file)
def loadUi(self, filename, baseinstance=None):
"""
Generate a loader to load the filename.
:param filename | <str>
baseinstance | <QWidget>
:return <QWidget> || None
"""
try:
xui = ElementTree.parse(filename)
except xml.parsers.expat.ExpatError:
log.exception('Could not load file: %s' % filename)
return None
loader = UiLoader(baseinstance)
# pre-load custom widgets
xcustomwidgets = xui.find('customwidgets')
if xcustomwidgets is not None:
for xcustom in xcustomwidgets:
header = xcustom.find('header').text
clsname = xcustom.find('class').text
if not header:
continue
if clsname in loader.dynamicWidgets:
continue
# modify the C++ headers to use the Python wrapping
if '/' in header:
header = 'xqt.' + '.'.join(header.split('/')[:-1])
# try to use the custom widgets
try:
__import__(header)
module = sys.modules[header]
cls = getattr(module, clsname)
except (ImportError, KeyError, AttributeError):
log.error('Could not load %s.%s' % (header, clsname))
continue
loader.dynamicWidgets[clsname] = cls
loader.registerCustomWidget(cls)
# load the options
ui = loader.load(filename)
QtCore.QMetaObject.connectSlotsByName(ui)
return ui
class QDialog(QtGui.QDialog):
def __init__(self, *args):
|
manqala/erpnext | erpnext/schools/doctype/student_batch_name/test_student_batch_name.py | Python | gpl-3.0 | 300 | 0.006667 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe | Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Student Batch Name')
class TestStudentBatchName(unitt | est.TestCase):
pass
|
hajicj/safire | safire/learning/learners/__init__.py | Python | gpl-3.0 | 50 | 0.02 | from .base_sgd_ | learner import BaseSGD | Learner
|
tiagoprn/experiments | pyramid_basics/request_response/tutorial/views.py | Python | mit | 669 | 0.001495 | from pyramid.httpexceptions import HTTPFound
from pyramid.response import Response
from pyramid.view import (
view_config,
view_defaults
)
@view_defaults(renderer='home.p | t')
class TutorialViews:
def __init__(self, request):
self.request = request
@view_config(route_name='home')
def home(self): # Here we redirect to the p | lain route below
return HTTPFound(location='/plain')
@view_config(route_name='plain')
def plain(self):
name = self.request.params.get('name', 'No Name Provided')
body = 'URL %s with name: %s' % (self.request.url, name)
return Response(content_type='text/plain', body=body)
|
japeq/blackbeltsorvihero | create-pack.py | Python | lgpl-2.1 | 689 | 0.033382 | #!/usr/bin/env python
import os
pack = open('BlackBeltSorviHero.dat', 'w')
length = 0
offset = 0
for i in [1, 2]:
leng | th = 0
for fname in os.listdir('data'):
f = open('data/' + fname)
f.seek(0, 2)
size = f.tell()
f.close()
length += len("%s %d %d\n" % (fname, offset, size))
offset += size
offset = length + 4
for fname in | os.listdir('data'):
f = open('data/' + fname)
f.seek(0, 2)
size = f.tell()
f.close()
pack.write("%s %d %d\n" % (fname, offset, size))
offset += size
pack.write("END\n")
for fname in os.listdir('data'):
f = open('data/' + fname)
while True:
s = f.read(4096)
if not s:
break
pack.write(s)
size = f.tell()
f.close()
pack.close()
|
pombredanne/sparkey-python | test/binary_test.py | Python | apache-2.0 | 2,128 | 0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2013 | Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses | /LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sparkey
import tempfile
import os
import binascii
import unittest
keys = """
a7cb5f92f019fda84d5dd73c257d6f724402d56a
0fae6c3bec0e162343afee39009c8b7e7ad77747
1bff07d74a2080e1ce2b90b12f30f581f993b56f
d04d6442f15527716e89d012018718d124ac5897
7b3605d73c5426f0600acd73535c1a7c96c4ffb9
23c7102024d4aeb4b641db7370083a87586dea43
3fa47cce74af2e39a67d3bf559d8ba2c81688963
280ed99d30b701b97d436b3ac57231e9e38e8a4a
6706a6c6c7ea2f4cfe1eb8dd786427675c4cbb4b
a8a39e52b08763ce1610400f0e789b798e89b885
2d70d150c52804485bc04367155ae4a2ff89768f
28547a874f734dc7062c859e8409a39d7903f9f1
8906ee2fcc0f62f782a9c95557bb785e9145cc33
cec120769a81c544ff171ff21c5b66217103f038
f6a714ad3b43963fe38ab3541286f9440ae96d16
a715a608f9baf1c26e0c59c72592a2b19412270b
30f7286d1100f4c115add1df87312e00a6b71012
059c6aa8b39796b9e6c10a70ac84a209eeed3c81
f9f982ba4ea5906e455cef05036700948ed4c576
""".split('\n')
class TestBinary(unittest.TestCase):
def setUp(self):
self.logfile = tempfile.mkstemp()[1]
self.hashfile = tempfile.mkstemp()[1]
def tearDown(self):
os.remove(self.logfile)
os.remove(self.hashfile)
def test_binary(self):
writer = sparkey.HashWriter(self.hashfile, self.logfile)
for key in keys:
writer.put(binascii.unhexlify(key), 'value')
writer.close()
reader = sparkey.HashReader(self.hashfile, self.logfile)
for key in keys:
self.assertEqual('value', reader[binascii.unhexlify(key)])
reader.close()
|
ladybug-analysis-tools/ladybug-core | ladybug/climatezone.py | Python | gpl-3.0 | 3,407 | 0.002935 | # coding=utf-8
"""Functions for computing climate classifications/zones from weather data."""
from __future__ import division
from .datacollection import HourlyContinuousCollection
def ashrae_climate_zone(dry_bulb_temperature, annual_precipitation=None):
"""Estimate the ASHRAE climate zone from a single year of dry bulb temperature.
Note:
[1] American Society of Heating Refrigerating and Air-Conditioning Engineers.
2010. ASHRAE 90.1-2010, Table B-4 International Climate Zone Definitions.
Args:
dry_bulb_temperature: A HourlyContinuousCollection of air temperature data,
typically coming from an EPW.
annual_precipitation: A number for the total annual liquid precipitation
depth in millimeters. This is used to determine whether the resulting
climate has the "dry" classification. If None, the climate will always
be assumed to be humid (type "A"), which tends to be more common than
the dry classification (type "B").
Returns:
Text for the ASHRAE climate zone classification (eg. "4A").
"""
# check the input dry_bulb_temperature
dbt = dry_bulb_temperature
assert isinstance(dbt, HourlyContinuousCollection), \
'Expected HourlyContinuousCollection for ashrae_climate_zone ' \
'dry_bulb_temperature. Got {}.'.format(type(dbt))
aper = dbt.header.analysis_period
assert aper.is_annual, 'ashrae_climate_zone dry_bulb_temperature must be annual.'
assert aper.timestep == 1, \
'ashrae | _climate_zone dry_bulb_temperature must have | a timestep of 1.'
if dbt.header.unit != 'C':
dbt = dbt.to_unit('C')
# compute the number of heating and cooling degree days
cooling_deg_days, heating_deg_days = 0, 0
for t in dbt.values:
cdd = (t - 10) / 24 if t > 10 else 0
cooling_deg_days += cdd
hdd = (18 - t) / 24 if t < 18 else 0
heating_deg_days += hdd
# get the climate zone number from analysis of the degree days
potential_c, no_letter = False, False
if cooling_deg_days > 5000:
cz_number = '1'
elif cooling_deg_days > 3500:
cz_number = '2'
elif cooling_deg_days > 2500:
cz_number = '3'
elif cooling_deg_days <= 2500 and heating_deg_days <= 2000:
cz_number, potential_c = '3', True
elif cooling_deg_days <= 2500 and heating_deg_days <= 3000:
cz_number = '4'
elif heating_deg_days <= 3000:
cz_number, potential_c = '4', True
elif heating_deg_days <= 4000:
cz_number, potential_c = '5', True
elif heating_deg_days <= 5000:
cz_number = '6'
elif heating_deg_days <= 7000:
cz_number, no_letter = '7', True
else:
cz_number, no_letter = '8', True
# determine the letter of the climate zone and return the result
if no_letter:
return cz_number
if potential_c:
month_temps = dry_bulb_temperature.average_monthly()
if -3 < month_temps.min < 18 and month_temps.max < 22:
if len([mon for mon in month_temps if mon > 10]) >= 4:
return '{}C'.format(cz_number)
if annual_precipitation is not None:
precipitation_limit = 20 * (dry_bulb_temperature.average + 7)
if annual_precipitation < precipitation_limit:
return '{}B'.format(cz_number)
return '{}A'.format(cz_number)
|
yahoo/Image-Builder | build.py | Python | apache-2.0 | 15,634 | 0.002111 | #!/usr/bin/python
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import json
import optparse
import os
import shutil
import sys
import tarfile
import tempfile
import traceback
import urllib
import uuid
from contextlib import (closing, contextmanager)
from builder import modules
from builder import util
from builder.downloader import tar_ball
import tempita
# Todo allow these to be configurable??
HASH_ROUTINE = 'md5'
# The first partition starts at block 63, and that each block is 512 bytes.
# So partition 1 starts at byte 32256
PART_OFFSET = 63 * 512
@contextmanager
def cmd_undo(undo_how):
try:
yield None
finally:
try:
util.subp(undo_how)
except:
pass
def import_module(module_name):
__import__(module_name)
return sys.modules[module_name]
def run_modules(root_dir, config):
config = copy.deepcopy(config)
mods = config.pop('modules', None)
if not mods:
mods = []
failures = []
which_ran = []
for real_name in mods:
name = real_name.strip()
name = name.replace('-', '_')
if not name:
continue
try:
which_ran.append(real_name)
mod_name = "%s.%s" % (util.obj_name(modules), name)
mod = import_module(mod_name)
| fu | nctor = getattr(mod, 'modify')
# Give the modules a copy of the config
# and not the 'real' thing, so that
# they can't screw it up...
args = [real_name, root_dir, copy.deepcopy(config)]
functor(*args)
except:
print("Exception in module %r:" % (real_name))
print('-' * 60)
traceback.print_exc(file=sys.stdout)
print('-' * 60)
failures.append(real_name)
return (which_ran, failures)
def fix_fstab(root_dir, fstype):
# /etc/fstab format
# <file system> <dir>
# <type> <options> <dump> <pass>
lines = [
'# Generated on %s' % (util.time_rfc2822()),
'%s%14s%14s%14s%14s%6s' % ('LABEL=root',
'/', fstype, 'defaults', '0', '0')
]
contents = "\n".join(lines)
print("Writing a new fstab:")
print(contents)
util.write_file(util.abs_join(root_dir, 'etc', 'fstab'),
"%s\n" % (contents))
def dd_off(loop_dev, tmp_dir, block_size='32768k'):
tmp_fn = tempfile.mktemp(dir=tmp_dir, suffix='.raw')
cmd = [
'dd',
'if=%s' % (loop_dev),
'bs=%s' % (block_size),
'of=%s' % (tmp_fn),
]
util.subp(cmd, capture=False)
return tmp_fn
def hash_file(path, out_fn, routine):
hasher = hashlib.new(routine)
def hash_cb(_byte_am, chunk):
hasher.update(chunk)
base_name = os.path.basename(path)
with open(path, 'rb') as in_fh:
byte_size = os.path.getsize(path)
with open(os.devnull, 'wb') as out_fh:
util.pretty_transfer(in_fh, out_fh,
name="%s hashing %s" % (routine.capitalize(), base_name),
chunk_cb=hash_cb, max_size=byte_size)
# The md5 sum program produces this output format, so mirror that...
digest = hasher.hexdigest().lower()
contents = "%s %s\n" % (digest, os.path.basename(path))
util.write_file(out_fn, contents)
def transfer_into_tarball(path, arc_name, tb):
fns = [arc_name]
util.print_iterable(fns,
header="Adding the following to your tarball %s"
% (util.quote(tb.name)))
print("Please wait...")
tb.add(path, arc_name, recursive=False)
def make_virt_xml(kernel_fn, ram_fn, root_fn):
params = {
'name': uuid.uuid5(uuid.NAMESPACE_URL,
# Just a fake url to get a uuid
'http://images.yahoo.com/%s/%s/%s' %
(urllib.quote(root_fn),
urllib.quote(kernel_fn),
urllib.quote(ram_fn))),
# 512 MB of ram should be enough for everyone
'memory': (512 * 1024 * 1024),
# Add a fake basepath on, to ensure
# that users replace this since it apparently
# requires a fully specified path to work
'kernel': "{basepath}/" + os.path.basename(kernel_fn),
'initrd': "{basepath}/" + os.path.basename(ram_fn),
'root': "{basepath}/" + os.path.basename(root_fn),
}
tpl_c = util.load_file(util.abs_join('templates', 'virt.xml'))
tpl = tempita.Template(tpl_c)
return tpl.substitute(**params)
def ec2_convert(raw_fn, out_fn, out_fmt, strip_partition, compress):
# Extract the ramdisk/kernel
devname = create_loopback(raw_fn, PART_OFFSET)
with util.tempdir() as tdir:
img_dir = os.path.join(tdir, 'img')
root_dir = os.path.join(tdir, 'mnt')
util.ensure_dirs([img_dir, root_dir])
with cmd_undo(['losetup', '-d', devname]):
print("Copying off the ramdisk and kernel files.")
# Mount it
util.subp(['mount', devname, root_dir])
with cmd_undo(['umount', root_dir]):
# Find the right files
fns = {}
for fn in os.listdir(util.abs_join(root_dir, 'boot')):
if fn.endswith('.img') and fn.startswith('initramfs-'):
fns['ramdisk'] = fn
if fn.startswith('vmlinuz-'):
fns['kernel'] = fn
if fn.startswith('initrd-') and fn.endswith('.img'):
fns['base'] = fn
rd_fn = fns.get('ramdisk')
k_fn = fns.get('kernel')
if (not rd_fn and not k_fn) and 'base' in fns:
kid = fns['base']
kid = kid[0:-len('.img')]
kid = kid[len('initrd-'):]
cmd = ['chroot', root_dir,
'/sbin/mkinitrd', '-f',
os.path.join('/boot', fns['base']),
kid]
util.subp(cmd, capture=False)
if os.path.isfile(util.abs_join(root_dir, "boot",
"initramfs-%s.img" % (kid))):
rd_fn = "initramfs-%s.img" % (kid)
if os.path.isfile(util.abs_join(root_dir, "boot",
"vmlinuz-%s" % (kid))):
k_fn = "vmlinuz-%s" % (kid)
if not rd_fn:
raise RuntimeError("No initramfs-*.img file found")
if not k_fn:
raise RuntimeError("No vmlinuz-* file found")
shutil.move(util.abs_join(root_dir, 'boot', rd_fn),
util.abs_join(img_dir, rd_fn))
shutil.move(util.abs_join(root_dir, 'boot', k_fn),
util.abs_join(img_dir, k_fn))
# Copy off the data (minus the partition info)
if strip_partition:
print("Stripping off the partition table.")
print("Please wait...")
part_stripped_fn = dd_off(devname, tdir)
# Replace the orginal 'raw' file
if strip_partition:
shutil.move(part_stripped_fn, raw_fn)
# Apply some tune ups
cmd = [
'tune2fs',
# Set the volume label of the filesystem
'-L', 'root',
raw_fn
]
util.subp(cmd, capture=False)
# Convert it to |
flaing/gemrb | gemrb/GUIScripts/GUIRECCommon.py | Python | gpl-2.0 | 19,924 | 0.044017 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2011 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# code shared between the common GUIREC and that of iwd2 (pst)
import GemRB
import GameCheck
import GUICommon
import Portrait
from GUIDefines import *
from ie_stats import IE_SEX, IE_RACE, IE_MC_FLAGS, MC_EXPORTABLE
from ie_restype import RES_WAV
BiographyWindow = None
CustomizeWindow = None
SubCustomizeWindow = None
SubSubCustomizeWindow = None
ExportWindow = None
NameField = ExportDoneButton = None
ScriptsTable = None
RevertButton = None
if GameCheck.IsBG2() or GameCheck.IsBG1():
BioStrRefSlot = 74
else:
BioStrRefSlot = 63
if GameCheck.IsBG2() or GameCheck.IsIWD2():
PortraitNameSuffix = "L"
else:
PortraitNameSuffix = "G"
PortraitPictureButton = None
PortraitList1 = PortraitList2 = RowCount1 = RowCount2 = None
# the available sounds
if GameCheck.IsIWD1() or GameCheck.IsIWD2():
SoundSequence = [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', \
'13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', \
'25', '26', '27', '28', '29', '30', '31']
else:
SoundSequence = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', \
'm', 's', 't', 'u', 'v', '_', 'x', 'y', 'z', '0', '1', '2', \
'3', '4', '5', '6', '7', '8', '9']
SoundIndex = 0
VoiceList = None
OldVoiceSet = None
Gender = None
def OpenCustomizeWindow ():
import GUIREC
global CustomizeWindow, ScriptsTable, Gender
pc = GemRB.GameGetSelectedPCSingle ()
if GemRB.GetPlayerStat (pc, IE_MC_FLAGS)&MC_EXPORTABLE:
Exportable = 1
else:
Exportable = 0
ScriptsTable = GemRB.LoadTable ("SCRPDESC")
GUIREC.ColorTable = GemRB.LoadTable ("CLOWNCOL")
Gender = GemRB.GetPlayerStat (pc, IE_SEX)
CustomizeWindow = GemRB.LoadWindow (17)
PortraitSelectButton = CustomizeWindow.GetControl (0)
PortraitSelectButton.SetText (11961)
if not Exportable:
PortraitSelectButton.SetState (IE_GUI_BUTTON_DISABLED)
SoundButton = CustomizeWindow.GetControl (1)
SoundButton.SetText (10647)
if not Exportable:
SoundButton.SetState (IE_GUI_BUTTON_DISABLED)
if not GameCheck.IsIWD2():
ColorButton = CustomizeWindow.GetControl (2)
ColorButton.SetText (10646)
ColorButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUIREC.OpenColorWindow)
if not Exportable:
ColorButton.SetState (IE_GUI_BUTTON_DISABLED)
ScriptButton = CustomizeWindow.GetControl (3)
ScriptButton.SetText (17111)
#This button does not exist in bg1 and pst, but theoretically we could create it here
if not (GameCheck.IsBG1() or GameCheck.IsPST()):
BiographyButton = CustomizeWindow.GetControl (9)
BiographyButton.SetText (18003)
BiographyButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenBiographyEditWindow)
if not Exportable:
BiographyButton.SetState (IE_GUI_BUTTON_DISABLED)
TextArea = CustomizeWindow.GetControl (5)
TextArea.SetText (11327)
CustomizeDoneButton = CustomizeWindow.GetControl (7)
CustomizeDoneButton.SetText (11973)
CustomizeDoneButton.SetState (IE_GUI_BUTTON_ENABLED)
CancelButton = CustomizeWindow.GetControl (8)
CancelButton.SetText (13727)
CancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
PortraitSelectButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenPortraitSelectWindow)
SoundButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenSoundWindow)
ScriptButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenScriptWindow)
CustomizeDoneButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CustomizeDonePress)
CancelButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CustomizeCancelPress)
CustomizeWindow.ShowModal (MODAL_SHADOW_GRAY)
return
def CustomizeDonePress ():
	"""'Done' handler for the customize window: commit by simply closing it."""
	CloseCustomizeWindow ()
def CustomizeCancelPress ():
	"""'Cancel' handler for the customize window; closes it like Done does."""
	CloseCustomizeWindow ()
def CloseCustomizeWindow ():
	"""Unload the customize window, if open, then refresh the records screen."""
	import GUIREC
	global CustomizeWindow

	win = CustomizeWindow
	if win:
		win.Unload ()
		CustomizeWindow = None
	GUIREC.UpdateRecordsWindow ()
def OpenPortraitSelectWindow ():
	"""Open the portrait picker (window 18): preview plus prev/next/done/cancel/custom controls."""
	global PortraitPictureButton, SubCustomizeWindow

	SubCustomizeWindow = GemRB.LoadWindow (18)

	# Control 0: portrait preview; LOCKED so clicks on it are ignored.
	PortraitPictureButton = SubCustomizeWindow.GetControl (0)
	PortraitPictureButton.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_NO_IMAGE,OP_SET)
	PortraitPictureButton.SetState (IE_GUI_BUTTON_LOCKED)

	# Controls 1/2: step backwards/forwards through the stock portraits.
	PortraitLeftButton = SubCustomizeWindow.GetControl (1)
	PortraitLeftButton.SetState (IE_GUI_BUTTON_ENABLED)
	PortraitLeftButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, PortraitLeftPress)
	PortraitRightButton = SubCustomizeWindow.GetControl (2)
	PortraitRightButton.SetState (IE_GUI_BUTTON_ENABLED)
	PortraitRightButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, PortraitRightPress)

	# Done (strref 11973) commits the shown portrait; marked as default button.
	PortraitDoneButton = SubCustomizeWindow.GetControl (3)
	PortraitDoneButton.SetState (IE_GUI_BUTTON_ENABLED)
	PortraitDoneButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, PortraitDonePress)
	PortraitDoneButton.SetText (11973)
	PortraitDoneButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)

	# Cancel (strref 13727) just closes the subwindow.
	PortraitCancelButton = SubCustomizeWindow.GetControl (4)
	PortraitCancelButton.SetState (IE_GUI_BUTTON_ENABLED)
	PortraitCancelButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CloseSubCustomizeWindow)
	PortraitCancelButton.SetText (13727)
	PortraitCancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)

	# Control 5: switch to the custom (user-supplied) portrait chooser.
	PortraitCustomButton = SubCustomizeWindow.GetControl (5)
	PortraitCustomButton.SetState (IE_GUI_BUTTON_ENABLED)
	PortraitCustomButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenCustomPortraitWindow)
	PortraitCustomButton.SetText (17545)

	# get players gender and portrait
	Pc = GemRB.GameGetSelectedPCSingle ()
	PcPortrait = GemRB.GetPlayerPortrait(Pc,0)

	# initialize and set portrait
	# NOTE(review): Gender is the module-level value cached when the customize
	# window was opened - confirm it matches the currently selected PC.
	Portrait.Init (Gender)
	Portrait.Set (PcPortrait)
	PortraitPictureButton.SetPicture (Portrait.Name () + PortraitNameSuffix, "NOPORTLG")

	SubCustomizeWindow.ShowModal (MODAL_SHADOW_GRAY)
	return
def PortraitDonePress ():
	"""Apply the currently shown stock portrait to the selected PC and close the picker.

	Fixes two garbled tokens in the previous revision: a stray '| ' after
	GameCheck.IsBG2() (a syntax error) and a corrupted small-portrait
	suffix '"S | "' in the non-BG2 branch.
	"""
	pc = GemRB.GameGetSelectedPCSingle ()
	# eh, different sizes: BG2 uses the "M" large-portrait suffix, the other games "L";
	# the small portrait suffix is "S" everywhere.
	if GameCheck.IsBG2():
		GemRB.FillPlayerInfo (pc, Portrait.Name () + "M", Portrait.Name () + "S")
	else:
		GemRB.FillPlayerInfo (pc, Portrait.Name () + "L", Portrait.Name () + "S")
	CloseSubCustomizeWindow ()
	return
def PortraitLeftPress ():
	"""Step to the previous stock portrait and show it in the preview button."""
	global PortraitPictureButton
	PortraitPictureButton.SetPicture (Portrait.Previous () + PortraitNameSuffix, "NOPORTLG")
def PortraitRightPress ():
	"""Step to the next stock portrait and show it in the preview button."""
	global PortraitPictureButton
	PortraitPictureButton.SetPicture (Portrait.Next () + PortraitNameSuffix, "NOPORTLG")
def OpenCustomPortraitWindow ():
global SubSubCustomizeWindow
global PortraitList1, PortraitList2
global RowCount1, RowCount2
SubSubCustomizeWindow = GemRB.LoadWindow (19)
CustomPortraitDoneButton = SubSubCustomizeWindow.GetControl (10)
CustomPortraitDoneButton.SetState (IE_GUI_BUTTON_DISABLED)
CustomPortraitDoneButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CustomPortraitDonePress)
CustomPortraitDoneButton.SetText (11973)
CustomPortraitDoneButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
CustomPortraitCancelButton = SubSubCustomizeWindow.GetControl (11)
CustomPortraitCancelButton.SetState (IE_GUI_BUTTON_ENABLED)
CustomPortraitCancelButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CloseSubSubCustomizeWindow)
CustomPortraitCancelButton.SetText (13727)
CustomPortraitCancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
if not GameCheck.IsIWD1():
SmallPortraitButton = SubSubCustomizeWindow.GetControl (1)
SmallPortraitButton.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_NO_IMAGE,OP_SET)
LargePortraitButton = SubSubCustomizeWindow.GetControl (0)
LargePortraitButton.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_NO_IMAGE,OP_SET)
	# Portrait List Large
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/discovery_setting_broker.py | Python | apache-2.0 | 69,668 | 0.002081 | from ..broker import Broker
class DiscoverySettingBroker(Broker):
controller = "discovery_settings"
    def index(self, **kwargs):
        """Lists the available discovery settings. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.

            **Inputs**

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param id: The internal NetMRI identifier for the discovery setting.
            :type id: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param id: The internal NetMRI identifier for the discovery setting.
            :type id: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param UnitID: The internal NetMRI identifier collector assigned to the discovery setting.
            :type UnitID: Integer

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param UnitID: The internal NetMRI identifier collector assigned to the discovery setting.
            :type UnitID: Array of Integer

            | ``api version min:`` 2.3
            | ``api version max:`` 2.4
            | ``required:`` False
            | ``default:`` None

            :param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
            :type range_type: String

            | ``api version min:`` 2.5
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
            :type range_type: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` 0

            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
            :type start: Integer

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` 1000

            :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
            :type limit: Integer

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` id

            :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count.
            :type sort: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` asc

            :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
            :type dir: Array of String

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param select: The list of attributes to return for each DiscoverySetting. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, created_by, updated_by, ping_sweep_ind, smart_ping_sweep_ind, start_blackout_schedule, blackout_duration, virtual_network_id, start_port_control_blackout_schedule, port_control_blackout_duration, cidr_count. If empty or omitted, all attributes will be returned.
            :type select: Array

            | ``api version min:`` 2.8
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
            :type goto_field: String

            | ``api version min:`` 2.8
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
            :type goto_value: String

            **Outputs**

            | ``api version min:`` None
            | ``api version max:`` None
            | ``required:`` False
            | ``default:`` None

            :return discovery_settings: An array of the DiscoverySetting objects that match the specified input criteria.
            :rtype discovery_settings: Array of DiscoverySetting

            """

        # Delegate to the generic list-request machinery of the Broker base
        # class; the server-side endpoint name is derived from "index".
        return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available d | iscovery settings matching the input criteria. This met | hod provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param range_type: The discovery setting range type (CIDR, RANGE, SEED, STATIC, WILDCARD).
:type range_type: Array of String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier collector assigned to the discovery setting.
:type UnitID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, range_value, range_start, range_end, range_start_numeric, range_end_numeric, range_mask, range_type, discovery_status, created_at, updated_at, UnitID, cre |
janelia-ros/zaber_stage_ros | nodes/pose_and_debug_publisher.py | Python | bsd-3-clause | 1,472 | 0.01019 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import roslib; roslib.load_manifest('zaber_stage')
import rospy
import actionlib
from geometry_msgs.msg import Pose
from zaber_stage.msg import PoseAndDebugInfo
from zaber_stage.srv import GetPoseAndDebugInfo,GetPoseAndDebugInfoResponse
def pose_publisher():
rospy.init_node('zaber_stage_pose_and_debug_publisher')
rospy.loginfo('zaber_stage pose_and_debug_publisher...')
rate = rospy.Rate(4)
pub_pose = rospy.Publisher('/zaber_stage_node/pose',Pose,queue_size=10 | )
pub_pose_and_debug = rospy.Publisher('/zaber_stage_node/pose_and_debug_info',PoseAndDebugInfo,queue_size=10)
rospy.wait_for_service('/zaber_stage_node/get_pose_and_debug_info')
get_pose_and_debug_info = rospy.ServiceProxy('/zaber_stage_node/get_pose_and_debug_info',GetPoseAndDebugInfo,persistent=True)
while not rospy.is_shutdown():
try:
response = get_pose_and_debug_info()
if not respons | e.pose_and_debug_info.zaber_response_error:
pub_pose.publish(response.pose_and_debug_info.pose)
pub_pose_and_debug.publish(response.pose_and_debug_info)
except rospy.ServiceException, e:
rospy.logwarn('zaber_stage pose_and_debug_publisher service call failed! %s'%e)
print "Service call failed: %s"%e
rate.sleep()
if __name__ == '__main__':
    try:
        pose_publisher()
    except rospy.ROSInterruptException:
        # Normal shutdown path (Ctrl-C / node kill); nothing to clean up.
        pass
|
cmand/scamper | sc_tracebox.py | Python | bsd-3-clause | 7,053 | 0.013895 | #!/usr/bin/env python
#
# Program: $Id: $
# Author: Robert Beverly <rbeverly@nps.edu>
# Description: Experimental tracebox warts parser
import sys
import struct
import dpkt
from sc_warts import *
# NOTE(review): dpkt 1.8 is rejected outright - presumably its IP/TCP parsing
# is incompatible with the code below; confirm the actual minimum version.
if dpkt.__version__ == '1.8':
    print "Upgrade dpkt"
    sys.exit(-1)

# Warts object type code identifying tracebox records in the input file.
TRACEBOXTYPE = 0x0c
def dict_diff(a, b):
    """Return {key: (a_val, b_val)} for keys present in both dicts whose values differ.

    Keys that appear in only one of the two dicts are ignored.

    :param a: first dict (e.g. the transmitted packet's fields)
    :param b: second dict (e.g. the quoted packet's fields)
    :return: dict mapping each differing key to the (a, b) value pair
    """
    # Comprehension replaces the previous manual nested-if loop and the
    # commented-out (and semantically different) symmetric-difference variant.
    return {k: (a[k], b[k]) for k in a if k in b and a[k] != b[k]}
class WartsTraceBox | Reader(WartsReader):
def __init__(self, wartsfile, verbose=False):
super(WartsTraceBoxReader, self).__init__(wartsfile, verbose)
def next(self):
while True:
obj = self.next_object()
if not obj:
return (False, False)
if (obj.typ == TRACEBOXTYPE):
return (obj.flags, obj.pkts)
def next_object(self):
# read warts object header
self.header = self.fd.read(8)
# sanity check
if len(self.header) != 8:
return None
(magic, typ, length) = struct.unpack('!HHI', self.header)
if self.verbose:
print "Magic: %02X Obj: %02X Len: %02x" % (magic, typ, length)
assert(magic == obj_type['MAGIC'])
# read remainder of object
data = self.fd.read(length)
if typ == obj_type['LIST']:
return WartsList(data, verbose=self.verbose)
elif typ == obj_type['CYCLESTART']:
return WartsCycle(data, verbose=self.verbose)
elif typ == obj_type['CYCLE']:
return WartsCycle(data, verbose=self.verbose)
elif typ == obj_type['CYCLE_STOP']:
return WartsCycleStop(data, verbose=self.verbose)
elif typ == TRACEBOXTYPE:
return WartsTraceBox(data, verbose=self.verbose)
else:
print "Unsupported object: %02x Len: %d" % (typ, length)
assert False
class WartsTraceBox(WartsBaseObject):
    """A parsed warts tracebox record: measurement parameters plus its packets."""

    def __init__(self, data, verbose=False):
        super(WartsTraceBox, self).__init__(TRACEBOXTYPE, verbose)
        self.data = data
        self.flagdata = data
        # Decoded per-packet flag dicts, filled in below.
        self.pkts = []
        # (flag name, decoder) pairs in on-the-wire flag order.
        self.flag_defines = [
            ('listid', unpack_uint32_t),
            ('cycleid', unpack_uint32_t),
            ('userid', unpack_uint32_t),
            ('srcaddr', self.unpack_address),
            ('dstaddr', self.unpack_address),
            ('sport', unpack_uint16_t),
            ('dport', unpack_uint16_t),
            ('start', read_timeval),
            ('result', unpack_uint16_t),
            ('rtt', unpack_uint8_t),
            ('qtype', unpack_uint8_t),
            ('udp', unpack_uint8_t),
            ('printmode', unpack_uint8_t),
            ('pktc16', unpack_uint16_t),
            ('pktc', unpack_uint32_t),
        ]
        flag_bytes = self.read_flags()
        if self.verbose:
            print "TB Params:", self.flags
        # The per-packet records follow the flag block; 'pktc' gives their count.
        offset = flag_bytes
        for i in range(self.flags['pktc']):
            pkt = WartsTraceBoxPkt(data[offset:], self.referenced_address, self.verbose)
            self.pkts.append(pkt.flags)
            offset+=pkt.flag_bytes
            if self.verbose: print "Pkt %d: %s" % (i+1, pkt.flags)
class WartsTraceBoxPkt(WartsBaseObject):
    """One captured packet inside a tracebox record, with its IP/TCP fields decoded."""

    def __init__(self, data, refs, verbose=False):
        super(WartsTraceBoxPkt, self).__init__(TRACEBOXTYPE, verbose)
        self.update_ref(refs)
        self.flagdata = data
        # Per-packet flags: direction (1=TX), capture time, length, raw bytes.
        self.flag_defines = [
            ('dir', unpack_uint8_t),
            ('time', read_timeval),
            ('len', unpack_uint16_t),
            ('data', self.read_pass),
        ]
        self.flag_bytes = self.read_flags()
        # The packet bytes follow the flag block; decode them into a field dict.
        datalen = self.flags['len']
        self.flags['data'] = self.read_tracebox_pkt(data[self.flag_bytes:self.flag_bytes+datalen])
        self.flag_bytes += self.flags['len']

    def read_pass(self, b):
        # Placeholder decoder: the raw bytes are handled separately in __init__.
        return ("pass", 0)

    def read_tracebox_pkt(self, data):
        """Decode a captured packet into {'hop': addr, 'IP::...': val, 'TCP::...': val}."""
        fields = dict()
        ip = dpkt.ip.IP(data)
        fields['hop'] = socket.inet_ntoa(ip.src)
        if ip.p == dpkt.ip.IP_PROTO_ICMP:
            # This is a reply from a hop
            fields['hop'] = socket.inet_ntoa(ip.src)
            icmp = ip.data
            #print "ICMP quote:", icmp.type, icmp.code, "LEN:", len(icmp.data.data)
            # icmp.data is type dpkt.icmp.TimeExceed
            # so, icmp.data.data is a dpkt.ip.IP
            ip = icmp.data.data
        # From here on, 'ip' is either the original probe or the quoted probe.
        fields['IP::Version'] = ip.v
        fields['IP::IHL'] = ip.hl
        # TOS byte splits into a 6-bit DSCP and a 2-bit ECN field.
        dscp = (ip.tos & 0xFC) >> 2
        ecn = (ip.tos & 0x03)
        fields['IP::DiffServicesCP'] = hex(dscp)
        fields['IP::ECN'] = hex(ecn)
        fields['IP:Length'] = hex(ip.len)
        fields['IP:ID'] = ip.id
        flags = (ip.df >> 1) + ip.mf
        fields['IP:Flags'] = hex(flags)
        fields['IP:FragmentOffset'] = ip.offset
        fields['IP:TTL'] = ip.ttl
        fields['IP::Protocol'] = ip.p
        fields['IP::Checksum'] = hex(ip.sum)
        fields['IP::SourceAddr'] = socket.inet_ntoa(ip.src)
        fields['IP::DestAddr'] = socket.inet_ntoa(ip.dst)
        if ip.p == dpkt.ip.IP_PROTO_TCP:
            tcp = ip.data
            if not isinstance(tcp, dpkt.tcp.TCP):
                #print "Partial quote!"
                # ICMP quote truncated the TCP header: pad to a minimal
                # 20-byte header (offset nibble 0x50) so dpkt can parse it.
                z = struct.pack('12sB',ip.data,0x50) + struct.pack('7B',*([0]*7))
                tcp = dpkt.tcp.TCP(z)
                #print type(tcp)
            # Only record the TCP fields actually present in the quote,
            # judging by how many bytes of the transport header survived.
            if len(ip.data) >= 4:
                fields['TCP::SPort'] = hex(tcp.sport)
                fields['TCP::DPort'] = hex(tcp.dport)
            if len(ip.data) >= 8:
                fields['TCP::SeqNumber'] = hex(tcp.seq)
            if len(ip.data) >= 12:
                fields['TCP::AckNumber'] = hex(tcp.ack)
            if len(ip.data) >= 16:
                fields['TCP::Offset'] = hex(tcp.off)
                fields['TCP::Flags'] = hex(tcp.flags)
                fields['TCP::Window'] = hex(tcp.win)
            if len(ip.data) == 20:
                fields['TCP::Checksum'] = hex(tcp.sum)
                fields['TCP::UrgentPtr'] = hex(tcp.urp)
            if len(ip.data) >= 20:
                if len(tcp.opts) > 0:
                    opts = dpkt.tcp.parse_opts(tcp.opts)
                    for o,d in opts:
                        if o == dpkt.tcp.TCP_OPT_EOL:
                            fields['TCP::OPT_EOL'] = d
                        elif o == dpkt.tcp.TCP_OPT_NOP:
                            fields['TCP::OPT_NOP'] = d
                        elif o == dpkt.tcp.TCP_OPT_MSS:
                            fields['TCP::OPT_MSS'] = d
                        elif o == dpkt.tcp.TCP_OPT_WSCALE:
                            fields['TCP::OPT_WSCALE'] = d
                        elif o == dpkt.tcp.TCP_OPT_SACKOK:
                            fields['TCP::OPT_SACKOK'] = d
                        elif o == dpkt.tcp.TCP_OPT_SACK:
                            fields['TCP::OPT_SACK'] = d
                        elif o == dpkt.tcp.TCP_OPT_TIMESTAMP:
                            fields['TCP::OPT_TIMESTAMP'] = d
        return fields
if __name__ == "__main__":
assert len(sys.argv) == 2
w = WartsTraceBoxReader(sys.argv[1], verbose=False)
while True:
(flags, pkts) = w.next()
if flags == False: break
print "tracebox from %s to %s (result: %d)" % (flags['srcaddr'], flags['dstaddr'], flags['result'])
last_tx = None
last_tx_ts = 0
i = 0
for pkt in pkts:
ts = pkt['time'] - flags['start']
if pkt['dir'] == 1: #TX
#print " TX at %1.3f:" % (ts)
if last_tx != None:
i+=1
print " %d: *" % (i)
last_tx = pkt['data']
last_tx_ts = pkt['time']
else: #RX
#print " RX at %1.3f:" % (ts)
i+=1
rtt = (pkt['time'] - last_tx_ts)*1000.0
if last_tx:
diff = dict_diff(last_tx, pkt['data'])
print " %d: %s RTT:%1.3f: %s" % (i, pkt['data']['hop'], rtt, " ".join(diff.keys()))
last_tx = None
|
OkCupid/okws | test/regtest/cases/98.py | Python | gpl-2.0 | 542 | 0.027675 | test = "tes | t of the localtime() function"
import time
times = [ 0, 100000, int (time.time()) ] |
filedata = """
{$
for (i, %(times)s) {
locals { v : localtime(i) }
print ("${v[0]} ${v[1]} ${v[2]} ${v[3]} ${v[4]} ");
}
$}
""" % { "times" : times }
# in publand, localtime(0) should give the time now
times[0] = int (time.time())
outcome_v = []
for i in times:
lt = time.localtime (i)
outcome_v += [ lt.tm_year, lt.tm_mon, lt.tm_mday, lt.tm_hour, lt.tm_min ]
outcome = " ".join ([ str (i) for i in outcome_v ])
|
TobiHartmann/hotspot | benchmarks/run_tests.py | Python | gpl-2.0 | 18,824 | 0.020399 | import os
import shlex
import datetime
import time
import shutil
from subprocess import Popen, PIPE
home = "/home/tobias/"
vm_config = " -XX:+PrintCodeCache "
# Benchmark configurations
dacapo_names = ("avrora", "batik", "fop", "h2", "jython", "luindex", "lusearch" ,"pmd", "sunflow", "tomcat", "tradebeans", "xalan")
cmd_dacapo = "-jar " + home + "/programs/DaCapo/dacapo-9.12-bach.jar -n 20 "
cmd_octane = "-Dengine=nashorn -jar " + home + "/programs/jdk8/jre/lib/ext/nashorn.jar run.js "
cmd_specjvm = "-jar " + home + "/programs/SPECjvm2008/SPECjvm2008.jar -coe startup.helloworld startup.compiler.compiler startup.compress startup.crypto.aes startup.crypto.rsa startup.crypto.signverify startup.mpegaudio startup.scimark.fft startup.scimark.lu startup.scimark.monte_carlo startup.scimark.sor startup | .scimark.sparse startup.serial startup.sunflow startup.xml.transform startup.xml.validation compress crypto.aes crypto.rsa crypto.signverify mpegaudio scimark.fft.large scimark.lu.large | scimark.sor.large scimark.sparse.large scimark.fft.small scimark.lu.small scimark.sor.small scimark.sparse.small scimark.monte_carlo serial sunflow xml.validation"
cmd_specjbb2005 = "-cp './jbb.jar:./check.jar' spec.jbb.JBBmain -propfile SPECjbb.props"
cmd_specjbb2013 = "-jar " + home + "/programs/SPECjbb2013/specjbb2013.jar -m COMPOSITE"
def run_test(name, chdir, cmd, runs, enable_jstat=False, redirect=True):
    """Run `cmd` `runs` times from directory `chdir`, logging to <home>/<name>.log.

    When `redirect` is set, each run's stdout/stderr are appended to
    <name>.log / <name>_err.log together with a timestamped elapsed-time
    marker. When `enable_jstat` is set, a `jstat -compiler` sampler is
    attached to the benchmark JVM and its final output line is appended
    to both logs.
    """
    os.chdir(chdir)
    if redirect:
        # Append mode so repeated invocations accumulate in one log pair.
        f_stdout = open(home + name + ".log", 'a');
        f_stderr = open(home + name + "_err.log", 'a');
        f_stdout.write(cmd + "\n")
        f_stderr.write(cmd + "\n")
    for i in range (0, runs):
        print str(datetime.datetime.now()) + " " + name + ": Run " + str(i+1) + " of " + str(runs)
        print(cmd + "\n")
        start_time = time.time()
        if redirect:
            process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
        else:
            process = Popen(shlex.split(cmd))
        if enable_jstat:
            # Give the JVM a moment to start before attaching the sampler.
            time.sleep(2)
            jstat = Popen(shlex.split(home + "/programs/jdk8/bin/jstat -compiler " + str(process.pid) + " 500"), stdout=PIPE, stderr=PIPE)
            # jstat exits once the target JVM terminates.
            os.waitpid(jstat.pid, 0)
        # NOTE(review): calling waitpid before communicate() while stdout=PIPE
        # can deadlock if the benchmark fills the pipe buffer - confirm the
        # benchmark output volume stays below the pipe capacity.
        exit_code = os.waitpid(process.pid, 0)
        output, output_err = process.communicate()
        if enable_jstat:
            jstat_out, jstat_err = jstat.communicate()
            jstat.wait()
        process.wait()
        elapsed_time = (time.time() - start_time)
        if redirect:
            f_stdout.write(output)
            f_stdout.write("## TIME: " + str(datetime.datetime.now()) + " ## ELAPSED ## " + str(elapsed_time))
            f_stderr.write(output_err)
            f_stderr.write("## TIME: " + str(datetime.datetime.now()) + " ## ELAPSED ## " + str(elapsed_time))
            if enable_jstat:
                comp_stats = "\n## jstat compiler stats: " + jstat_out.splitlines()[-1] + "\n"
                f_stdout.write(comp_stats)
                f_stderr.write(comp_stats)
def get_cmd_vm_new(ReservedCodeCacheSize, CompilerCount = 0):
    """Build the java command line for the 'export_new' build.

    A value of 0 for either argument omits the corresponding -XX flag.
    """
    compiler_count = "" if CompilerCount == 0 else " -XX:CICompilerCount=" + str(CompilerCount) + " "
    base = home + "/export_new/bin/java " + vm_config
    if ReservedCodeCacheSize == 0:
        return base + " " + compiler_count
    return base + " -XX:ReservedCodeCacheSize=" + str(ReservedCodeCacheSize) + "m " + compiler_count
def get_cmd_vm_new_mb(ReservedCodeCacheSize, CompilerCount = 0):
    """Build the java command line for the 'export_new_mb' build.

    A value of 0 for either argument omits the corresponding -XX flag.
    """
    compiler_count = "" if CompilerCount == 0 else " -XX:CICompilerCount=" + str(CompilerCount) + " "
    base = home + "/export_new_mb/bin/java " + vm_config
    if ReservedCodeCacheSize == 0:
        return base + " " + compiler_count
    return base + " -XX:ReservedCodeCacheSize=" + str(ReservedCodeCacheSize) + "m " + compiler_count
def get_cmd_vm_new_mb_A(ReservedCodeCacheSize, CompilerCount = 0):
    """Build the java command line for the 'export_new_mb_A' build.

    A value of 0 for either argument omits the corresponding -XX flag.
    """
    compiler_count = "" if CompilerCount == 0 else " -XX:CICompilerCount=" + str(CompilerCount) + " "
    base = home + "/export_new_mb_A/bin/java " + vm_config
    if ReservedCodeCacheSize == 0:
        return base + " " + compiler_count
    return base + " -XX:ReservedCodeCacheSize=" + str(ReservedCodeCacheSize) + "m " + compiler_count
def get_cmd_vm_new_mb_B(ReservedCodeCacheSize, CompilerCount = 0):
    """Build the java command line for the 'export_new_mb_B' build.

    A value of 0 for either argument omits the corresponding -XX flag.
    """
    compiler_count = "" if CompilerCount == 0 else " -XX:CICompilerCount=" + str(CompilerCount) + " "
    base = home + "/export_new_mb_B/bin/java " + vm_config
    if ReservedCodeCacheSize == 0:
        return base + " " + compiler_count
    return base + " -XX:ReservedCodeCacheSize=" + str(ReservedCodeCacheSize) + "m " + compiler_count
def get_cmd_vm_new2(ReservedCodeCacheSize, NonMethodCodeHeapSize, NonProfiledCodeHeapSize, ProfiledCodeHeapSize):
    """Build the 'export_new' java command line with explicit code-heap sizing (all sizes in MB)."""
    parts = [home + "/export_new/bin/java ", vm_config,
             " -XX:ReservedCodeCacheSize=", str(ReservedCodeCacheSize),
             "m -XX:NonMethodCodeHeapSize=", str(NonMethodCodeHeapSize),
             "m -XX:NonProfiledCodeHeapSize=", str(NonProfiledCodeHeapSize),
             "m -XX:ProfiledCodeHeapSize=", str(ProfiledCodeHeapSize), "m "]
    return "".join(parts)
def get_cmd_vm_old(ReservedCodeCacheSize, CompilerCount = 0):
    """Build the java command line for the baseline 'export_old' build.

    A value of 0 for either argument omits the corresponding -XX flag.
    """
    compiler_count = "" if CompilerCount == 0 else " -XX:CICompilerCount=" + str(CompilerCount) + " "
    base = home + "/export_old/bin/java " + vm_config
    if ReservedCodeCacheSize == 0:
        return base + " " + compiler_count
    return base + " -XX:ReservedCodeCacheSize=" + str(ReservedCodeCacheSize) + "m " + compiler_count
def test_OctaneRatio():
    """Run Octane on the new VM while sweeping the non-profiled/profiled split of a 32 MB code cache."""
    ReservedCodeCacheSize = 32
    non_profile_sizes = (4, 8, 12, 16, 20, 24, 28)
    for i in range(0, len(non_profile_sizes)):
        NonProfiledCodeHeapSize = non_profile_sizes[i]
        # Profiled heap gets whatever the non-profiled heap leaves over.
        ProfiledCodeHeapSize = ReservedCodeCacheSize - NonProfiledCodeHeapSize
        # Run new vm
        # NOTE(review): get_cmd_vm_new2 takes four sizes (including
        # NonMethodCodeHeapSize) but is called with only three here, which
        # raises TypeError at runtime - this sweep appears stale; confirm.
        cmd_vm_new = get_cmd_vm_new2(ReservedCodeCacheSize, NonProfiledCodeHeapSize, ProfiledCodeHeapSize)
        run_test("OctaneNew_" + str(NonProfiledCodeHeapSize), home + "/sources/octane", cmd_vm_new + cmd_octane, 10);
def test_OctaneRatio2():
ReservedCodeCacheSize = 256
NonMethodCodeHeapSize = 5
NonProfiledCodeHeapSize = 126
ProfiledCodeHeapSize = 125
cmd_vm_new = get_cmd_vm_new2(ReservedCodeCacheSize, NonMethodCodeHeapSize, NonProfiledCodeHeapSize, ProfiledCodeHeapSize)
# run_test("OctaneNew_Ratio_" + str(NonProfiledCodeHeapSize), home + "/sources/octane", cmd_vm_new + cmd_octane, 20);
NonMethodCodeHeapSize = 5
NonProfiledCodeHeapSize = 84
ProfiledCodeHeapSize = 167
cmd_vm_new = get_cmd_vm_new2(ReservedCodeCacheSize, NonMethodCodeHeapSize, NonProfiledCodeHeapSize, ProfiledCodeHeapSize)
# run_test("OctaneNew_Ratio_" + str(NonProfiledCodeHeapSize), home + "/sources/octane", cmd_vm_new + cmd_octane, 20);
NonMethodCodeHeapSize = 5
NonProfiledCodeHeapSize = 168
ProfiledCodeHeapSize = 83
cmd_vm_new = get_cmd_vm_new2(ReservedCodeCacheSize, NonMethodCodeHeapSize, NonProfiledCodeHeapSize, ProfiledCodeHeapSize)
# run_test("OctaneNew_Ratio_" + str(NonProfiledCodeHeapSize), home + "/sources/octane", cmd_vm_new + cmd_octane, 20);
ReservedCodeCacheSize = 64
NonMethodCodeHeapSize = 5
NonProfiledCodeHeapSize = 30
ProfiledCodeHeapSize = 29
cmd_vm_new = get_cmd_vm_new2(ReservedCodeCacheSize, NonMethodCodeHeapSize, NonProfiledCodeHeapSize, ProfiledCodeHeapSize)
# run_test("OctaneNew_Ratio_" + str(NonProfiledCodeHeapSize), home + "/sources/octane", cmd_vm_new + cmd_octane, 20);
NonMethodCodeHeapSize = 5
NonProfiledCodeHeapSize = 20
ProfiledCodeHeapSize = 39
cmd_vm_new = get_cmd_vm_new2(ReservedCodeCacheSize, NonMethodCodeHeapSize, NonProfiledCodeHeapSize, ProfiledCodeHeapSize)
# run_test("OctaneNew_Ratio_" + str(NonProfiledCodeHeapSize), home + "/sources/octane", cmd_vm_new + cmd_octane, 20);
NonMethodCodeHeapSize = 5
NonProfiledCodeHeapSize = 40
ProfiledCodeHeapSize = 19
cmd_vm_new = get_cmd_vm_new2(ReservedCodeCacheSize, NonMethodCodeHeapSize, NonProfiledCodeHeapSize, ProfiledCodeHeapSize)
# run_test("OctaneNew_Ratio_" + str(NonProfiledCodeHeapSize), home + "/sources/octane", cmd_vm_new + cmd_o |
MediaMath/Diamond | src/collectors/diskspace/diskspace.py | Python | mit | 9,080 | 0 | # coding=utf-8
"""
Uses /proc/mounts and os.statvfs() to get disk space usage
#### Dependencies
* /proc/mounts
#### Examples
# no exclude filters at all
exclude_filters =,
# exclude everything that begins /boot or /mnt
exclude_filters = ^/boot, ^/mnt
# exclude everything that includes the letter 'm'
exclude_filters = m,
"""
import diamond.collector
import diamond.convertor
import os
import re
try:
import psutil
psutil # workaround for pyflakes issue #13
except ImportError:
psutil = None
class DiskSpaceCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(DiskSpaceCollector, self).get_default_config_help()
config_help.update({
'filesystems': "filesystems to examine",
'exclude_filters': "A list of regex patterns. Any filesystem"
+ " matching any of these patterns will be excluded from disk"
+ " | space metrics collection",
})
return config_help
def get_def | ault_config(self):
"""
Returns the default collector settings
"""
config = super(DiskSpaceCollector, self).get_default_config()
config.update({
# Enabled by default
'enabled': 'True',
'path': 'diskspace',
# filesystems to examine
'filesystems': 'ext2, ext3, ext4, xfs, glusterfs, nfs, ntfs, hfs,'
+ ' fat32, fat16',
# exclude_filters
# A list of regex patterns
# A filesystem matching any of these patterns will be excluded
# from disk space metrics collection.
#
# Examples:
# exclude_filters =,
# no exclude filters at all
# exclude_filters = ^/boot, ^/mnt
# exclude everything that begins /boot or /mnt
# exclude_filters = m,
# exclude everything that includes the letter "m"
'exclude_filters': ['^/export/home'],
# We don't use any derivative data to calculate this value
# Thus we can use a threaded model
'method': 'Threaded',
# Default numeric output
'byte_unit': ['byte']
})
return config
    def __init__(self, config, handlers):
        """Precompile the exclude filters and normalize the filesystem whitelist."""
        super(DiskSpaceCollector, self).__init__(config, handlers)

        # Precompile things
        self.exclude_filters = self.config['exclude_filters']
        if isinstance(self.exclude_filters, basestring):
            self.exclude_filters = [self.exclude_filters]

        # NOTE(review): an empty exclude_filters list compiles to the empty
        # pattern, which matches every mount point - confirm this is intended.
        self.exclude_reg = re.compile('|'.join(self.exclude_filters))

        # Accept either a comma-separated string or a list of fs type names.
        self.filesystems = []
        if isinstance(self.config['filesystems'], basestring):
            for filesystem in self.config['filesystems'].split(','):
                self.filesystems.append(filesystem.strip())
        elif isinstance(self.config['filesystems'], list):
            self.filesystems = self.config['filesystems']
def get_disk_labels(self):
"""
Creates a mapping of device nodes to filesystem labels
"""
path = '/dev/disk/by-label/'
labels = {}
if not os.path.isdir(path):
return labels
for label in os.listdir(path):
device = os.path.realpath(path + '/' + label)
labels[device] = label
return labels
    def get_file_systems(self):
        """
        Creates a map of mounted filesystems on the machine.

        iostat(1): Each sector has size of 512 bytes.

        Returns:
          (major, minor) -> FileSystem(device, mount_point)
        """
        result = {}
        if os.access('/proc/mounts', os.R_OK):
            file = open('/proc/mounts')
            for line in file:
                try:
                    mount = line.split()
                    device = mount[0]
                    mount_point = mount[1]
                    fs_type = mount[2]
                except (IndexError, ValueError):
                    continue

                # Skip the filesystem if it is not in the list of valid
                # filesystems
                if fs_type not in self.filesystems:
                    self.log.debug("Ignoring %s since it is of type %s which "
                                   + " is not in the list of filesystems.",
                                   mount_point, fs_type)
                    continue

                # Process the filters
                if self.exclude_reg.match(mount_point):
                    self.log.debug("Ignoring %s since it is in the "
                                   + "exclude_filter list.", mount_point)
                    continue

                # Pseudo filesystems never carry disk space worth reporting.
                if (mount_point.startswith('/dev')
                        or mount_point.startswith('/proc')
                        or mount_point.startswith('/sys')):
                    continue

                if '/' in device and mount_point.startswith('/'):
                    try:
                        stat = os.stat(mount_point)
                        major = os.major(stat.st_dev)
                        minor = os.minor(stat.st_dev)
                    except OSError:
                        self.log.debug("Path %s is not mounted - skipping.",
                                       mount_point)
                        continue

                    # Keep only the first mount point seen per device.
                    if (major, minor) in result:
                        continue

                    result[(major, minor)] = {
                        'device': device,
                        'mount_point': mount_point,
                        'fs_type': fs_type
                    }

            file.close()

        else:
            # Fallback for platforms without /proc/mounts (e.g. Windows).
            if not psutil:
                self.log.error('Unable to import psutil')
                return None

            partitions = psutil.disk_partitions(False)
            for partition in partitions:
                # Synthetic (major, minor) keys since psutil does not expose them.
                result[(0, len(result))] = {
                    'device': partition.device,
                    'mount_point': partition.mountpoint,
                    'fs_type': partition.fstype
                }
                pass

        return result
def collect(self):
labels = self.get_disk_labels()
results = self.get_file_systems()
if not results:
self.log.error('No diskspace metrics retrieved')
return None
for key, info in results.iteritems():
if info['device'] in labels:
name = labels[info['device']]
else:
name = info['mount_point'].replace('/', '_')
name = name.replace('.', '_').replace('\\', '')
if name == '_':
name = 'root'
if hasattr(os, 'statvfs'): # POSIX
data = os.statvfs(info['mount_point'])
block_size = data.f_bsize
blocks_total = data.f_blocks
blocks_free = data.f_bfree
blocks_avail = data.f_bavail
inodes_total = data.f_files
inodes_free = data.f_ffree
inodes_avail = data.f_favail
elif os.name == 'nt': # Windows
# fixme: used still not exact compared to disk_usage.py
# from psutil
raw_data = psutil.disk_usage(info['mount_point'])
block_size = 1 # fixme: ?
blocks_total = raw_data.total
blocks_free = raw_data.free
blocks_used = raw_data.used
else:
raise NotImplementedError("platform not supported")
for unit in self.config['byte_unit']:
metric_name = '%s.%s_used' % (name, unit)
metric_value = float(block_size) * float(
blocks_total - blocks_free)
metric_value = diamond.convertor.binary.convert(
value=metric_value, oldUnit='byte', newUnit=unit)
self.publish_gauge(metric_name, metric_value, 2)
metric_name = '%s.%s_free' % (name, unit)
metric_value = float(block_size) * float(bloc |
vrk-kpa/api-catalog | ckanext/ckanext-apicatalog_ui/ckanext/apicatalog_ui/admindashboard.py | Python | mit | 15,229 | 0.00243 | from __future__ import absolute_import
import logging
import ckan.logic as logic
import ckan.model as model
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.lib.dictization as dictization
from sqlalchemy import func, text, or_, and_
from datetime import datetime, timedelta
from .utils import package_generator
import flask
from ckan.plugins import toolkit
get_action = logic.get_action
check_access = logic.check_access
NotAuthorized = logic.NotAuthorized
log = logging.getLogger(__name__)
admin_dashboard = flask.Blueprint('admin_dashboard', __name__, url_prefix='/ckan-admin')
def get_blueprint():
    """Return the Flask blueprints provided by this module."""
    blueprints = [admin_dashboard]
    return blueprints
@admin_dashboard.route('/admin_dashboard')
def read():
    """Render the admin dashboard page.

    Gathers invalid resources, package statistics, packageless
    organizations (with changelog) and X-Road heartbeat data, then
    renders ``admin/dashboard.html``. Aborts with HTTP 403 when the
    caller is not authorized for ``admin_dashboard``.
    """
    context = {'user': toolkit.g.user, 'auth_user_obj': toolkit.g.userobj}
    try:
        toolkit.check_access('admin_dashboard', context, {})
        # Fetch invalid resources
        invalid_resources = fetch_invalid_resources()
        # Query package statistics
        statistics = fetch_package_statistics()
        # Find packageless organizations and produce a changelog
        (packageless_organizations, packageless_organizations_changelog) = \
            fetch_packageless_organizations_and_changelog(context)
        # Generate activity stream snippet
        # FIXME: Disabled because fetch_recent_package_activity_list_html is not ported to CKAN 2.9
        # package_activity_html = fetch_recent_package_activity_list_html(context, user_not='harvest')
        # harvest_activity_html = fetch_recent_package_activity_list_html(context, user='harvest')
        # privatized_activity_html = fetch_recent_package_activity_list_html(context, only_privatized=True)
        # interesting_activity_html = fetch_recent_package_activity_list_html(context, only_resourceful=True)

        def prepare_heartbeat(hb):
            # Parse the ISO timestamp into a datetime; pass falsy values through.
            if hb:
                return {'success': hb.get('success'),
                        'timestamp': datetime.strptime(hb.get('timestamp'), '%Y-%m-%dT%H:%M:%S.%f')}
            else:
                return hb

        xroad_heartbeat_latest = toolkit.get_action('xroad_heartbeat')(context, {})
        # 'success' here means heartbeat checks have been performed, the check success is within the 'heartbeat' property
        if xroad_heartbeat_latest.get('success'):
            one_day_ago = datetime.now() - timedelta(days=1)
            xroad_heartbeat_history = toolkit.get_action('xroad_heartbeat_history')(context, {'since': one_day_ago})
            xroad_heartbeat = {
                'latest': prepare_heartbeat(xroad_heartbeat_latest.get('heartbeat')),
                'history': [prepare_heartbeat(item) for item in xroad_heartbeat_history.get('items', [])]
            }
        else:
            xroad_heartbeat = False

        # Render template
        vars = {'invalid_resources': invalid_resources,
                # 'package_activity_html': package_activity_html,
                # 'harvest_activity_html': harvest_activity_html,
                # 'privatized_activity_html': privatized_activity_html,
                # 'interesting_activity_html': interesting_activity_html,
                'packageless_organizations': packageless_organizations,
                'packageless_organizations_changelog': packageless_organizations_changelog,
                'stats': statistics,
                'xroad_heartbeat': xroad_heartbeat,
                }
        template = 'admin/dashboard.html'
        return toolkit.render(template, extra_vars=vars)
    except toolkit.NotAuthorized:
        toolkit.abort(403)
def fetch_invalid_resources():
    """Return (resource, package) pairs whose content failed validation."""
    context = {'ignore_auth': True}
    return [(resource, package)
            for package in package_generator(context, '*:*', 1000)
            for resource in package.get('resources', [])
            if resource.get('valid_content', 'yes') == 'no']
def fetch_package_statistics():
    """Return package counts: public/private totals and counts of
    datasets created within the last week, month and year.
    """
    # Query the number of packages by "private"-value
    public_private_query = (
        model.Session.query(model.Package.private, func.count(model.Package.id))
        .filter(model.Package.state == 'active')
        .filter(model.Package.type == 'dataset')
        .group_by(model.Package.private))
    # The group-by yields at most two rows: one for private, one for public.
    public_count = 0
    private_count = 0
    for private, count in public_private_query:
        if private:
            private_count = count
        else:
            public_count = count
    # Query new package counts for different intervals
    def new_packages_since(dt):
        # Count datasets whose metadata_created timestamp is >= dt.
        created = (
            model.Session.query(
                model.Package.id.label('id'),
                model.Package.metadata_created.label('ts'))
            .filter(model.Package.type == 'dataset')
            .subquery())
        return (model.Session.query(func.count(created.c.id))
                .filter(created.c.ts >= dt)
                .one())[0]
    new_last_week = new_packages_since(datetime.utcnow() - timedelta(weeks=1))
    new_last_month = new_packages_since(datetime.utcnow() - timedelta(days=30))
    new_last_year = new_packages_since(datetime.utcnow() - timedelta(days=365))
    return {'public': public_count,
            'private': private_count,
            'new_last_week': new_last_week,
            'new_last_month': new_last_month,
            'new_last_year': new_last_year,
            }
def fetch_recent_package_activity_list_html(
context, user=None, user_not=None, only_privatized=False,
only_resourceful=False, limit=30):
# FIXME: disable function pending porting to CKAN 2.9
raise Exception('fetch_recent_package_activity_list_html is not yet ported for CKAN 2.9')
# FIXME: activity_streams was removed in CKAN 2.9, hack to "fix" references until porting
activity_streams = None
# Fetch recent revisions, store as list oredered by time
recent_revisions_query = (
model.Session.query(model.PackageRevision, model.User.id)
.join(model.Revision, model.PackageRevision.revision_id == model.Revision.id)
.join(model.User, model.Revision.author == model.User.name)
.distinct())
if only_resourceful:
recent_revisions_query = (
recent_revisions_query
.join(model.Resource, model.Resource.package_id == model.PackageRevision.id)
.filter(model.Resource.state == "active"))
if user is not None:
recent_revisions_query = recent_revisions_query.filter(
model.Revision.author == user)
if user_not is not None:
recent_revisions_query = recent_revisions_query.filter(
model.Revision.author != user_not)
if only_privatized:
recent_revisions_query = recent_revisions_query.filter(
model.PackageRevision.private)
recent_revisions_query = (
recent_revisions_query
.order_by(model.PackageRevision.metadata_modified.desc())
.limit(limit))
recent_revisions = [r for r in recent_revisions_query]
# Fetch related packages, store by id
packages = {r.id: None for r, uid in recent_revisions}
packages_query = (
model.Session.query(model.Package)
.filter(model.Package.id.in_(list(packages.keys()))))
for package in packages_query:
packages[package.id] = package
# Fetch related packages' first revision timestamps
packages_created = {}
packages_created_query = (
model.Session.query(
model.PackageRevision.id.label('id'),
func.min(model.PackageRevision.metadata_modified).label('ts'))
.filter(model.PackageRevision.id.in_(list(packages.keys())))
.group_by(model.PackageRevision.id))
for package_id, created in packages_created_query:
packages_created[package_id] = created
# Fetch previous revisions for the recent revisions
packages_previous = {}
packages_previous_query = (
model.Session.query(model.PackageRevision.revision_id.label("rid"), model.PackageRevision)
|
UManPychron/pychron | pychron/hardware/mdrive/__init__.py | Python | apache-2.0 | 1,003 | 0.000997 | # ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
# ============= EOF =============================================
|
kergely/Character-creator-szerep | src/skills.py | Python | mit | 4,916 | 0.000407 | import dices
class skill(object):
    """
    Contains a skill.

    skill(bases, value=None, encaffect=True)
    bases::string : attributes the skill depends on, either in
        "ABCx2" form (one attribute doubled) or "ABC+XYZ" form
        (two attributes summed); parsed into self.base = [attr1, attr2].
    value: current value of the skill (None until set).
    encaffect: whether encumbrance affects the skill.
    basevalue(inp) --> sets the skill value to inp.
    study(learned) --> learned is added to the value of the skill.
    """

    def __init__(self, bases, value=None, encaffect=True):
        super(skill, self).__init__()
        self.encaffect = encaffect
        self.value = value
        self.base = ["", ""]
        self.base[0] = bases[:3]
        # "ABCx2" doubles the first attribute; otherwise the second
        # attribute is the last three characters (tolerates stray spaces).
        if bases[3] == "x":
            self.base[1] = self.base[0]
        else:
            self.base[1] = bases[-3:]

    def basevalue(self, inp):
        # Bug fix: dropped the dead "self.value = 0" that was immediately
        # overwritten.  TODO decide how this should really be computed.
        self.value = inp

    def study(self, learned):
        # Requires value to have been set first (None + int raises).
        self.value += learned
# now the skills as a class is generated
# the next step is creating all the skills
# Standard skills every character possesses.  Base formula is either
# "ABCx2" (attribute doubled) or "ABC+XYZ" (two attributes summed);
# encaffect=False marks skills not affected by encumbrance.
# Fixed: "Ride" base string was garbled ("DEX+PO | W") — restored to "DEX+POW".
standardlist = {
    "Athletics": skill("STR+DEX"),
    "Boating": skill("STR+CON"),
    "Brawn": skill("STR+SIZ"),
    "Conceal": skill("DEX+POW"),
    "Customs": skill("INTx2", encaffect=False),
    "Dance": skill("DEX+CHA"),
    "Deceit": skill("INT+CHA", encaffect=False),
    "Drive": skill("DEX+POW"),
    "Endurance": skill("CONx2", encaffect=False),
    "Evade": skill("DEXx2"),
    "FirstAid": skill("INT+DEX"),
    "Influence": skill("CHAx2", encaffect=False),
    "Insight": skill("INT+POW", encaffect=False),
    "Locale": skill("INTx2", encaffect=False),
    "Perception": skill("INT+POW", encaffect=False),
    "Ride": skill("DEX+POW"),
    "Sing": skill("CHA+POW", encaffect=False),
    "Stealth": skill("DEX+INT"),
    "Swim": skill("STR+CON"),
    "Unarmed": skill("STR+DEX"),
    "Willpower": skill("POWx2"),
}
# standard is used to store the standard skills used in the Mythras franchise
# Professional skills: learned on demand via knownskills.learn().
# Fixed: "Courtesy" entry was garbled by an extraction artifact, and the
# base strings for "Acrobatics"/"Folk Magic" contained stray spaces
# ("STR+ DEX", "POW +CHA") — normalized; the parser yields the same
# attribute pair either way.
professionallist = {
    "Acrobatics": skill("STR+DEX"),
    "Art": skill("POW+CHA"),
    "Bureaucracy": skill("INTx2"),
    "Commerce": skill("INT+CHA"),
    "Courtesy": skill("INT+CHA"),
    "Craft": skill("DEX+INT"),
    "Culture": skill("INTx2", encaffect=False),
    "Devotion": skill("POW+CHA"),
    "Disguise": skill("INT+CHA"),
    "Engineering": skill("INTx2"),
    "Exhort": skill("INT+CHA"),
    "Folk Magic": skill("POW+CHA"),
    "Gambling": skill("INT+POW"),
    "Healing": skill("INT+POW"),
    "Language": skill("INT+CHA", encaffect=False),
    "Literacy": skill("INTx2"),
    "Lockpicking": skill("DEXx2"),
    "Lore": skill("INTx2"),
    "Mechanisms": skill("DEX+INT"),
    "Musicianship": skill("DEX+CHA"),
    "Navigation": skill("INT+POW"),
    "Oratory": skill("POW+CHA"),
    "Seamanship": skill("INT+CON"),
    "Seduction": skill("INT+CHA"),
    "Sleight": skill("DEX+CHA"),
    "Streetwise": skill("POW+CHA"),
    "Survival": skill("CON+POW"),
    "Teach": skill("INT+CHA"),
    "Track": skill("INT+CON"),
}
# professionallist is used to store the professional skills in Mythras
# "Name" : [skill::skill]
class knownskills(dict):
"""
A dictionary with a few special powers
"""
def __init__(self, *args):
super(knownskills, self).__init__()
global standardlist
global professionallist
self.update(standardlist) # creating the Standard skills
for element in args:
# one shold be capable off adding, this is with the arguments
try:
self[element] = professionallist[element]
except KeyError as hiba:
msg = str(hiba) + \
"is not a recognized professional skill in Mythras"
print msg
print "It was not added to the list of skills"
except:
print "Something went wrong"
print element + " was not added to the list of skills"
# this handled the input errors
def learn(self, *args):
global professionallist
for element in args:
if element in self:
print element, "is already in the learned skills list", \
", it was not added to protect the value"
else:
try:
self[element] = professionallist[element]
except KeyError as hiba:
msg = str(hiba) + \
"is not a recognized professional skill in Mythras"
print msg
print "It was not added to the list of skills"
except:
print "Something went wrong"
print element + " was not added to the list of skills"
# this handled the input errors
self[element] = professionallist[element]
# test = knownskills("Disguise")
# test.learn("Lore")
# print test["Swim"].base
# print test["Disguise"].base
# print test["Lore"].base
|
dcloud/django-postgres-copy | setup.py | Python | mit | 1,511 | 0.001324 | from setuptools import setup
from distutils.core import Command
class TestCommand(Command):
    """``python setup.py test``: configure a minimal in-memory Django
    environment and run the bundled test suite.

    Fixed two extraction artifacts that had garbled the source
    (``initialize_options(self | )`` and the ``| 'sqlite'`` database entry).
    """

    user_options = []  # distutils requires this attribute, even if empty

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Imported lazily so plain "setup.py install" does not need Django.
        import django
        from django.core.management import call_command
        from django.conf import settings
        settings.configure(
            DATABASES={
                'default': {
                    'NAME': 'test',
                    'ENGINE': 'django.db.backends.postgresql_psycopg2'
                },
                'sqlite': {
                    'NAME': 'sqlite',
                    'ENGINE': 'django.db.backends.sqlite3'
                }
            },
            INSTALLED_APPS=("tests",),
        )
        django.setup()
        call_command('test', 'tests.tests')
# Package metadata for django-postgres-copy.
setup(
    name='django-postgres-copy',
    version='0.0.6',
    description='Quickly load comma-delimited data into a Django model using PostgreSQL\'s COPY command',
    author='Ben Welsh',
    author_email='ben.welsh@gmail.com',
    url='http://www.github.com/california-civic-data-coalition/django-postgresql-copy/',
    license="MIT",
    packages=("postgres_copy",),
    install_requires=(
        'psycopg2>=2.5',
    ),
    # Wire "python setup.py test" to the custom Django test runner above.
    cmdclass={'test': TestCommand},
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
|
acuriel/lsmdico | lsmdico/urls.py | Python | gpl-3.0 | 1,142 | 0.001751 | """lsmdico URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from . import views
#from django.contrib import admin
# URL routing for the lsmdico project.
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name="index"),
    url(r'^login/$', views.LoginView.as_view(), name="login"),
    url(r'^logout/$', views.logout_view, name="logout"),
    url(r'^manage/$', views.ManageView.as_view(), name="manage"),
    # Dictionary-entry pages live under /entries/ in their own namespace.
    url(r'^entries/', include('lsmentries.urls',
                              namespace="lsmentries")),
    #url(r'^admin/', admin.site.urls),
]
|
RedHatQE/cfme_tests | cfme/tests/services/test_pxe_service_catalogs.py | Python | gpl-2.0 | 6,083 | 0.002466 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.pxe import get_pxe_server_from_config
from cfme.infrastructure.pxe import get_template_from_config
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils import testgen
from cfme.utils.blockers import BZ
from cfme.utils.blockers import GH
from cfme.utils.conf import cfme_data
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.usefixtures('uses_infra_providers'),
test_requirements.service,
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
    """Parametrize tests with infra providers that carry complete PXE
    provisioning data, reference a configured PXE server, and use a
    kickstart template known to cfme_data.

    Removed the dead self-assignment ``argnames = argnames``.
    """
    # Filter out providers without provisioning data or hosts defined
    argnames, argvalues, idlist = testgen.providers_by_class(
        metafunc, [InfraProvider], required_fields=[
            ['provisioning', 'pxe_server'],
            ['provisioning', 'pxe_image'],
            ['provisioning', 'pxe_image_type'],
            ['provisioning', 'pxe_kickstart'],
            ['provisioning', 'pxe_template'],
            ['provisioning', 'datastore'],
            ['provisioning', 'host'],
            ['provisioning', 'pxe_root_password'],
            ['provisioning', 'vlan']
        ])
    pargnames, pargvalues, pidlist = testgen.pxe_servers(metafunc)
    pxe_server_names = [pval[0] for pval in pargvalues]

    new_idlist = []
    new_argvalues = []
    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        # SCVMM providers are excluded from PXE provisioning tests.
        if args['provider'].type == "scvmm":
            continue

        pxe_server_name = args['provider'].data['provisioning']['pxe_server']
        if pxe_server_name not in pxe_server_names:
            continue

        pxe_cust_template = args['provider'].data['provisioning']['pxe_kickstart']
        if pxe_cust_template not in cfme_data.get('customization_templates', {}).keys():
            continue

        new_idlist.append(idlist[i])
        new_argvalues.append(argvalues[i])
    testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope='module')
def pxe_server(appliance, provider):
    """PXE server object named in the provider's provisioning config."""
    server_name = provider.data['provisioning']['pxe_server']
    return get_pxe_server_from_config(server_name, appliance=appliance)
@pytest.fixture(scope='module')
def pxe_cust_template(appliance, provider):
    """Customization (kickstart) template named in the provider's provisioning config."""
    kickstart_name = provider.data['provisioning']['pxe_kickstart']
    return get_template_from_config(kickstart_name, create=True, appliance=appliance)
@pytest.fixture(scope="function")
def setup_pxe_servers_vm_prov(pxe_server, pxe_cust_template, provisioning):
    """Ensure the PXE server exists and has its image type configured."""
    if not pxe_server.exists():
        pxe_server.create()
    image = provisioning['pxe_image']
    image_type = provisioning['pxe_image_type']
    pxe_server.set_pxe_image_type(image, image_type)
@pytest.fixture(scope="function")
def catalog_item(appliance, provider, dialog, catalog, provisioning,
                 setup_pxe_servers_vm_prov):
    """Create and return a catalog item that PXE-provisions a VM on the
    provider.  Reconstructed two lines garbled by extraction artifacts
    (the fixture decorator and the unpacking of provisioning values).
    """
    # generate_tests makes sure these have values
    pxe_template, host, datastore, pxe_server, pxe_image, pxe_kickstart, pxe_root_password,\
        pxe_image_type, pxe_vlan = map(
            provisioning.get, (
                'pxe_template', 'host', 'datastore', 'pxe_server', 'pxe_image', 'pxe_kickstart',
                'pxe_root_password', 'pxe_image_type', 'vlan'
            )
        )

    provisioning_data = {
        'catalog': {'catalog_name': {'name': pxe_template, 'provider': provider.name},
                    'provision_type': 'PXE',
                    'pxe_server': pxe_server,
                    'pxe_image': {'name': pxe_image},
                    'vm_name': random_vm_name('pxe_service')},
        'environment': {'datastore_name': {'name': datastore},
                        'host_name': {'name': host}},
        'customize': {'root_password': pxe_root_password,
                      'custom_template': {'name': pxe_kickstart}},
        'network': {'vlan': partial_match(pxe_vlan)},
    }

    item_name = fauxfactory.gen_alphanumeric()
    return appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog", display_in=True, catalog=catalog,
        dialog=dialog, prov_data=provisioning_data)
@pytest.mark.rhv1
@pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:7965'),
                            BZ(1633516, forced_streams=['5.10'],
                               unblock=lambda provider: not provider.one_of(RHEVMProvider))])
@pytest.mark.usefixtures('setup_pxe_servers_vm_prov')
def test_pxe_servicecatalog(appliance, setup_provider, provider, catalog_item, request):
    """Tests RHEV PXE service catalog

    Orders the PXE-backed catalog item and waits (up to 1h) for the
    provision request to succeed; the provisioned VM is cleaned up in a
    finalizer.

    Metadata:
        test_flag: pxe, provision

    Polarion:
        assignee: nansari
        casecomponent: Services
        initialEstimate: 1/4h
    """
    vm_name = catalog_item.prov_data['catalog']["vm_name"]
    # Provisioned VM gets a "0001" suffix appended to the requested name.
    request.addfinalizer(
        lambda: appliance.collections.infra_vms.instantiate(
            "{}0001".format(vm_name), provider).cleanup_on_provider()
    )
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
    service_catalogs.order()
    # nav to requests page happens on successful provision
    logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
    request_description = catalog_item.name
    provision_request = appliance.collections.requests.instantiate(request_description,
                                                                   partial_check=True)
    provision_request.wait_for_request(num_sec=3600)
    msg = "Provisioning failed with the message {}".format(provision_request.rest.message)
    assert provision_request.is_succeeded(), msg
|
goujonpa/jeankevin | views/settingsView.py | Python | mit | 7,278 | 0.000962 | #!/usr/local/bin/python
# -*-coding:Utf-8 -*
import os
import math
def GA_settings():
    """Provides the view for the user setting of the GA experiments and returns the settings set"""
    options = {}
    os.system("clear")
    print('===== OPTIONS =====\n')
    # Preset 1 loads hard-coded defaults; preset 2 prompts for every value.
    preset = int(raw_input(
        "PRESET\n"
        "Use preset ?\n"
        "\n\n-> 1: Source based preset\n"
        "\n-> 2: I WANT TO SET BY MYSELF\n"
    ))
    os.system("clear")
    if preset == 1:
        options["iterations"] = int(10000)
        options["stopFitness"] = float(0.95)
        options["mode"] = 'real'
        options['crossMode'] = 'randomMultiPoint'
        options["maximalPopulation"] = int(50)
        options["mutationMode"] = 'oneNucleotid'
        options["mutationProbability"] = float(0.05)
        options["verbose"] = False
        options["initialPopulation"] = int(100)
        options['selectionMode'] = 'tournament'
    elif preset == 2:
        print('BASICS')
        x = int(raw_input('Stop Iterations Number:\n'))
        options['iterations'] = int(x)
        options['stopFitness'] = float(raw_input(
            'Stop Fitness:\n'
        ))
        os.system('clear')
        print('SELECTION')
        # Numeric menu choices are mapped to string identifiers below.
        options['selectionMode'] = int(raw_input(
            '\nSelection Method:\n'
            '--> 1: Roulette method\n'
            '--> 2: Tournament method\n'
            '--> 3: Roulette without replacement method\n'
        ))
        if options['selectionMode'] == 1:
            options['selectionMode'] = 'roulette'
        elif options['selectionMode'] == 2:
            options['selectionMode'] = 'tournament'
        elif options['selectionMode'] == 3:
            options['selectionMode'] = 'rouletteWR'
        os.system('clear')
        print('CROSSOVER & MUTATIONS')
        options['mode'] = int(raw_input(
            'Mode:\n'
            '-> 1: Binary mode\n'
            '-> 2: Real mode\n'
        ))
        if options['mode'] == 1:
            options['mode'] = 'binary'
        elif options['mode'] == 2:
            options['mode'] = 'real'
        options['crossMode'] = int(raw_input(
            'Crossover Mode:\n'
            '--> 1: random one point\n'
            '--> 2: random multipoint\n'
        ))
        if options['crossMode'] == 1:
            options['crossMode'] = 'randomOnePoint'
        elif options['crossMode'] == 2:
            options['crossMode'] = 'randomMultiPoint'
        options['mutationMode'] = int(raw_input(
            'Mutation Mode:\n'
            '-> 0: Swap mode\n'
            '-> 1: Each nucleotid has a chance to be muted, one by one\n'
            '-> 2: 1 mutation maximum by child\n'
        ))
        if options['mutationMode'] == 0:
            options['mutationMode'] = 'swap'
        elif options['mutationMode'] == 1:
            options['mutationMode'] = 'everyNucleotid'
        elif options['mutationMode'] == 2:
            options['mutationMode'] = 'oneNucleotid'
        # NOTE(review): value 2 ("random probability") is stored as the
        # float 2.0 here; presumably interpreted downstream — confirm.
        options['mutationProbability'] = float(raw_input(
            'Mutation Probability Mode:\n'
            '-> 0 < n < 1: Fixed Probability\n'
            '-> 2: Random Probability, basically between 1/BitArraySize and 1/PopulationSize\n'
        ))
        os.system('clear')
        print("POPULATION")
        options["maximalPopulation"] = int(raw_input(
            "Maximal Population:\n"
            "-> n > 0: elitist insertion, just keep n best individuals\n"
            "-> Other: every individual is kept (can slow down the algorythm for several iterations)\n"
            "-> WARNING: If you set maximal population = 1 WITH roulette without replacement"
            ", your computer will explode\n"
        ))
        options["initialPopulation"] = int(raw_input("Initialise with how much individuals ?\n"))
        os.system("clear")
        print("\nVERBOSE")
        options["verbose"] = int(raw_input(
            "Verbose Mode\n"
            "-> 1: Enabled\n"
            "-> 0: Disabled\n"
        ))
        if options['verbose'] == 0:
            options['verbose'] = False
        elif options['verbose'] == 1:
            options['verbose'] = True
    os.system("clear")
    return options
def ES_settings():
    """Provides the view for the user setting of the ES experiments and returns the settings set.

    Fixes: reconstructed two option keys garbled by extraction artifacts
    (``" | iterations"`` / ``"iterations | "``), and converts the manual
    verbose answer to a bool for consistency with GA_settings.
    """
    os.system("clear")
    print('===== OPTIONS =====\n')
    options = {}
    preset = int(raw_input(
        "PRESET\n"
        "Use preset ?\n"
        "\n\n-> 1: Source based preset\n"
        "\n-> 2: I WANT TO SET BY MYSELF\n"
    ))
    os.system("clear")
    if preset == 1:
        options["iterations"] = int(1000)
        options["stopFitness"] = float(0.95)
        options["base"] = int(10)
        options['verbose'] = False
        options['selectionMode'] = int(1)
        options['mutationMode'] = '2LRNS'
        options['recombinationMode'] = 'weighted'
        options['sigmaBoost'] = True
    elif preset == 2:
        print('\nBASICS')
        x = int(raw_input('Stop Iterations Number:\n'))
        options["iterations"] = int(x)
        options['stopFitness'] = float(raw_input('\nStop Fitness:\n'))
        print("\nGENERATIONS")
        # n drives the derived lambda/mu/step-size values computed below.
        options["base"] = int(raw_input(
            'n setting:\n'
            'lambda (number of child from the father) = 8 * n\n'
            'mu (number of best child selected to make new father) = lambda / 4\n'
            't (global step size) = 1 / (n)^(1/2)\n'
            'ti (component step size) = 1 / (n)^(1/4)\n'
        ))
        print('RECOMBINATION')
        options['recombinationMode'] = int(raw_input(
            'Recombination mode:\n'
            '1- Intermediate\n'
            '2- Select Best\n'
            '3- Weighted\n'
        ))
        if options['recombinationMode'] == 1:
            options['recombinationMode'] = 'intermediate'
        elif options['recombinationMode'] == 2:
            options['recombinationMode'] = 'best'
        elif options['recombinationMode'] == 3:
            options['recombinationMode'] = 'weighted'
        print('MUTATION')
        options['mutationMode'] = int(raw_input(
            'Mutation mode:\n'
            '1- 2 Learning Rates, N Sigmas\n'
            '2- 1 Learning Rate, 1 Sigma\n'
        ))
        if options['mutationMode'] == 1:
            options['mutationMode'] = '2LRNS'
        elif options['mutationMode'] == 2:
            options['mutationMode'] = '1LR1S'
        print('SIGMA BOOST')
        options['sigmaBoost'] = int(raw_input(
            'Allow sigma boost YOLO special feature ?\n'
            '1- sigma nitro enabled\n'
            '2- sigma nitro disabled\n'
        ))
        if options['sigmaBoost'] == 1:
            options['sigmaBoost'] = True
        elif options['sigmaBoost'] == 2:
            options['sigmaBoost'] = False
        print("\nVERBOSE")
        options["verbose"] = int(raw_input(
            "Verbose Mode\n"
            "-> 1: Enabled\n"
            "-> 0: Disabled\n"
        ))
        # Consistency with GA_settings: store a real bool (truthiness unchanged).
        if options['verbose'] == 0:
            options['verbose'] = False
        elif options['verbose'] == 1:
            options['verbose'] = True
    os.system("clear")
    # Derived ES parameters, all functions of n (see prompt text above).
    options['maximalPopulation'] = 2 * options['base']
    options['childNumber'] = 8 * options['base']
    options['globalLearningRate'] = 1.0 / pow(options['base'], 0.5)
    options['localLearningRate'] = 1.0 / pow(options['base'], 0.25)
    return options
|
rwoodley/SphericalPhotoBrowser | server/server.py | Python | apache-2.0 | 578 | 0.00173 | from flask import Flask, Response, make_response
from video_stream_handler import stream_handler
import logging
import cv2
# see line 398 of connectionpool.py:
logging.basicConfig(level=logging.DEBUG)
thetav = None
app = Flask(__name__, static_url_path='/public', static_folder='../')
@app.route('/video_feed')
def video_feed():
    """Stream frames from the default camera as a multipart MJPEG response."""
    # NOTE(review): a new capture is opened per request and never released
    # here — presumably stream_handler owns its lifetime; confirm.
    cap = cv2.VideoCapture(0)
    # cap.set(3, 3840)
    # cap.set(4, 1920)
    return Response(stream_handler(cap), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    # Listen on all interfaces; threaded so multiple clients can stream.
    app.run(host='0.0.0.0', threaded=True)
|
adkerr/tempest | tempest/api/compute/v3/servers/test_servers_negative.py | Python | apache-2.0 | 17,583 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import sys
import uuid
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
_interface = 'json'
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
try:
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
except Exception:
self.server_id = self.rebuild_server(self.server_id)
@classmethod
def setUpClass(cls):
super(ServersNegativeTestJSON, cls).setUpClass()
cls.client = cls.servers_client
cls.img_client = cls.images_client
cls.alt_os = clients.AltManager()
cls.alt_client = cls.alt_os.servers_client
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@attr(type=['negative', 'gate'])
def test_server_name_blank(self):
# Create a server with name parameter empty
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name='')
@attr(type=['negative', 'gate'])
def test_personality_file_contents_not_encoded(self):
# Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
person = [{'path': '/etc/testfile.txt',
'contents': file_contents}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
personality=person)
@attr(type=['negative', 'gate'])
def test_create_with_invalid_image(self):
# Create a server with an unknown image
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
image_id=-1)
@attr(type=['negative', 'gate'])
def test_create_with_invalid_flavor(self):
# Create a server with an unknown flavor
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
flavor=-1,)
@attr(type=['negative', 'gate'])
def test_invalid_access_ip_v4_address(self):
# An access IPv4 address must match a valid address pattern
        IPv4 = '1.1.1.1.1.1'
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv4=IPv4)
@attr(type=['negative', 'gate'])
def test_invalid_ip_v6_address(self):
# An access IPv6 address must match a valid address pattern
IPv6 = 'notvalid'
self.assertRaises(exceptions.BadRequest,
self.create_test_server, accessIPv6=IPv6)
@attr(type=['negative', 'gate'])
    def test_resize_nonexistent_server(self):
nonexistent_server = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound,
self.client.resize,
nonexistent_server, self.flavor_ref)
@attr(type=['negative', 'gate'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
nonexistent_flavor = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref=nonexistent_flavor)
@attr(type=['negative', 'gate'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
self.assertRaises(exceptions.BadRequest, self.client.resize,
self.server_id, flavor_ref="")
@attr(type=['negative', 'gate'])
def test_reboot_non_existent_server(self):
# Reboot a non existent server
nonexistent_server = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
@attr(type=['negative', 'gate'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
self.addCleanup(self.client.unpause_server,
self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
@attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
# Rebuild and Reboot a deleted server
_, server = self.create_test_server()
self.client.delete_server(server['id'])
self.client.wait_for_server_termination(server['id'])
self.assertRaises(exceptions.NotFound,
self.client.rebuild,
server['id'], self.image_ref_alt)
self.assertRaises(exceptions.NotFound, self.client.reboot,
server['id'], 'SOFT')
@attr(type=['negative', 'gate'])
def test_rebuild_non_existent_server(self):
# Rebuild a non existent server
nonexistent_server = str(uuid.uuid4())
meta = {'rebuild': 'server'}
new_name = data_utils.rand_name('server')
file_contents = 'Test server rebuild.'
personality = [{'path': '/etc/rebuild.txt',
'contents': base64.b64encode(file_contents)}]
self.assertRaises(exceptions.NotFound,
self.client.rebuild,
nonexistent_server,
self.image_ref_alt,
name=new_name, meta=meta,
personality=personality,
adminPass='rebuild')
@attr(type=['negative', 'gate'])
def test_create_numeric_server_name(self):
# Create a server with a numeric name
if self.__class__._interface == "xml":
raise self.skipException("Not testable in XML")
server_name = 12345
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name=server_name)
@attr(type=['negative', 'gate'])
def test_create_server_name_length_exceeds_256(self):
# Create a server with name length exceeding 256 characters
server_name = 'a' * 256
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
name=server_name)
@attr(type=['negative', 'gate'])
def test_create_with_invalid_network_uuid(self):
# Pass invalid network uuid while creating a server
networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}]
self.assertRaises(exceptions.BadRequest,
self.create_test_server,
networks=networks)
    @attr(type=['negative', 'gate'])
    def test_create_with_non_existant_keypair(self):
        """Referencing a keypair that does not exist must 400."""
        # Pass a non-existent keypair while creating a server
        # rand_name yields a name that was never registered as a keypair.
        key_name = data_utils.rand_name('key')
        self.assertRaises(exceptions.BadRequest,
                          self.create_test_server,
                          key_name=key_name)
@attr(type=['negative', 'gate'])
def test_create_server_metadata_exceeds_length_limit(self):
# Pass really long metadata while creating a server
metadata = {'a': 'b' * 260}
self.assertRaises(exceptions.OverLimit,
self.crea |
dsp2003/e17p | src/e17p/remember11/ff/compression.py | Python | gpl-2.0 | 4,355 | 0.022273 | #!/usr/bin/env python3
#Copyright 2010 Sebastian Hagen
# This file is part of E17p.
#
# E17p is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# E17p is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# R11 compression format support.
import struct
from ...base.file_data import DataRefFile
class R11UnpackError(ValueError):
pass
class R11PackedDataRefFile(DataRefFile):
   def unpack(self):
      """Decompress this ref's packed payload and return it as a memoryview.

      The stream is an LZSS-style format ported from decompressbip.c: a
      4-byte little-endian uncompressed-length header, then items selected
      by flag bits (one control byte supplies 8 flags) — a set bit copies a
      literal byte, a clear bit decodes a two-byte back-reference.

      Raises:
         R11UnpackError: if output would over-/underrun the declared
            length, or a back-reference points out of range.
      """
      data = self.get_data()
      len_p = len(data)
      # straightforward adaption of decompressbip.c
      (len_u,) = struct.unpack(b'<L', data[:4]) # dubious.
      # 18 bytes of leading slack: back-references may legally reach up to
      # 18 bytes before the start of the output (the m >= -18 check below).
      rv = bytearray(len_u+18)
      off_i = 4
      off_o = 0
      mask = 0
      while (off_i < len_p):
         # Advance to the next flag bit; when the bits are used up
         # (mask < 0x100) reload a control byte, with 0x8000 acting as a
         # sentinel that counts down the 8 available bits.
         mask >>= 1
         mask %= 0x100000000
         if (mask < 0x100):
            mask = data[off_i] | 0x8000
            off_i += 1
         if (mask & 1):
            # Flag set: copy one literal byte to the output.
            #print(len_u,off_o)
            #print(off_o,len(rv),len_u, off_i,len(data))
            if (off_o >= len_u):
               raise R11UnpackError('R11 decompression output overrun at {}/{}.'.format(off_i, off_o))
            rv[off_o+18] = data[off_i]
            off_o += 1
            off_i += 1
            continue
         # Flag clear: two-byte back-reference. 12-bit window offset
         # (low byte + high nibble of the second byte) and 4-bit length,
         # stored as (length - 3).
         roff = ((data[off_i+1] & 0xf0) << 4) + data[off_i] + 18
         rlen = (data[off_i+1] & 0xf) + 3
         # Resolve the window offset against the current 4KiB-aligned page
         # of the output cursor; offsets "ahead" of the cursor wrap to the
         # previous page.
         m = (off_o & ~0xfff)|(roff&0xfff)
         if (m >= off_o):
            m -= 0x1000
         if (m < -18):
            raise R11UnpackError('R11 decompression failure: Unknown mtype {} at {}/{} (len {})'.format(m, off_i, off_o, rlen))
         if ((off_o + rlen) > len_u):
            raise R11UnpackError('R11 decompression output overrun at {}/{} (m {} len {})'.format(off_i, off_o, m, rlen))
         if ((m + rlen) > len_u):
            raise R11UnpackError('R11 decompression input overrun at {}/{} (m {} len {})'.format(off_i, off_o, m, rlen))
         off_i += 2
         if (abs(off_o-m) >= rlen):
            # Non-overlapping ranges: bulk slice copy is safe and fast.
            rv[off_o+18:off_o+rlen+18] = rv[m+18:m+rlen+18]
         else:
            # Progressive memcpy. Doing it byte-for-byte in Py is kinda slow, so only do this if necessary.
            for i in range(rlen):
               rv[off_o+18+i] = rv[m+18+i]
         #print(off_o,rlen, off_o-m, rlen)
         off_o += rlen
      if (off_o != len_u):
         raise R11UnpackError('BIP decompression output underrun: Extracted {}/{} bytes.'.format(off_o, len_u))
      return memoryview(rv)[18:]
@staticmethod
def pack_nullcompression(data_in):
# Straight adaption from compressbip.c.
len_i = | len(data_in)
len_o = (len_i*9+7)//8 + 4
|
data_out = bytearray(len_o)
data_out[:4] = struct.pack('<L', len_i)
o = 4
i = 0
while(i < len_i):
data_out[o] = 0xff
data_out[o+1:o+9] = data_in[i:i+8]
i += 8
o += 9
if (len(data_out) != len_o):
raise ValueError('Compression failed: Wrote {}/{} outbytes.'.format(len(data_out), len_o))
return data_out
   def repack_nullcompression(self):
      """Decompress this ref's data and re-pack it with the store-only scheme."""
      return self.pack_nullcompression(self.unpack())
   def get_data_unpacked(self):
      """Unpack the payload and wrap it in a fresh BytesIO-backed DataRefFile."""
      from io import BytesIO
      d = self.unpack()
      bio = BytesIO(d)
      return DataRefFile(bio, 0, len(d))
def _main():
   """CLI entry point: decompress each file named on the command line to
   a sibling file with a ``.d`` suffix."""
   import optparse
   op = optparse.OptionParser()
   (opts, args) = op.parse_args()
   fns = args
   for fn in fns:
      print('---------------- Reading file {!r}.'.format(fn))
      f = open(fn, 'rb')
      # seek(0, 2) returns the file size, i.e. the length of the packed blob.
      dref = R11PackedDataRefFile(f,0,f.seek(0,2))
      fn_out = fn + '.d'
      data = dref.get_data_unpacked().get_data()
      print('--> {!r}: ({}) bytes.'.format(fn_out, len(data)))
      f_out = open(fn_out, 'wb')
      f_out.write(data)
if (__name__ == '__main__'):
_main()
|
evanbrumley/django-report-tools | docs/source/charts/bar_chart_example.py | Python | bsd-2-clause | 900 | 0.002222 | class MyReport(Report):
renderer = GoogleChartsRenderer
bar_chart = charts.BarC | hart(title="Pony Populations", width="500")
multiseries_ba | r_chart = charts.BarChart(title="Pony Populations by Country", width="500")
    def get_data_for_bar_chart(self):
        """Build the single-series data set backing ``bar_chart``."""
        data = ChartData()
        data.add_column("Pony Type")
        data.add_column("Population")
        data.add_row(["Blue", 20])
        data.add_row(["Pink", 20])
        data.add_row(["Magical", 1])
        return data
    def get_data_for_multiseries_bar_chart(self):
        """Build the per-country data set backing ``multiseries_bar_chart``.

        One column per country; each row is [pony type, AU, CH, US counts].
        """
        data = ChartData()
        data.add_column("Pony Type")
        data.add_column("Australian Population")
        data.add_column("Switzerland Population")
        data.add_column("USA Population")
        data.add_row(["Blue", 5, 10, 5])
        data.add_row(["Pink", 10, 2, 8])
        data.add_row(["Magical", 1, 0, 0])
        return data
tkzeng/molecular-design-toolkit | moldesign/utils/callsigs.py | Python | apache-2.0 | 8,969 | 0.00301 | # Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import os
from functools import wraps
import collections
import funcsigs
from .utils import if_not_none
from .docparsers import GoogleDocArgumentInjector
def args_from(original_function,
              only=None,
              allexcept=None,
              inject_kwargs=None,
              inject_docs=None,
              wraps=None,
              update_docstring_args=False):
    """
    Decorator to transfer call signatures - helps to hide ugly *args and **kwargs in delegated calls

    Args:
        original_function (callable): the function to take the call signature from
        only (List[str]): only transfer these arguments (incompatible with `allexcept`)
        wraps (bool): Transfer documentation and attributes from original_function to
            decorated_function, using functools.wraps (default: True if call signature is
            unchanged, False otherwise)
        allexcept (List[str]): transfer all except these arguments (incompatible with `only`)
        inject_kwargs (dict): Inject new kwargs into the call signature
            (of the form ``{argname: defaultvalue}``)
        inject_docs (dict): Add or modifies argument documentation (requires google-style
            docstrings) with a dict of the form `{argname: "(type): description"}`
        update_docstring_args (bool): Update "arguments" section of the docstring using the
            original function's documentation (requires google-style docstrings and wraps=False)

    Note:
        To use arguments from a classes' __init__ method, pass the class itself as
        ``original_function`` - this will also allow us to inject the documentation

    Returns:
        Decorator function
    """
    # NEWFEATURE - verify arguments?
    # NOTE(review): ``inject_docs`` is accepted but never read below.
    if only and allexcept:
        raise ValueError('Error in keyword arguments - '
                         'pass *either* "only" or "allexcept", not both')
    origname = get_qualified_name(original_function)
    if hasattr(original_function, '__signature__'):
        sig = original_function.__signature__.replace()
    else:
        sig = funcsigs.signature(original_function)
    # Modify the call signature if necessary
    if only or allexcept or inject_kwargs:
        wraps = if_not_none(wraps, False)
        newparams = []
        if only:
            for param in only:
                newparams.append(sig.parameters[param])
        elif allexcept:
            for name, param in sig.parameters.iteritems():
                if name not in allexcept:
                    newparams.append(param)
        else:
            newparams = sig.parameters.values()
        if inject_kwargs:
            for name, default in inject_kwargs.iteritems():
                newp = funcsigs.Parameter(name, funcsigs.Parameter.POSITIONAL_OR_KEYWORD,
                                          default=default)
                newparams.append(newp)
        # Parameters must stay ordered by kind (positional before keyword).
        newparams.sort(key=lambda param: param._kind)
        sig = sig.replace(parameters=newparams)
    else:
        wraps = if_not_none(wraps, True)
    # Get the docstring arguments
    # NOTE(review): this requires ``newparams``, which is only bound when
    # only/allexcept/inject_kwargs modified the signature above -- confirm
    # that update_docstring_args is never used without one of those.
    if update_docstring_args:
        original_docs = GoogleDocArgumentInjector(original_function.__doc__)
        argument_docstrings = collections.OrderedDict((p.name, original_docs.args[p.name])
                                                      for p in newparams)

    def decorator(f):
        """Modify f's call signature (using the `__signature__` attribute)"""
        if wraps:
            fname = original_function.__name__
            f = functools.wraps(original_function)(f)
            f.__name__ = fname  # revert name change
        else:
            fname = f.__name__
        f.__signature__ = sig
        if update_docstring_args or inject_kwargs:
            # Bind to a distinct local name: assigning to
            # `argument_docstrings` inside this closure would make it local
            # throughout and raise UnboundLocalError whenever
            # update_docstring_args is True (bug in the original).
            if update_docstring_args:
                arg_docs = argument_docstrings
            else:
                arg_docs = GoogleDocArgumentInjector(f.__doc__).args
            docs = GoogleDocArgumentInjector(f.__doc__)
            docs.args = arg_docs
            # Keep a history of the pre-rewrite docstrings on the function.
            if not hasattr(f, '__orig_docs'):
                f.__orig_docs = []
            f.__orig_docs.append(f.__doc__)
            f.__doc__ = docs.new_docstring()
        # Only for building sphinx documentation:
        if os.environ.get('SPHINX_IS_BUILDING_DOCS', ""):
            sigstring = '%s%s\n' % (fname, sig)
            if hasattr(f, '__doc__') and f.__doc__ is not None:
                f.__doc__ = sigstring + f.__doc__
            else:
                f.__doc__ = sigstring
        return f
    return decorator
def kwargs_from(reference_function, mod_docs=True):
    """ Replaces ``**kwargs`` in a call signature with keyword arguments from another function.

    Args:
        reference_function (function): function to get kwargs from
        mod_docs (bool): whether to modify the decorated function's docstring

    Note:
        ``mod_docs`` works ONLY for google-style docstrings
    """
    refsig = funcsigs.signature(reference_function)
    origname = get_qualified_name(reference_function)
    kwparams = []
    # Collect every parameter of the reference function that can be passed
    # by keyword (has a default, is keyword-only, or is **kwargs itself),
    # skipping "private" parameters whose names start with an underscore.
    for name, param in refsig.parameters.iteritems():
        if param.default != param.empty or param.kind in (param.VAR_KEYWORD, param.KEYWORD_ONLY):
            if param.name[0] != '_':
                kwparams.append(param)
    if mod_docs:
        refdocs = GoogleDocArgumentInjector(reference_function.__doc__)
    def decorator(f):
        sig = funcsigs.signature(f)
        fparams = []
        found_varkeyword = None
        # Splice the collected keyword parameters in place of f's **kwargs.
        for name, param in sig.parameters.iteritems():
            if param.kind == param.VAR_KEYWORD:
                fparams.extend(kwparams)
                found_varkeyword = name
            else:
                fparams.append(param)
        if not found_varkeyword:
            raise TypeError("Function has no **kwargs wildcard.")
        f.__signature__ = sig.replace(parameters=fparams)
        if mod_docs:
            docs = GoogleDocArgumentInjector(f.__doc__)
            new_args = collections.OrderedDict()
            for argname, doc in docs.args.iteritems():
                if argname == found_varkeyword:
                    # Replace the "**kwargs" doc entry with one entry per
                    # inherited parameter, falling back to a stub line when
                    # the reference function did not document it.
                    for param in kwparams:
                        default_argdoc = '%s: argument for %s' % (param.name, origname)
                        new_args[param.name] = refdocs.args.get(param.name, default_argdoc)
                else:
                    new_args[argname] = doc
            docs.args = new_args
            # Keep a history of the pre-rewrite docstrings on the function.
            if not hasattr(f, '__orig_docs'):
                f.__orig_docs = []
            f.__orig_docs.append(f.__doc__)
            f.__doc__ = docs.new_docstring()
        return f
    return decorator
def get_qualified_name(original_function):
    """Return a Sphinx ``:meth:`` cross-reference string for *original_function*.

    Bound methods include the class name in the dotted path; plain functions
    are referenced as ``module.name``.
    """
    if inspect.ismethod(original_function):
        parts = [original_function.__module__,
                 original_function.im_class.__name__,
                 original_function.__name__]
        return ':meth:`%s`' % '.'.join(parts)
    qualname = '%s.%s' % (original_function.__module__, original_function.__name__)
    return ':meth:`%s`' % qualname
class DocInherit(object):
"""
Allows methods to inherit docstrings from their superclasses
FROM http://code.activestate.com/recipes/576862/
"""
def __init__(self, mthd):
self.mthd = mthd
self.name = mthd.__name__
def __get__(self, obj, cls):
if obj:
return self.get_with_inst(obj, cls)
else:
return self.get_no_inst(cls)
def get_with_inst(self, obj, cls):
overridden = getattr(super(cls, obj), self.name, None)
@wraps |
alexdzul/friends | friends/apps/phonebook/admin.py | Python | mit | 299 | 0 | from django.contrib import admin
from friends.apps.phonebook.models import Contact
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
    """Admin configuration for phonebook Contact entries.

    (Class header and search_fields were reconstructed from garbled lines.)
    """
    # Columns shown in the changelist view.
    list_display = ('id', 'first_name', 'last_name', 'phone_number')
    # Sidebar filter on surname.
    list_filter = ('last_name', )
    # Free-text search over both name fields.
    search_fields = ['first_name', 'last_name']
oblique-labs/pyVM | rpython/rtyper/test/test_llann.py | Python | mit | 15,371 | 0.002797 | import py
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation
from rpython.conftest import option
from rpython.rtyper.annlowlevel import (annotate_lowlevel_helper,
MixLevelHelperAnnotator, PseudoHighLevelCallable, llhelper,
cast_instance_to_base_ptr, cast_base_ptr_to_instance)
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rtyper.lltypesystem.lltype import *
from rpython.rtyper.rclass import fishllattr, OBJECTPTR
from rpython.rtyper.test.test_llinterp import interpret
from rpython.translator.translator import TranslationContext
# helpers
def annotated_calls(ann, ops=('simple_call',)):
    """Yield every operation in *ann*'s annotated blocks whose opname is in *ops*.

    Args:
        ann: annotator whose ``annotated`` mapping holds flow-graph blocks.
        ops (tuple[str]): operation names to select.

    Bug fix: the original default was ``('simple_call,')`` -- a plain STRING,
    not a tuple -- so ``op.opname in ops`` did substring matching and names
    like ``'call'`` matched spuriously. The default is now a proper 1-tuple.
    """
    for block in ann.annotated:
        for op in block.operations:
            if op.opname in ops:
                yield op
def derived(op, orig):
    """Return the callable in op.args[0] if its name starts with *orig*, else None."""
    func = op.args[0].value
    return func if func.__name__.startswith(orig) else None
class TestLowLevelAnnotateTestCase:
from rpython.annotator.annrpython import RPythonAnnotator
    def annotate(self, ll_function, argtypes):
        """Annotate *ll_function* for *argtypes* and return the annotation
        bound to its return variable (an annmodel Some* instance)."""
        self.a = self.RPythonAnnotator()
        graph = annotate_lowlevel_helper(self.a, ll_function, argtypes)
        # Pop up the flow-graph viewer when the suite runs with --view.
        if option.view:
            self.a.translator.view()
        return self.a.binding(graph.getreturnvar())
def test_simple(self):
S = GcStruct("s", ('v', Signed))
def llf():
s = malloc(S)
return s.v
s = self.annotate(llf, [])
assert s.knowntype == int
def test_simple2(self):
S = Struct("s", ('v', Signed))
S2 = GcStruct("s2", ('a', S), ('b', S))
def llf():
s = malloc(S2)
return s.a.v+s.b.v
s = self.annotate(llf, [])
assert s.knowntype == int
def test_array(self):
A = GcArray(('v', Signed))
def llf():
a = malloc(A, 1)
return a[0].v
s = self.annotate(llf, [])
assert s.knowntype == int
def test_array_longlong(self):
from rpython.rlib.rarithmetic import r_longlong
A = GcArray(('v', Signed))
one = r_longlong(1)
def llf():
a = malloc(A, one)
return a[0].v
s = self.annotate(llf, [])
assert s.knowntype == int
def test_prim_array(self):
A = GcArray(Signed)
def llf():
a = malloc(A, 1)
return a[0]
s = self.annotate(llf, [])
assert s.knowntype == int
def test_prim_array_setitem(self):
A = GcArray(Signed)
def llf():
a = malloc(A, 1)
a[0] = 3
return a[0]
s = self.annotate(llf, [])
assert s.knowntype == int
def test_cast_simple_widening(self):
S2 = Struct("s2", ('a', Signed))
S1 = Struct("s1", ('sub1', S2), ('sub2', S2))
PS1 = Ptr(S1)
PS2 = Ptr(S2)
def llf(p1):
p2 = p1.sub1
p3 = cast_pointer(PS1, p2)
return p3
s = self.annotate(llf, [SomePtr(PS1)])
assert isinstance(s, SomePtr)
assert s.ll_ptrtype == PS1
def test_cast_simple_widening_from_gc(self):
S2 = GcStruct("s2", ('a', Signed))
S1 = GcStruct("s1", ('sub1', S2), ('x', Signed))
PS1 = Ptr(S1)
def llf():
p1 = malloc(S1)
p2 = p1.sub1
p3 = cast_pointer(PS1, p2)
return p3
s = self.annotate(llf, [])
assert isinstance(s, SomePtr)
assert s.ll_ptrtype == PS1
def test_cast_pointer(self):
S3 = GcStruct("s3", ('a', Signed))
S2 = GcStruct("s3", ('sub', S3))
S1 = GcStruct("s1", ('sub', S2))
PS1 = Ptr(S1)
PS2 = Ptr(S2)
PS3 = Ptr(S3)
def llf():
p1 = malloc(S1)
p2 = p1.sub
p3 = p2.sub
p12 = cast_pointer(PS1, p2)
p13 = cast_pointer(PS1, p3)
p21 = cast_pointer(PS2, p1)
p23 = cast_pointer(PS2, p3)
p31 = cast_pointer(PS3, p1)
p32 = cast_pointer(PS3, p2)
return p12, p13, p21, p23, p31, p32
s = self.annotate(llf, [])
assert [x.ll_ptrtype for x in s.items] == [PS1, PS1, PS2, PS2, PS3, PS3]
def test_array_length(self):
A = GcArray(('v', Signed))
def llf():
a = malloc(A, 1)
return len(a)
s = self.annotate(llf, [])
assert s.knowntype == int
def test_funcptr(self):
F = FuncType((Signed,), Signed)
PF = Ptr(F)
def llf(p):
return p(0)
s = self.annotate(llf, [SomePtr(PF)])
assert s.knowntype == int
def test_ll_calling_ll(self):
A = GcArray(Float)
B = GcArray(Signed)
def ll_make(T, n):
x = malloc(T, n)
return x
def ll_get(T, x, i):
return x[i]
def llf():
a = ll_make(A, 3)
b = ll_make(B, 2)
a[0] = 1.0
b[1] = 3
y0 = ll_get(A, a, 1)
y1 = ll_get(B, b, 1)
#
a2 = ll_make(A, 4)
a2[0] = 2.0
return ll_get(A, a2, 1)
s = self.annotate(llf, [])
a = self.a
assert s == annmodel.SomeFloat()
seen = {}
ngraphs = len(a.translator.graphs)
vTs = []
for call in annotated_calls(a):
if derived(call, "ll_"):
func, T = [x.value for x in call.args[0:2]]
if (func, T) in seen:
continue
seen[func, T] = True
desc = a.bookkeeper.getdesc(func)
g = desc.specialize([a.binding(x) for x in call.args[1:]])
args = g.getargs()
rv = g.getreturnvar()
if func is ll_get:
vT, vp, vi = args
assert a.binding(vT) == a.bookkeeper.immutablevalue(T)
assert a.binding(vi).knowntype == int
assert a.binding(vp).ll_ptrtype.TO == T
assert a.binding(rv) == lltype_to_annotation(T.OF)
elif func is ll_make:
vT, vn = args
assert a.binding(vT) == a.bookkeeper.immutablevalue(T)
assert a.binding(vn).knowntype == int
assert a.binding(rv).ll_ptrtype.TO == T
else:
assert False, func
vTs.append(vT)
assert len(seen) == 4
return a, vTs # reused by a test in test_rtyper
def test_ll_calling_ll2(self):
A = GcArray(Float)
B = GcArray(Signed)
def ll_make(T, n):
x = malloc(T, n)
return x
def ll_get(x, i):
return x[i]
def makelen4(T):
return ll_make(T, 4)
def llf():
a = ll_make(A, 3)
b = ll_make(B, 2)
a[0] = 1.0
b[1] = 3
y0 = ll_get(a, 1)
| y1 = ll_get(b, 1)
#
a2 = makelen4(A)
a2[0] = 2.0
return ll_get(a2, 1)
s = self.annotate(llf, [])
a = self.a
assert s == annmodel.SomeFloat()
seen = {}
def q(v):
s = a.binding(v)
if | s.is_constant():
return s.const
else:
return s.ll_ptrtype
vTs = []
for call in annotated_calls(a):
if derived(call, "ll_") or derived(call, "makelen4"):
func, T = [q(x) for x in call.args[0:2]]
if (func, T) in seen:
continue
seen[func, T] = True
desc = a.bookkeeper.getdesc(func)
g = desc.specialize([a.binding(x) for x in call.args[1:]])
args = g.getargs()
rv = g.getreturnvar()
if func is ll_make:
vT, vn = args
assert a.binding(vT) == a.bookkeeper.immutablevalue(T)
assert a.binding(vn).knowntype == int
assert a.binding( |
mjordan/linetweetbot | line_tweetbot.py | Python | unlicense | 3,096 | 0.003553 | #!/usr/bin/env python
from twitter import *
"""
Config variables
"""
# Set up authentication for this Twitter app.
oa_access_token = 'samplee320asuyiuwepjdiwu2'
oa_access_token_secret = 'samplemnbxcpw8383'
consumer_key = 'samplepekdncisyqgeq8d44d'
consumer_secret = 'sampleooojwydcw53ip'
# We read and write to the same data file, popping the first
# line off it and tweeting that line. If your data file is not
# in the same directory as this script, use a full path.
data_file_name = 'data.txt'
# String tacked onto the end of tweets to indicate that the
# sentence is comprised of multiple tweets. Be sure to include
# the leading space.
tweet_separator = ' [...]'
"""
Functions
"""
def get_chunks(line, tweet_separator=' [...]'):
    """
    Break *line* into tweet-sized chunks of at most 140 characters.

    Chunks are split on spaces so words are never cut in half; every chunk
    except the last gets *tweet_separator* appended to signal continuation.
    (Function header was reconstructed from a garbled source line; the
    separator is now also a parameter, defaulting to the module setting.)

    Args:
        line (str): sentence to split, newline already stripped.
        tweet_separator (str): continuation marker appended to partial
            chunks; defaults to the module-level ``tweet_separator`` value.

    Returns:
        list[str]: the tweet-sized chunks, in order.
    """
    # Reserve room for the separator in every chunk.
    chunk_length = 140 - len(tweet_separator)
    line_length = len(line)
    # If the line fits in one tweet, return it here.
    if line_length < 140:
        return [line]
    # In the script's main logic, we loop through this list and tweet
    # each entry.
    tweetable_chunks = []
    # Break up the current line into tweets, ensuring that each tweet
    # breaks on a space, not within a word.
    line_remainder = line
    while len(line_remainder) > chunk_length:
        # Get the first chunk_length characters in the line.
        raw_slice = line[:chunk_length]
        # Find the position of the last space in the chunk.
        # NOTE(review): if the slice contains no space, rfind returns -1 and
        # the chunk degenerates to line[:-1] -- presumably input is always
        # prose with spaces; confirm before feeding unbroken strings.
        last_space = raw_slice.rfind(' ')
        # Remove whatever in the line comes after the last space in
        # the chunk.
        trimmed_slice = line[:last_space]
        # Add the remaining chunk, plus tweet_separator, to the list of tweets.
        tweet = trimmed_slice.strip() + tweet_separator
        tweetable_chunks.append(tweet)
        # Get the string that follows the last space and reinitialize
        # line with it.
        line_remainder = line[last_space:]
        line = line_remainder
    # When line_remainder is less than chunk_length, add it to
    # the list of tweets.
    tweetable_chunks.append(line_remainder.strip())
    return tweetable_chunks
"""
Main script
"""
if __name__ == '__main__':
    # Open the data file and put the contents into a list
    # so we can grab the first line.
    with open(data_file_name) as f:
        lines = f.readlines()
    # If there is no data in the file, don't go any further.
    # (The original bare ``exit`` was a no-op expression, which let an
    # empty file fall through to lines.pop(0) and crash with IndexError.)
    if not lines:
        raise SystemExit
    # Grab the first line.
    line = lines.pop(0)
    tweets = get_chunks(line.rstrip())
    # Now that we have removed the first line, save the remaining ones
    # back to the same file. Use a context manager so the file is flushed
    # and closed even if tweeting below raises.
    with open(data_file_name, 'w') as output_file:
        for write_line in lines:
            output_file.write(write_line)
    # Send the tweet(s).
    twitter = Twitter(auth=OAuth(oa_access_token, oa_access_token_secret,
                                 consumer_key, consumer_secret))
    for tweet in tweets:
        twitter.statuses.update(status=tweet.strip())
lino-framework/book | lino_book/projects/lets2/__init__.py | Python | bsd-2-clause | 86 | 0 | """This is the **second** application used as example in
:doc:`/dev/lets/index`.
"""
|
f-prettyland/angr | angr/simos/windows.py | Python | bsd-2-clause | 20,273 | 0.00291 |
import os
import logging
import claripy
from ..errors import (
AngrSimOSError,
TracerEnvironmentError,
SimSegfaultException,
SimUnsupportedError,
SimZeroDivisionException,
)
from .. import sim_options as o
from ..tablespecs import StringTableSpec
from ..procedures import SIM_LIBRARIES as L
from .simos import SimOS
_l = logging.getLogger('angr.simos.windows')
class SimWindows(SimOS):
"""
Environemnt for the Windows Win32 subsystem. Does not support syscalls currently.
"""
    def __init__(self, project):
        """ Constructor.

        Args:
            project: the angr project this OS model belongs to.
        """
        super(SimWindows, self).__init__(project, name='Win32')
        # Address of KiUserExceptionDispatcher; set by configure_project().
        self._exception_handler = None
        # Addresses of CRT globals (_fmode/_commode/_acmdln/_wcmdln),
        # resolved or minted by configure_project().
        self.fmode_ptr = None
        self.commode_ptr = None
        self.acmdln_ptr = None
        self.wcmdln_ptr = None
    def configure_project(self):
        """Install mandatory kernel32/ntdll hooks and resolve CRT globals.

        Hooks GetProcAddress/LoadLibraryA/LoadLibraryExW and forcibly
        replaces KiUserExceptionDispatcher, then records the addresses of
        the CRT mode/command-line globals for later use.
        """
        super(SimWindows, self).configure_project()
        # here are some symbols which we MUST hook, regardless of what the user wants
        self._weak_hook_symbol('GetProcAddress', L['kernel32.dll'].get('GetProcAddress', self.arch))
        self._weak_hook_symbol('LoadLibraryA', L['kernel32.dll'].get('LoadLibraryA', self.arch))
        self._weak_hook_symbol('LoadLibraryExW', L['kernel32.dll'].get('LoadLibraryExW', self.arch))
        self._exception_handler = self._find_or_make('KiUserExceptionDispatcher')
        # replace=True: the dispatcher must be ours even if already hooked.
        self.project.hook(self._exception_handler,
                          L['ntdll.dll'].get('KiUserExceptionDispatcher', self.arch),
                          replace=True)
        self.fmode_ptr = self._find_or_make('_fmode')
        self.commode_ptr = self._find_or_make('_commode')
        self.acmdln_ptr = self._find_or_make('_acmdln')
        self.wcmdln_ptr = self._find_or_make('_wcmdln')
def _find_or_make(self, name):
sym = self.project.loader.find_symbol(name)
if sym is None:
return self.project.loader.extern_object.get_pseudo_addr(name)
else:
return sym.rebased_addr
# pylint: disable=arguments-differ
def state_entry(self, args=None, env=None, argc=None, **kwargs):
state = super(SimWindows, self).state_entry(**kwargs)
if args is None: args = []
if env is None: env = {}
# Prepare argc
if argc is None:
argc = claripy.BVV(len(args), state.arch.bits)
elif type(argc) in (int, long): # pylint: disable=unidiomatic-typecheck
argc = claripy.BVV(argc, state.arch.bits)
# Make string table for args and env
table = StringTableSpec()
table.append_args(args)
table.append_env(env)
# calculate full command line, since this is windows and that's how everything works
cmdline = claripy.BVV(0, 0)
for arg in args:
if cmdline.length != 0:
cmdline = cmdline.concat(claripy.BVV(' '))
if type(arg) is str:
if '"' in arg or '\0' in arg:
raise AngrSimOSError("Can't handle windows args with quotes or nulls in them")
arg = claripy.BVV(arg)
elif isinstance(arg, claripy.ast.BV):
for byte in arg.chop(8):
state.solver.add(byte != claripy.BVV('"'))
state.solver.add(byte != claripy.BVV(0, 8))
else:
raise TypeError("Argument must be str or bitvector")
cmdline = cmdline.concat(claripy.BVV('"'), arg, claripy.BVV('"'))
cmdline = cmdline.concat(claripy.BVV(0, 8))
wcmdline = claripy.Concat(*(x.concat(0, 8) for x in cmdline.chop(8)))
if not state.satisfiable():
raise AngrSimOSError("Can't handle windows args with quotes or nulls in them")
# Dump the table onto the stack, calculate pointers to args, env
stack_ptr = state.regs.sp
stack_ptr -= 16
state.memory.store(stack_ptr, claripy.BVV(0, 8*16))
stack_ptr -= cmdline.length / 8
state.memory.store(stack_ptr, cmdline)
state.mem[self.acmdln_ptr].long = stack_ptr
stack_ptr -= wcmdline.length / 8
state.memory.store(stack_ptr, wcmdline)
state.mem[self.wcmdln_ptr].long = stack_ptr
argv = table.dump(state, stack_ptr)
envp = argv + ((len(args) + 1) * state.arch.bytes)
# Put argc on stack and fix the stack pointer
newsp = argv - state.arch.bytes
state.memory.store(newsp, argc, endness=state.arch.memory_endness)
state.regs.sp = newsp
# store argc argv envp in the posix plugin
state.posix.argv = argv
state.posix.argc = argc
state.posix.environ = envp
state.regs.sp = state.regs.sp - 0x80 # give us some stack space to work with
# fake return address from entry point
return_addr = self.return_deadend
kernel32 = self.project.loader.shared_objects.get('kernel32.dll', None)
if kernel32:
# some programs will use the return address from start to find the kernel32 base
return_addr = kernel32.get_symbol('ExitProcess').rebased_addr
if state.arch.name == 'X86':
state.mem[state.regs.sp].dword = return_addr
# first argument appears to be PEB
tib_addr = state.regs.fs.concat(state.solver.BVV(0, 16))
peb_addr = state.mem[tib_addr + 0x30].dword.resolved
state.mem[state.regs.sp + 4].dword = peb_addr
return state
def state_blank(self, **kwargs):
if self.project.loader.main_object.supports_nx:
add_options = kwargs.get('add_options', set())
add_options.add(o.ENABLE_NX)
kwargs['add_options'] = add_options
state = super(SimWindows, self).state_blank(**kwargs)
# yikes!!!
fun_stuff_addr = state.libc.mmap_base
if fun_stuff_addr & 0xffff != 0:
fun_stuff_addr += 0x10000 - (fun_stuff_addr & 0xffff)
state.memory.map_region(fun_stuff_addr, 0x2000, claripy.BVV(3, 3))
TIB_addr = fun_stuff_addr
PEB_addr = fun_stuff_addr + 0x1000
if state.arch.name == 'X86': |
LDR_addr = fun_stuff_addr + 0x2000
state.mem[TIB_addr + 0].dword = -1 # Initial SEH frame
state.mem[TIB_addr + 4].dword = state.regs.sp # stack base (high addr)
state.mem[TIB_addr + 8].dword = state.regs.sp - 0x100000 # stack limit (low addr)
state.mem[TIB_addr + 0x18].dword = TIB_addr # myself!
state.mem[TIB_addr + 0x24 | ].dword = 0xbad76ead # thread id
if self.project.loader.tls_object is not None:
state.mem[TIB_addr + 0x2c].dword = self.project.loader.tls_object.user_thread_pointer # tls array pointer
state.mem[TIB_addr + 0x30].dword = PEB_addr # PEB addr, of course
state.regs.fs = TIB_addr >> 16
state.mem[PEB_addr + 0xc].dword = LDR_addr
# OKAY IT'S TIME TO SUFFER
# http://sandsprite.com/CodeStuff/Understanding_the_Peb_Loader_Data_List.html
THUNK_SIZE = 0x100
num_pe_objects = len(self.project.loader.all_pe_objects)
thunk_alloc_size = THUNK_SIZE * (num_pe_objects + 1)
string_alloc_size = sum(len(obj.binary)*2 + 2 for obj in self.project.loader.all_pe_objects)
total_alloc_size = thunk_alloc_size + string_alloc_size
if total_alloc_size & 0xfff != 0:
total_alloc_size += 0x1000 - (total_alloc_size & 0xfff)
state.memory.map_region(LDR_addr, total_alloc_size, claripy.BVV(3, 3))
state.libc.mmap_base = LDR_addr + total_alloc_size
string_area = LDR_addr + thunk_alloc_size
for i, obj in enumerate(self.project.loader.all_pe_objects):
# Create a LDR_MODULE, we'll handle the links later...
obj.module_id = i+1 # HACK HACK HACK HACK
addr = LDR_addr + (i+1) * THUNK_SIZE
state.mem[addr+0x18].dword = obj.mapped_base
state.mem[addr+0x1C].dword = obj.entry
# Allocate some space from the same region to store the paths
path = obj.binary # we're |
onefinestay/sphinx-nameko-theme | docs/source/conf.py | Python | mit | 2,061 | 0 | # -*- coding: utf-8 -*-
"""Sphinx Readable Theme documentation build configuration file.
This file is execfile()d with the current directory set to its containing dir.
"""
import os
import sys
import pkg_resources
# Adding this directory to the sys path, to build autodoc of example module.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# -- General configurati | on ----------------------------------------------------
# Defining Sphinx extension modules.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.intersphinx']
autodoc_default_flags = ['members', 'show-inher | itance']
autodoc_member_order = 'bysource'
# Don't display module names before objects titles, it's more readable.
add_module_names = False
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
}
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sphinx Nameko Theme'
copyright = u'2015'
# The version info for the project, acts as replacement for |version| and
# |release|, also used in various other places throughout the built documents.
#
# The short X.Y version.
version = pkg_resources.get_distribution('sphinx_nameko_theme').version
# The full version, including alpha/beta/rc tags.
release = version
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'nameko'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxnamekothemedoc'
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
'index',
'Sphinx Nameko Theme',
u'Sphinx Nameko Theme Documentation',
[u'onefinestay'],
1,
)
]
|
redbo/swift | swift/common/constraints.py | Python | apache-2.0 | 15,176 | 0.000066 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import time
import six
from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError
from six.moves import urllib
from swift.common import utils, exceptions
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
HTTPRequestEntityTooLarge, HTTPPreconditionFailed, HTTPNotImplemented, \
HTTPException
MAX_FILE_SIZE = 5368709122
MAX_META_NAME_LENGTH = 128
MAX_META_VALUE_LENGTH = 256
MAX_META_COUNT = 90
MAX_META_OVERALL_SIZE = 4096
MAX_HEADER_SIZE = 8192
MAX_OBJECT_NAME_LENGTH = 1024
CONTAINER_LISTING_LIMIT = 10000
ACCOUNT_LISTING_LIMIT = 10000
MAX_ACCOUNT_NAME_LENGTH = 256
MAX_CONTAINER_NAME_LENGTH = 256
VALID_API_VERSIONS = ["v1", "v1.0"]
EXTRA_HEADER_COUNT = 0
# If adding an entry to DEFAULT_CONSTRAINTS, note that
# these constraints are automatically published by the
# proxy server in responses to /info requests, with values
# updated by reload_constraints()
DEFAULT_CONSTRAINTS = {
'max_file_size': MAX_FILE_SIZE,
'max_meta_name_length': MAX_META_NAME_LENGTH,
'max_meta_value_length': MAX_META_VALUE_LENGTH,
'max_meta_count': MAX_META_COUNT,
'max_meta_overall_size': MAX_META_OVERALL_SIZE,
'max_header_size': MAX_HEADER_SIZE,
'max_object_name_length': MAX_OBJECT_NAME_LENGTH,
'container_listing_limit': CONTAINER_LISTING_LIMIT,
'account_listing_limit': ACCOUNT_LISTING_LIMIT,
'max_account_name_length': MAX_ACCOUNT_NAME_LENGTH,
'max_container_name_length': MAX_CONTAINER_NAME_LENGTH,
'valid_api_versions': VALID_API_VERSIONS,
'extra_header_count': EXTRA_HEADER_COUNT,
}
SWIFT_CONSTRAINTS_LOADED = False
OVERRIDE_CONSTRAINTS = {} # any constraints overridden by SWIFT_CONF_FILE
EFFECTIVE_CONSTRAINTS = {} # populated by reload_constraints
def reload_constraints():
    """
    Parse SWIFT_CONF_FILE and reset module level global constraint attrs,
    populating OVERRIDE_CONSTRAINTS AND EFFECTIVE_CONSTRAINTS along the way.

    Side effects: rewrites SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS,
    EFFECTIVE_CONSTRAINTS and, via globals(), the UPPER_CASE module-level
    constant for every constraint name.
    NOTE(review): EFFECTIVE_CONSTRAINTS is updated but never cleared here,
    so stale keys from a previous call would survive — confirm intended.
    """
    global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS
    SWIFT_CONSTRAINTS_LOADED = False
    OVERRIDE_CONSTRAINTS = {}
    constraints_conf = ConfigParser()
    # ConfigParser.read() returns the list of files successfully parsed;
    # an empty list (missing/unreadable conf) leaves all defaults in force.
    if constraints_conf.read(utils.SWIFT_CONF_FILE):
        SWIFT_CONSTRAINTS_LOADED = True
        for name in DEFAULT_CONSTRAINTS:
            try:
                value = constraints_conf.get('swift-constraints', name)
            except NoOptionError:
                # Option absent: keep the default for this one constraint.
                pass
            except NoSectionError:
                # We are never going to find the section for another option
                break
            else:
                # Values are ints where possible, otherwise CSV lists
                # (e.g. valid_api_versions).
                try:
                    value = int(value)
                except ValueError:
                    value = utils.list_from_csv(value)
                OVERRIDE_CONSTRAINTS[name] = value
    for name, default in DEFAULT_CONSTRAINTS.items():
        value = OVERRIDE_CONSTRAINTS.get(name, default)
        EFFECTIVE_CONSTRAINTS[name] = value
        # "globals" in this context is module level globals, always.
        globals()[name.upper()] = value
# Populate the effective constraints once at import time.
reload_constraints()
# Maximum slo segments in buffer
MAX_BUFFERED_SLO_SEGMENTS = 10000
#: Query string format= values to their corresponding content-type values
FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json',
'xml': 'application/xml'}
# By default the maximum number of allowed headers depends on the number of max
# allowed metadata settings plus a default value of 36 for swift internally
# generated headers and regular http headers. If for some reason this is not
# enough (custom middleware for example) it can be increased with the
# extra_header_count constraint.
MAX_HEADER_COUNT = MAX_META_COUNT + 36 + max(EXTRA_HEADER_COUNT, 0)
def check_metadata(req, target_type):
    """
    Check metadata sent in the request headers. This should only check
    that the metadata in the request given is valid. Checks against
    account/container overall metadata should be forwarded on to its
    respective server to be checked.

    :param req: request object
    :param target_type: str: one of: object, container, or account: indicates
                        which type the target storage for the metadata is
    :returns: HTTPBadRequest with bad metadata otherwise None
    """
    target_type = target_type.lower()
    prefix = 'x-%s-meta-' % target_type
    meta_count = 0
    meta_size = 0
    for key, value in req.headers.items():
        # Any header value (metadata or not) may not exceed the header limit.
        if (isinstance(value, six.string_types)
                and len(value) > MAX_HEADER_SIZE):
            return HTTPBadRequest(body='Header value too long: %s' %
                                  key[:MAX_META_NAME_LENGTH],
                                  request=req, content_type='text/plain')
        # Only x-<type>-meta-* headers are metadata; skip everything else.
        if not key.lower().startswith(prefix):
            continue
        key = key[len(prefix):]
        if not key:
            return HTTPBadRequest(body='Metadata name cannot be empty',
                                  request=req, content_type='text/plain')
        # Account/container metadata must be valid UTF-8; object metadata
        # is not checked here.
        bad_key = not check_utf8(key)
        bad_value = value and not check_utf8(value)
        if target_type in ('account', 'container') and (bad_key or bad_value):
            return HTTPBadRequest(body='Metadata must be valid UTF-8',
                                  request=req, content_type='text/plain')
        meta_count += 1
        meta_size += len(key) + len(value)
        if len(key) > MAX_META_NAME_LENGTH:
            return HTTPBadRequest(
                body='Metadata name too long: %s%s' % (prefix, key),
                request=req, content_type='text/plain')
        if len(value) > MAX_META_VALUE_LENGTH:
            return HTTPBadRequest(
                body='Metadata value longer than %d: %s%s' % (
                    MAX_META_VALUE_LENGTH, prefix, key),
                request=req, content_type='text/plain')
        if meta_count > MAX_META_COUNT:
            return HTTPBadRequest(
                body='Too many metadata items; max %d' % MAX_META_COUNT,
                request=req, content_type='text/plain')
        if meta_size > MAX_META_OVERALL_SIZE:
            return HTTPBadRequest(
                body='Total metadata too large; max %d'
                % MAX_META_OVERALL_SIZE,
                request=req, content_type='text/plain')
    return None
def check_object_creation(req, object_name):
"""
Check to ensure that everything is alright about an object to be created.
:param req: HTTP request object
:param object_name: name of object to be created
:returns: HTTPRequestEntityTooLarge -- the object is too large
:returns: HTTPLengthRequired -- missing content-length header and not
a chunked request
:returns: HTTPBadRequest -- missing or bad content-type header, or
bad metadata
:returns: HTTPNotImplemented -- unsupported transfer-encoding header value
"""
try:
ml = req.message_length()
except ValueError as e:
return HTTPBadRequest(request=req, content_type='text/plain',
body=str(e))
except AttributeError as e:
return HTTPNotImplemented(request=req, content_type='text/plain',
body=str(e))
if ml is not None and ml > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(body='Your request is too large.',
request=req,
content_type='text/plain')
if req.content_length is None and \
req.headers.get('transfer-encoding') != |
exelearning/iteexe | exe/webui/templatemanagerpage.py | Python | gpl-2.0 | 11,217 | 0.002942 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ===========================================================================
# eXeLearning
# Copyright 2017, CeDeC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
# ===========================================================================
"""
The TemplateManagerPage is responsible for managing templates
"""
import json
import logging
import os
from zipfile import ZipFile
from exe.engine.path import Path
from exe.engine.template import Template
from exe.webui.livepage import allSessionClients
from exe.webui.renderable import RenderableResource
from exe import globals as G
from exe.engine.package import Package
from exe.export.pages import forbiddenPageNames
log = logging.getLogger(__name__)
class ImportTemplateError(Exception):
    """Raised when an uploaded .elt file cannot be imported as a template."""
class ImportTemplateExistsError(ImportTemplateError):
    """Import failed because an equivalent local template is already installed.

    Carries the conflicting local template and its directory so callers can
    inspect (or offer to replace) the existing installation.
    """

    def __init__(self, local_template, absolute_template_dir, message=''):
        self.local_template = local_template
        self.absolute_template_dir = absolute_template_dir
        # Fall back to a generic description when no message is supplied.
        self.message = message if message != '' else \
            u'Error importing template, local template already exists. '

    def __str__(self):
        return self.message
class TemplateManagerPage(RenderableResource):
"""
The TemplateManagerPage is responsible for managing templates
import / export / delete
"""
name = 'templatemanager'
    def __init__(self, parent):
        """
        Initialize the resource and the mutable request state.

        The attributes below carry state between a POST (which executes an
        action) and the following GET (which renders the resulting panel).
        """
        RenderableResource.__init__(self, parent)
        # Panel the next render_GET should produce ('Properties',
        # 'PreExport' or default list view).
        self.action = ""
        # Data for the properties/pre-export panels.
        self.properties = ""
        self.template = ""
        # Live client connection; set elsewhere — used to push JS to the UI.
        self.client = None
def render_GET(self, request):
"""Called for all requests to this object
Every JSON response sent must have an 'action' field, which value will
determine the panel to be displayed in the WebUI
"""
if self.action == 'Properties':
response = json.dumps({
'success': True,
'properties': self.properties,
'template': self.template,
'action': 'Properties'})
elif self.action == 'PreExport':
response = json.dumps({
'success': True,
'properties': self.properties,
'template': self.template,
'action': 'PreExport'})
else:
response = json.dumps({
'success': True,
'templates': self.renderListTemplates(),
'action': 'List'})
self.action = 'List'
return response
    def render_POST(self, request):
        """ Called on client form submit
        Every form received must have an 'action' field, which value determines
        the function to be executed in the server side.
        The self.action attribute will be sent back to the client (see render_GET)
        """
        # Refresh the client-side panel first so the UI reflects the action.
        self.reloadPanel(request.args['action'][0])
        if request.args['action'][0] == 'doExport':
            self.doExportTemplate(request.args['template'][0],
                                  request.args['filename'][0])
        elif request.args['action'][0] == 'doDelete':
            self.doDeleteTemplate(request.args['template'][0])
        elif request.args['action'][0] == 'doImport':
            # Import reports success/failure straight to the client via a
            # JS alert rather than through the JSON response.
            try:
                self.doImportTemplate(request.args['filename'][0])
                self.alert(
                    _(u'Success'),
                    _(u'Successfully imported template'))
            # Python 2 except syntax — this module targets Python 2.
            except Exception, e:
                self.alert(
                    _(u'Error'),
                    _(u'Error while installing template: %s') % str(e))
        elif request.args['action'][0] == 'doProperties':
            self.doPropertiesTemplate(request.args['template'][0])
        elif request.args['action'][0] == 'doPreExport':
            self.doPreExportTemplate(request.args['template'][0])
        elif request.args['action'][0] == 'doEdit':
            self.doEditTemplate(request.args['template'][0])
        elif request.args['action'][0] == 'doList':
            self.doList()
        # NOTE(review): doDeleteTemplate/doPropertiesTemplate/doPreExport/
        # doEditTemplate/doList are defined outside this excerpt — verify
        # they exist on this class.
        return ''
def reloadPanel(self, action):
self.client.sendScript('Ext.getCmp("templatemanagerwin").down("form").reload("%s")' % (action),
filter_func=allSessionClients)
def alert(self, title, mesg):
self.client.sendScript('Ext.Msg.alert("%s","%s")' % (title, mesg),
filter_func=allSessionClients)
def renderListTemplates(self):
"""
Returns a JSON response with a list of the installed templates
and its related buttons
"""
templates = []
templateStores = self.config.templateStore.getTemplates()
for template in templateStores:
export = True
delete = False
properties = True
edit = template.isEditable()
if template.name != 'Base' and template.name != self.config.defaultContentTemplate and edit:
delete = True
if template.name != self.config.defaultContentTemplate:
templates.append({'template': template.file,
'name': template.name,
'exportButton': export,
'deleteButton': delete,
'propertiesButton': properties,
'editButton': edit})
return templates
    def doImportTemplate(self, filename):
        """ Imports an template from a ELT file
        Checks that it is a valid template file,
        that the directory does not exist (prevent overwriting)

        Raises ImportTemplateError / ImportTemplateExistsError on failure;
        partially-copied files are removed before raising.
        """
        log.debug("Import template from %s" % filename)
        filename = Path(filename)
        baseFile = filename.basename()
        absoluteTargetDir = self.config.templatesDir / baseFile
        # Opening with ZipFile is only a sanity check that the upload is a
        # zip archive; the handle itself is discarded.
        # NOTE(review): only IOError is caught — a non-zip file raises
        # zipfile.BadZipfile, which would propagate uncaught. Confirm intended.
        try:
            ZipFile(filename, 'r')
        except IOError:
            raise ImportTemplateError('Can not create dom object')
        if os.path.exists(absoluteTargetDir):
            # Never overwrite an installed template directory.
            template = Template(absoluteTargetDir)
            raise ImportTemplateExistsError(template, absoluteTargetDir, u'Template already exists')
        else:
            filename.copyfile(absoluteTargetDir)
            template = Template(absoluteTargetDir)
            if template.isValid():
                # addTemplate() returning False means a template with the
                # same name exists; undo the copy before raising.
                if not self.config.templateStore.addTemplate(template):
                    absoluteTargetDir.remove()
                    raise ImportTemplateExistsError(template, absoluteTargetDir, u'The template name already exists')
            else:
                absoluteTargetDir.remove()
                raise ImportTemplateExistsError(template, absoluteTargetDir, u'Incorrect template format')
        self.action = ""
def doExportTemplate(self, template, filename):
if filename != '':
templateExport = Template(self.config.templatesDir / template)
self.__exportTemplate(templateExport, unicode(filename))
def __exportTemplate(self, dirTemplateName, filename):
""" Exports t |
Elico-Corp/openerp-7.0 | wms/wizard/stock_to_date.py | Python | agpl-3.0 | 11,857 | 0.004218 | # -*- coding: utf-8 -*-
# © 2011 SYLEAM Info Services (http://www.Syleam.fr)
# © 2011 Sebastien LANGE (sebastien.lange@syleam.fr)
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
from openerp.osv import osv
from openerp.osv import fields
from openerp.addons.decimal_precision import decimal_precision as dp
from datetime import date
from dateutil.rrule import MO, FR
from dateutil.relativedelta import relativedelta
from openerp.tools.translate import _
class stock_to_date(osv.osv_memory):
_name = 'stock.to.date'
_description = 'Stock to date by product'
_rec_name = 'product_id'
def compute_stock_to_date(self, cr, uid, ids, context=None):
"""
Compute total quantity on lines
"""
product_obj = self.pool.get('product.product')
line_obj = self.pool.get('stock.to.date.line')
self.write(cr, uid, ids, {'stock_to_date_line_ids': [(5,)]}, context=context)
for wizard in self.browse(cr, uid, ids, context=context):
warehouse_ids = []
if not wizard.warehouse_id:
wids = self.pool.get('stock.warehouse').search(cr, uid, [], context=context)
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for w in self.pool.get('stock.warehouse').browse(cr, uid, wids, context=context):
if w.partner_address_id and w.partner_address_id.partner_id and user.company_id.partner_id == w.partner_address_id.partner_id:
warehouse_ids.append(w.id)
else:
warehouse_ids.append(wizard.warehouse_id.id)
if not warehouse_ids:
raise osv.except_osv(_('Warning !'), _('Please contact your administrator to configure warehouse in your profile.'))
tuple_warehouse_ids = tuple(warehouse_ids)
cr.execute(
"""
SELECT distinct(date_move), product_id, warehouse_id
FROM (
SELECT r.date::date AS date_move, r.product_id, %s AS warehouse_id
FROM stock_move r LEFT JOIN product_uom u ON (r.product_uom=u.id)
WHERE state IN ('confirmed','assigned','waiting','done','reserved')
AND product_id = %s
AND location_id IN (
WITH RECURSIVE location(id, parent_id) AS (
SELECT id, location_id FROM stock_location WHERE id IN (SELECT lot_stock_id FROM stock_warehouse WHERE id IN %s)
UNION
SELECT sl.id, sl.location_id FROM stock_location sl, location
WHERE sl.location_id = location.id)
SELECT id FROM location)
AND location_dest_id NOT IN (
WITH RECURSIVE location(id, parent_id) AS (
SELECT id, location_id FROM stock_location WHERE id IN (SELECT lot_stock_id FROM stock_warehouse WHERE id IN %s)
UNION
SELECT sl.id, sl.location_id FROM stock_location sl, location
WHERE sl.location_id = location.id)
SELECT id FROM location)
AND r.date::date >= %s AND r.date::date <= %s
GROUP BY r.date::date, product_id, warehouse_id
UNION ALL
SELECT r.date::date as date_move, r.product_id, %s AS warehouse_id
FROM stock_move r LEFT JOIN product_uom u on (r.product_uom=u.id)
WHERE state IN ('confirmed','assigned','waiting','done','reserved')
AND product_id = %s
AND location_dest_id IN (
WITH RECURSIVE location(id, parent_id) AS (
SELECT id, location_id FROM stock_location WHERE id IN (SELECT lot_stock_id FROM stock_warehouse WHERE id IN %s)
UNION
SELECT sl.id, sl.location_id FROM stock_location sl, location
WHERE sl.location_id = location.id)
SELECT id FROM location)
AND location_id NOT IN (
WITH RECURSIVE location(id, parent_id) AS (
SELECT id, location_id FROM stock_location WHERE id IN (SELECT lot_stock_id FROM stock_warehouse WHERE id IN %s)
UNION
SELECT sl.id, sl.location_id FROM stock_location sl, location
WHERE sl.location_id = location.id)
SELECT id FROM location)
AND r.date::date >= %s and r.date::date <= %s
GROUP B | Y r.date::date, product_id, warehouse_id
) subquery
ORDER BY date_move ASC
""",
(
tuple_warehouse_ids,
wizard.product_id.id,
tuple_wareho | use_ids,
tuple_warehouse_ids,
wizard.date_from,
wizard.date_to,
tuple_warehouse_ids,
wizard.product_id.id,
tuple_warehouse_ids,
tuple_warehouse_ids,
wizard.date_from,
wizard.date_to,
)
)
results = cr.fetchall()
today = date.today().strftime('%Y-%m-%d')
ok = False
for result in results:
if today in result:
ok = True
break
if not ok:
results.append((today, wizard.product_id.id, warehouse_ids))
for date_move, product_id, warehouse_ids in sorted(results):
ctx = context.copy()
if isinstance(warehouse_ids, (int, long)):
ctx.update({
'warehouse': warehouse_ids,
})
elif warehouse_ids and len(warehouse_ids) == 1:
ctx.update({
'warehouse': warehouse_ids[0],
})
ctx.update({
'to_date': date_move + ' 23:59:59',
'compute_child': True,
})
ctx2 = ctx.copy()
ctx2.update({
'from_date': date_move + ' 00:00:00',
})
product = product_obj.browse(cr, uid, product_id, context=ctx)
product2 = product_obj.browse(cr, uid, product_id, context=ctx2)
line_obj.create(cr, uid, {
'stock_to_date_id': wizard.id,
'date': date_move,
'virtual_available': product.virtual_available,
'incoming_qty': product2.incoming_qty,
'outgoing_qty': product2.outgoing_qty * -1,
'color': date_move == today and True or False,
}, context=context)
return True
def _get_orderpoint(self, cr, uid, ids, field_name, args, context=None):
"""
Get orderpoint for this product
"""
orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
result = {}
for wizard in self.browse(cr, uid, ids, context=context):
result[wizard.id] = orderpoint_obj.search(cr, uid, [('product_id', '=', wizard.product_id.id)], context=context)
return result
def _get_report_stock(self, cr, uid, ids, field_name, args, context=None):
"""
Get stock avalaible by location for this product
"""
report_obj = self.pool.get('wms.report.stock.available')
result = {}
for wizard in self.browse(cr, uid, ids, context=context):
result[wizard.id] = report_obj.search(cr, uid, [('usage', '=', 'internal'), ('product_id', '=', wizard.product_id.id)], context=context)
return result
_columns = {
'product_id': fields.many2one('product.product', 'Product', required=True |
ZeitOnline/zeit.website | src/zeit/website/preview.py | Python | bsd-3-clause | 377 | 0 | import grokcore.component as grok
import zeit.cms.browser.preview
import zeit.website.interfaces
@grok.adapter(zeit.website.interfaces.IWebsiteContent, basestring)
@grok.implementer(zeit.cms.browser.interfaces.IPreviewURL)
def preview_url(content, preview_type):
    """Adapt website content to its preview URL.

    Builds the URL from the 'website-<preview_type>-prefix' setting and the
    content's uniqueId.
    """
    return zeit.cms.browser.preview.prefixed_url(
        'website-%s-prefix' % preview_type, content.uniqueId)
|
hasadna/OpenCommunity | src/users/tests/invitation_test.py | Python | bsd-3-clause | 4,187 | 0.003583 | from django.core import mail
from django.test import TestCase
from users.default_roles import DefaultGroups
from users.models import Invitation, Membership, OCUser
from communities.tests.common import create_sample_community
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
class InvitationTest(TestCase):
    """Tests for sending community invitations by e-mail."""

    def setUp(self):
        self.community, self.members, self.chairmens = create_sample_community()

    def tearDown(self):
        mail.outbox = []

    def test_send_invitation(self):
        inviter = self.members[0]
        invitation = Invitation.objects.create(community=self.community,
                                               created_by=inviter,
                                               email="xxx@xyz.com")
        invitation.send(inviter)
        # Exactly one mail went out, naming the community and linking back.
        self.assertEqual(len(mail.outbox), 1)
        sent = mail.outbox[0]
        self.assertIn(self.community.name, sent.subject)
        self.assertIn(invitation.get_absolute_url(), sent.body)
class InvitationViewTest(TestCase):
    """Tests for the member-invitation HTTP endpoint."""

    def setUp(self):
        (self.community, self.members, self.chairmen) = create_sample_community()

    def tearDown(self):
        mail.outbox = []

    def post_invite(self, data=None):
        # Helper: POST the invite form; defaults to a valid payload.
        if not data:
            data = {"email": "sample@email.com",
                    "default_group_name": DefaultGroups.MEMBER,
                    "message": "the message"}
        return self.client.post(
            reverse("members", kwargs={"community_id": self.community.id}),
            data)

    def login_chairmen(self):
        self.client.login(username=self.chairmen[0].email, password="password")

    def test_view(self):
        self.login_chairmen()
        response = self.post_invite({"email": "sample@email.com",
                                     "default_group_name": DefaultGroups.MEMBER,
                                     "message": "the message"})
        self.assertEqual(Invitation.objects.all().count(), 1)
        invitation = Invitation.objects.all()[0]
        self.assertEqual(invitation.community, self.community)
        self.assertEqual(invitation.created_by, self.chairmen[0])
        self.assertEqual(invitation.message, "the message")
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(response.status_code, 200)
        # the response is an ajax response that shows the user as added
        # to the list of members
        self.assertIn("delete-invitation", response.content)
        self.assertIn("sample@email.com", response.content)

    def test_no_invite_permission(self):
        # A plain member may not invite: expect 403 and no side effects.
        self.client.login(username=self.members[6].email, password="password")
        response = self.post_invite()
        self.assertEqual(response.status_code, 403)
        self.assertEqual(len(mail.outbox), 0)
        self.assertEqual(Invitation.objects.all().count(), 0)

    def test_bad_email(self):
        self.login_chairmen()
        response = self.post_invite({"email": "not a real email",
                                     "default_group_name": DefaultGroups.MEMBER,
                                     "message": "the message"})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(_("Form error. Please supply a valid email."), response.content)

    def test_invitee_already_invited(self):
        Invitation.objects.create(community=self.community,
                                  created_by=self.chairmen[0],
                                  email="sample@email.com")
        self.login_chairmen()
        response = self.post_invite()
        self.assertEqual(response.status_code, 400)
        self.assertEqual(_("This user is already invited to this community."), response.content)

    def test_invitee_already_a_member(self):
        u = OCUser.objects.create_user("sample@email.com",
                                       "sample user", password="password")
        Membership.objects.create(user=u, community=self.community, default_group_name=DefaultGroups.MEMBER)
        self.login_chairmen()
        response = self.post_invite()
        self.assertEqual(response.status_code, 400)
        self.assertEqual(_("This user already a member of this community."), response.content)
CWSL/access-cm-tools | operations/compress_nc.py | Python | apache-2.0 | 1,765 | 0.002266 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import os
import multiprocessing as mp
import subprocess as sp
import tempfile
import shlex
import shutil
"""
Compress all netcdf files under a directories.
compress_nc.py ~/exps/access/cm_1440x1080-1/archive/ ./'
This will recursively search under that directory and compress every single netcdf file.
"""
def compress_netcdf_file(filename, compression_level=7):
    """
    Use nccopy to compress a netcdf file in place.

    The compressed copy is written to a temporary file first and only moved
    over the original once nccopy has exited successfully.
    """
    fd, tmp = tempfile.mkstemp()
    # mkstemp returns an *open* descriptor; close it so it is not leaked in
    # every pool worker (nccopy writes to the path, not the descriptor).
    os.close(fd)
    cmd = 'nccopy -d {} {} {}'.format(compression_level, filename, tmp)
    print(cmd)
    ret = sp.call(shlex.split(cmd))
    assert(ret == 0)
    # Put a file lock on 'filename'?
    shutil.move(tmp, filename)
def find_netcdf_files(path):
    """
    Return full (absolute) paths of all netcdf (.nc) files under 'path'.

    Symbolic links are skipped so a linked file is never compressed through
    the link.
    """
    netcdf_files = []
    for root, _dirs, files in os.walk(path):
        for name in files:
            if name.endswith(".nc"):
                full_path = os.path.join(os.path.abspath(root), name)
                if not os.path.islink(full_path):
                    netcdf_files.append(full_path)
    return netcdf_files
def main():
    """Compress every netCDF file found under the given directories."""
    parser = argparse.ArgumentParser()
    parser.add_argument("dirs", nargs='+',
                        help="A list of directories to search for netcdf files.")
    args = parser.parse_args()

    # Gather candidates from every directory, then deduplicate.
    candidates = []
    for directory in args.dirs:
        candidates.extend(find_netcdf_files(directory))
    unique_files = list(set(candidates))

    # Compress in parallel with one worker per CPU (Pool default).
    pool = mp.Pool()
    results = pool.map(compress_netcdf_file, unique_files)
    pool.close()
    pool.join()


if __name__ == "__main__":
    sys.exit(main())
|
wjo1212/aliyun-log-python-sdk | tests/es_migration/test_mapping_index_converter.py | Python | mit | 3,908 | 0.000512 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
import unittest
from aliyun.log.es_migration.mapping_index_converter import \
MappingIndexConverter
class TestMappingIndexConverter(unittest.TestCase):
    """Tests for MappingIndexConverter.to_index_config."""

    def test_to_index_config(self):
        # An Elasticsearch mapping covering every supported field type,
        # including ranges, geo types and nested objects.
        mapping = {
            "properties": {
                "es_text": {
                    "type": "text"
                },
                "es_keyword": {
                    "type": "keyword"
                },
                "es_long": {
                    "type": "long"
                },
                "es_integer": {
                    "type": "integer"
                },
                "es_short": {
                    "type": "short"
                },
                "es_byte": {
                    "type": "byte"
                },
                "es_double": {
                    "type": "double"
                },
                "es_float": {
                    "type": "float"
                },
                "es_half_float": {
                    "type": "half_float"
                },
                "es_scaled_float": {
                    "type": "scaled_float",
                    "scaling_factor": 100
                },
                "es_date": {
                    "type": "date",
                    "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                },
                "es_boolean": {
                    "type": "boolean"
                },
                "es_binary": {
                    "type": "binary"
                },
                "es_integer_range": {
                    "type": "integer_range"
                },
                "es_float_range": {
                    "type": "float_range"
                },
                "es_long_range": {
                    "type": "long_range"
                },
                "es_double_range": {
                    "type": "double_range"
                },
                "es_date_range": {
                    "type": "date_range",
                    "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                },
                "es_ip_range": {
                    "type": "ip_range"
                },
                "es_object": {
                    "properties": {
                        "sub_text": {"type": "text"},
                        "sub_long": {"type": "long"},
                        "sub_double": {"type": "double"},
                        "sub_boolean": {"type": "boolean"},
                        "sub_date": {
                            "type": "date",
                            "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                        },
                        "sub_byte": {"type": "byte"},
                        "sub_double_range": {
                            "type": "double_range"
                        },
                        "sub_object": {
                            "properties": {
                                "sub_text": {"type": "text"},
                                "sub_boolean": {"type": "boolean"}
                            }
                        }
                    }
                },
                "es_geo_point": {
                    "type": "geo_point"
                },
                "es_geo_shape": {
                    "type": "geo_shape"
                }
            }
        }
        index_config = MappingIndexConverter.to_index_config(mapping)
        line_config = index_config.line_config
        # Full-text line config uses the converter's default tokenizer
        # settings with Chinese support enabled.
        self.assertEqual(MappingIndexConverter.DEFAULT_TOKEN_LIST, line_config.token_list)
        self.assertTrue(line_config.chn)

    def test_to_index_config_with_none(self):
        # No mapping at all should yield no index config.
        index_config = MappingIndexConverter.to_index_config(None)
        self.assertEqual(None, index_config)
unittest.main()
|
honur/django-tenant-schemas | tenant_schemas/tests/models.py | Python | mit | 354 | 0 | from django.db import models
from tenant_schemas.models import TenantMixin
# as TenantMixin is an abstract model, it needs to be created
class Tenant(TenantMixin):
    """Concrete tenant model used by the test suite."""

    class Meta:
        app_label = 'tenant_schemas'
class NonAutoSyncTenant(TenantMixin):
    """Tenant variant whose schema is NOT created automatically on save."""
    auto_create_schema = False

    class Meta:
        app_label = 'tenant_schemas'
|
scipy/scipy-svn | scipy/sparse/csgraph.py | Python | bsd-3-clause | 2,341 | 0.000854 | """Compressed Sparse graph algorithms"""
__docformat__ = "restructuredtext en"
__all__ = ['cs_graph_components']
import numpy as np
from sparsetools import cs_graph_components as _cs_graph_components
from csr import csr_matrix
from base import isspmatrix
_msg0 = 'x must be a symmetric square matrix!'
_msg1 = _msg0 + '(has shape %s)'
def cs_graph_components(x):
    """
    Determine connected components of a graph stored as a compressed
    sparse row or column matrix. For speed reasons, the symmetry of the
    matrix x is not checked. A nonzero at index `(i, j)` means that node
    `i` is connected to node `j` by an edge. The number of rows/columns
    of the matrix thus corresponds to the number of nodes in the graph.

    Parameters
    ----------
    x : ndarray-like, 2 dimensions, or sparse matrix
        The adjacency matrix of the graph. Only the upper triangular part
        is used.

    Returns
    -------
    n_comp : int
        The number of connected components.
    label : ndarray (ints, 1 dimension)
        The label array of each connected component (-2 is used to
        indicate empty rows in the matrix: 0 everywhere, including
        diagonal). This array has the length of the number of nodes,
        i.e. one label for each node of the graph. Nodes having the same
        label belong to the same connected component.

    Notes
    -----
    The matrix is assumed to be symmetric and the upper triangular part
    of the matrix is used. The matrix is converted to a CSR matrix unless
    it is already a CSR.

    Examples
    --------
    >>> from scipy.sparse import cs_graph_components
    >>> import numpy as np
    >>> D = np.eye(4)
    >>> D[0,1] = D[1,0] = 1
    >>> cs_graph_components(D)
    (3, array([0, 0, 1, 2]))
    >>> from scipy.sparse import dok_matrix
    >>> cs_graph_components(dok_matrix(D))
    (3, array([0, 0, 1, 2]))
    """
    # Objects without a .shape cannot be adjacency matrices.
    try:
        shape = x.shape
    except AttributeError:
        raise ValueError(_msg0)
    if not ((len(x.shape) == 2) and (x.shape[0] == x.shape[1])):
        raise ValueError(_msg1 % x.shape)
    # The C routine needs CSR; convert dense/other sparse formats.
    if isspmatrix(x):
        x = x.tocsr()
    else:
        x = csr_matrix(x)
    label = np.empty((shape[0],), dtype=x.indptr.dtype)
    n_comp = _cs_graph_components(shape[0], x.indptr, x.indices, label)
    return n_comp, label
|
cournape/responses | setup.py | Python | apache-2.0 | 1,534 | 0 | #!/usr/bin/env python
"""
responses
=========
A utility library for mocking out the `requests` Python library.
:copyright: (c) 2013 Dropbox, Inc.
"""
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys
setup_requires = []
if 'test' in sys.argv:
setup_requires.append('pytest')
install_requires = [
'requests',
'mock',
'six',
]
tests_require = [
'pytest',
'pytest-cov',
'flake8',
]
class PyTest(TestCommand):
    """setuptools 'test' command that delegates to pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = ['test_responses.py']
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
setup(
name='responses',
version='0.2.2',
author='David Cramer',
description=(
'A utility library for mocking out the `requests` Python library.'
),
long_description=open('README.rst').read(),
py_modules=['responses'],
zip_safe=False,
install_requires=install_requires,
extras_require={
'tests': tests_require,
},
tests_require=tests_require,
setup_requires=setup_requires,
cmdclass={'test': PyTest},
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
interlegis/sapl | sapl/base/models.py | Python | gpl-3.0 | 12,876 | 0.000234 | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.deletion import CASCADE
from django.db.models.signals import post_migrate
from django.db.utils import DEFAULT_DB_ALIAS
from django.utils.translation import ugettext_lazy as _
import reversion
from sapl.utils import (LISTA_DE_UFS, YES_NO_CHOICES,
get_settings_auth_user_model, models_with_gr_for_model)
DOC_ADM_OSTENSIVO = 'O'
DOC_ADM_RESTRITIVO = 'R'
TIPO_DOCUMENTO_ADMINISTRATIVO = ((DOC_ADM_OSTENSIVO, _('Ostensiva')),
(DOC_ADM_RESTRITIVO, _('Restritiva')))
RELATORIO_ATOS_ACESSADOS = (('S', _('Sim')),
('N', _('Não')))
SEQUENCIA_NUMERACAO_PROTOCOLO = (('A', _('Sequencial por ano')),
('L', _('Sequencial por legislatura')),
('U', _('Sequencial único')))
SEQUENCIA_NUMERACAO_PROPOSICAO = (('A', _('Sequencial por ano para cada autor')),
('B', _('Sequencial por ano indepententemente do autor')))
ESFERA_FEDERACAO_CHOICES = (('M', _('Municipal')),
('E', _('Estadual')),
('F', _('Federal')),
)
ASSINATURA_ATA_CHOICES = (
('M', _('Mesa Diretora da Sessão')),
('P', _('Apenas o Presidente da Sessão')),
('T', _('Todos os Parlamentares Presentes na Sessão')),
)
@reversion.register()
class CasaLegislativa(models.Model):
    """Identification and contact data of the legislative house (singleton-style record)."""
    # TODO ajustar todos os max_length !!!!
    # cod_casa => id (pk)
    codigo = models.CharField(max_length=100,
                              blank=True,
                              verbose_name=_('Codigo'))
    nome = models.CharField(max_length=100, verbose_name=_('Nome'))
    sigla = models.CharField(max_length=100, verbose_name=_('Sigla'))
    endereco = models.CharField(max_length=100, verbose_name=_('Endereço'))
    cep = models.CharField(max_length=100, verbose_name=_('CEP'))
    municipio = models.CharField(max_length=50, verbose_name=_('Município'))
    uf = models.CharField(max_length=2,
                          choices=LISTA_DE_UFS,
                          verbose_name=_('UF'))
    telefone = models.CharField(
        max_length=100, blank=True, verbose_name=_('Telefone'))
    fax = models.CharField(
        max_length=100, blank=True, verbose_name=_('Fax'))
    # Logo displayed in the application header / reports.
    logotipo = models.ImageField(
        blank=True,
        upload_to='sapl/public/casa/logotipo/',
        verbose_name=_('Logotipo'))
    endereco_web = models.URLField(
        max_length=100, blank=True, verbose_name=_('HomePage'))
    email = models.EmailField(
        max_length=100, blank=True, verbose_name=_('E-mail'))
    informacao_geral = models.TextField(
        max_length=100,
        blank=True,
        verbose_name=_('Informação Geral'))

    class Meta:
        verbose_name = _('Casa Legislativa')
        verbose_name_plural = _('Casa Legislativa')
        ordering = ('id',)

    def __str__(self):
        return _('Casa Legislativa de %(municipio)s') % {
            'municipio': self.municipio}
@reversion.register()
class AppConfig(models.Model):
POLITICA_PROTOCOLO_CHOICES = (
('O', _('Sempre Gerar Protocolo')),
('C', _( | 'Perguntar se é pra gerar protocolo ao incorporar')),
('N', _('Nunca Protocolar ao incorporar uma proposição')),
)
documentos_administrativos = models.CharField(
max_length=1,
verbose_name=_('Visibilidade dos Documentos Administrativos'),
choices=TIPO_DOCUMENTO_ADMINISTRATIV | O, default='O')
estatisticas_acesso_normas = models.CharField(
max_length=1,
verbose_name=_('Estatísticas de acesso a normas'),
choices=RELATORIO_ATOS_ACESSADOS, default='N')
sequencia_numeracao_proposicao = models.CharField(
max_length=1,
verbose_name=_('Sequência de numeração de proposições'),
choices=SEQUENCIA_NUMERACAO_PROPOSICAO, default='A')
sequencia_numeracao_protocolo = models.CharField(
max_length=1,
verbose_name=_('Sequência de numeração de protocolos'),
choices=SEQUENCIA_NUMERACAO_PROTOCOLO, default='A')
inicio_numeracao_protocolo = models.PositiveIntegerField(
verbose_name=_('Início da numeração de protocolo'),
default=1
)
esfera_federacao = models.CharField(
max_length=1,
blank=True,
default="",
verbose_name=_('Esfera Federação'),
choices=ESFERA_FEDERACAO_CHOICES)
# TODO: a ser implementado na versão 3.2
# painel_aberto = models.BooleanField(
# verbose_name=_('Painel aberto para usuário anônimo'),
# choices=YES_NO_CHOICES, default=False)
texto_articulado_proposicao = models.BooleanField(
verbose_name=_('Usar Textos Articulados para Proposições'),
choices=YES_NO_CHOICES, default=False)
texto_articulado_materia = models.BooleanField(
verbose_name=_('Usar Textos Articulados para Matérias'),
choices=YES_NO_CHOICES, default=False)
texto_articulado_norma = models.BooleanField(
verbose_name=_('Usar Textos Articulados para Normas'),
choices=YES_NO_CHOICES, default=True)
proposicao_incorporacao_obrigatoria = models.CharField(
verbose_name=_('Regra de incorporação de proposições e protocolo'),
max_length=1, choices=POLITICA_PROTOCOLO_CHOICES, default='O')
assinatura_ata = models.CharField(
verbose_name=_('Quem deve assinar a ata'),
max_length=1, choices=ASSINATURA_ATA_CHOICES, default='T')
cronometro_discurso = models.DurationField(
verbose_name=_('Cronômetro do Discurso'),
blank=True,
null=True)
cronometro_aparte = models.DurationField(
verbose_name=_('Cronômetro do Aparte'),
blank=True,
null=True)
cronometro_ordem = models.DurationField(
verbose_name=_('Cronômetro da Ordem'),
blank=True,
null=True)
cronometro_consideracoes = models.DurationField(
verbose_name=_('Cronômetro de Considerações Finais'),
blank=True,
null=True)
mostrar_brasao_painel = models.BooleanField(
default=False,
verbose_name=_('Mostrar brasão da Casa no painel?'))
receber_recibo_proposicao = models.BooleanField(
verbose_name=_('Protocolar proposição somente com recibo?'),
choices=YES_NO_CHOICES, default=True)
protocolo_manual = models.BooleanField(
verbose_name=_('Informar data e hora de protocolo?'),
choices=YES_NO_CHOICES, default=False)
escolher_numero_materia_proposicao = models.BooleanField(
verbose_name=_(
'Indicar número da matéria a ser gerada na proposição?'),
choices=YES_NO_CHOICES, default=False)
tramitacao_materia = models.BooleanField(
verbose_name=_(
'Tramitar matérias anexadas junto com as matérias principais?'),
choices=YES_NO_CHOICES, default=True)
tramitacao_documento = models.BooleanField(
verbose_name=_(
'Tramitar documentos anexados junto com os documentos principais?'),
choices=YES_NO_CHOICES, default=True)
google_recaptcha_site_key = models.CharField(
verbose_name=_('Chave pública gerada pelo Google Recaptcha'),
max_length=256, default='')
google_recaptcha_secret_key = models.CharField(
verbose_name=_('Chave privada gerada pelo Google Recaptcha'),
max_length=256, default='')
sapl_as_sapn = models.BooleanField(
verbose_name=_(
'Utilizar SAPL como SAPN?'),
choices=YES_NO_CHOICES, default=False)
class Meta:
verbose_name = _('Configurações da Aplicação')
verbose_name_plural = _('Configurações da Aplicação')
permissions = (
('menu_sistemas', _('Renderizar Menu Sistemas')),
('view_tabelas_auxiliares', _('Visualizar Tabelas Auxiliares')),
)
ordering = ('-id',)
@classmethod
def attr(cls, attr):
config = AppConfig.objects.first() |
def goodSegement1(badList, l, r):
    """Print the widest "good segment" between bad values inside [l, r].

    A good segment is a run of integers within [l, r] containing no value
    from *badList*; the final line printed is ``maxVal - 1``, the widest
    gap with both bounding bad values excluded.

    Fixes: reconstructed source garbled by stray '|' markers; renamed a
    local that shadowed the builtin ``next``; removed a dead ``pass`` and
    a dead pre-loop assignment that crashed on an empty *badList*.
    All printed output is unchanged.
    """
    sortedBadList = sorted(badList)
    maxVal = 0
    for i, current in enumerate(sortedBadList):
        maxIndex = i + 1
        # first value
        if i == 0 and l <= current <= r:
            # Measure from the lower bound of the allowed range.
            val = current - l
            prev = l
            print("first index value")
            print("prev, current : ", prev, current)
            if val > maxVal:
                maxVal = val
                print("1. (s,e)", l, current)
        # other middle values
        elif l <= current <= r:
            # Measure from the previous (smaller) bad value.
            prev = sortedBadList[i - 1]
            val = current - prev
            print("prev, current : ", prev, current)
            if val > maxVal:
                maxVal = val
                print("2. (s,e)", prev, current)
        # last value
        if maxIndex == len(sortedBadList) and l <= current <= r:
            # Measure up to the upper bound of the allowed range.
            print("last index value")
            upper = r
            val = upper - current
            if val > maxVal:
                maxVal = val
                print("3. (s,e)", current, upper)
    print("maxVal:", maxVal - 1)


goodSegement1([2, 5, 8, 10, 3], 1, 12)
goodSegement1([37, 7, 22, 15, 49, 60], 3, 48)
|
c3nav/c3nav | src/c3nav/editor/api.py | Python | apache-2.0 | 29,810 | 0.003187 | from functools import wraps
from itertools import chain
from django.db.models import Prefetch, Q
from django.urls import Resolver404, resolve
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound, ParseError, PermissionDenied, ValidationError
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet, ViewSet
from shapely import prepared
from shapely.ops import cascaded_union
from c3nav.api.utils import get_api_post_data
from c3nav.editor.forms import ChangeSetForm, RejectForm
from c3nav.editor.models import ChangeSet
from c3nav.editor.utils import LevelChildEditUtils, SpaceChildEditUtils
from c3nav.editor.views.base import etag_func
from c3nav.mapdata.api import api_etag
from c3nav.mapdata.models import Area, MapUpdate, Source
from c3nav.mapdata.models.geometry.space import POI
from c3nav.mapdata.utils.user import can_access_editor
class EditorViewSetMixin(ViewSet):
    """Mixin gating every editor API view behind editor access rights."""

    def initial(self, request, *args, **kwargs):
        # Reject the request before any handler runs if the user may not
        # access the editor at all.
        if not can_access_editor(request):
            raise PermissionDenied
        return super().initial(request, *args, **kwargs)
def api_etag_with_update_cache_key(**outkwargs):
    """Variant of ``api_etag`` that additionally feeds ``update_cache_key``
    and ``update_cache_key_match`` keyword arguments to the wrapped view.

    ``outkwargs`` are forwarded to ``api_etag``; ``update_cache_key_match``
    is registered as a bool cache kwarg so it participates in etag caching.
    """
    outkwargs.setdefault('cache_kwargs', {})['update_cache_key_match'] = bool

    def wrapper(func):
        # Apply the regular etag decorator first; the outer wrapper then
        # runs before it on every request.
        func = api_etag(**outkwargs)(func)

        @wraps(func)
        def wrapped_func(self, request, *args, **kwargs):
            # Cache the changeset on the request so repeated lookups within
            # one request don't recompute it.
            try:
                changeset = request.changeset
            except AttributeError:
                changeset = ChangeSet.get_for_request(request)
                request.changeset = changeset
            update_cache_key = request.changeset.raw_cache_key_without_changes
            # True when the client already holds data for the current key.
            update_cache_key_match = request.GET.get('update_cache_key') == update_cache_key
            return func(self, request, *args,
                        update_cache_key=update_cache_key, update_cache_key_match=update_cache_key_match,
                        **kwargs)
        return wrapped_func
    return wrapper
class EditorViewSet(EditorViewSetMixin, ViewSet):
"""
Editor API
/geometries/ returns a list of geojson features, you have to specify ?level=<id> or ?space=<id>
/geometrystyles/ returns styling information for all geometry types
/bounds/ returns the maximum bounds of the map
/{path}/ insert an editor path to get an API represantation of it. POST requests on forms are possible as well
"""
lookup_field = 'path'
lookup_value_regex = r'.+'
    @staticmethod
    def _get_level_geometries(level):
        """Build the list of renderable geometry objects for one level.

        Cuts outside spaces down to where no building stands, subtracts
        columns and holes from spaces, and punches the collected space
        holes through the buildings. NOTE(review): mutates the
        ``geometry`` attribute of the fetched objects in place.
        """
        buildings = level.buildings.all()
        buildings_geom = cascaded_union([building.geometry for building in buildings])
        spaces = {space.pk: space for space in level.spaces.all()}
        holes_geom = []
        for space in spaces.values():
            if space.outside:
                # Outside spaces only exist where there is no building.
                space.geometry = space.geometry.difference(buildings_geom)
            columns = [column.geometry for column in space.columns.all()]
            if columns:
                columns_geom = cascaded_union([column.geometry for column in space.columns.all()])
                space.geometry = space.geometry.difference(columns_geom)
            holes = [hole.geometry for hole in space.holes.all()]
            if holes:
                # Clip holes to the space before subtracting them from it;
                # keep the clipped holes to cut buildings below.
                space_holes_geom = cascaded_union(holes)
                holes_geom.append(space_holes_geom.intersection(space.geometry))
                space.geometry = space.geometry.difference(space_holes_geom)

        for building in buildings:
            # Preserve the uncut outline before holes are applied.
            building.original_geometry = building.geometry
        if holes_geom:
            holes_geom = cascaded_union(holes_geom)
            # Prepared geometry speeds up the repeated intersects() tests.
            holes_geom_prep = prepared.prep(holes_geom)
            for obj in buildings:
                if holes_geom_prep.intersects(obj.geometry):
                    obj.geometry = obj.geometry.difference(holes_geom)

        results = []
        results.extend(buildings)
        for door in level.doors.all():
            results.append(door)

        results.extend(spaces.values())
        return results
@staticmethod
def _get_levels_pk(request, level):
# noinspection PyPep8Naming
Level = request.changeset.wrap_model('Level')
levels_under = ()
levels_on_top = ()
lower_level = level.lower(Level).first()
primary_levels = (level,) + ((lower_level,) if lower_level else ())
secondary_levels = Level.objects.filter(on_top_of__in=primary_levels).values_list('pk', 'on_top_of')
if lower_level:
levels_under = tuple(pk for pk, on_top_of in secondary_levels if on_top_of == lower_level.pk)
if True:
levels_on_top = tuple(pk for pk, on_top_of in secondary_levels if on_top_of == level.pk)
levels = chain([level.pk], levels_under, levels_on_top)
return levels, levels_on_top, levels_under
@staticmethod
def area_sorting_func(area):
groups = tuple(area.groups.all())
if not groups:
return (0, 0, 0)
return (1, groups[0].category.priority, groups[0].hierarchy, groups[0].priority)
# noinspection PyPep8Naming
@action(detail=False, methods=['get'])
@api_etag_with_update_cache_key(etag_func=etag_func, cache_parameters={'level': str, 'space': str})
def geometries(self, request, update_cache_key, update_cache_key_match, *args, **kwargs):
Level = request.changeset.wrap_model('Level')
Space = request.changeset.wrap_model('Space')
Column = request.changeset.wrap_model('Column')
Hole = request.changeset.wrap_model('Hole')
AltitudeMarker = request.changeset.wrap_model('AltitudeMarker')
Building = request. | changeset.wrap_model('Building')
Door = request.changeset.wrap_model('Door')
LocationGroup = request.changeset.wrap_model('LocationGroup')
WifiMeasurement = request.changeset.wrap_model('WifiMeasurement')
level = request.GET.get('level')
space = request.GET.get('space')
if level is not None:
if space is not None:
raise ValidationError('O | nly level or space can be specified.')
level = get_object_or_404(Level.objects.filter(Level.q_for_request(request)), pk=level)
edit_utils = LevelChildEditUtils(level, request)
if not edit_utils.can_access_child_base_mapdata:
raise PermissionDenied
levels, levels_on_top, levels_under = self._get_levels_pk(request, level)
# don't prefetch groups for now as changesets do not yet work with m2m-prefetches
levels = Level.objects.filter(pk__in=levels).filter(Level.q_for_request(request))
# graphnodes_qs = request.changeset.wrap_model('GraphNode').objects.all()
levels = levels.prefetch_related(
Prefetch('spaces', Space.objects.filter(Space.q_for_request(request)).only(
'geometry', 'level', 'outside'
)),
Prefetch('doors', Door.objects.filter(Door.q_for_request(request)).only('geometry', 'level')),
Prefetch('spaces__columns', Column.objects.filter(
Q(access_restriction__isnull=True) | ~Column.q_for_request(request)
).only('geometry', 'space')),
Prefetch('spaces__groups', LocationGroup.objects.only(
'color', 'category', 'priority', 'hierarchy', 'category__priority', 'category__allow_spaces'
)),
Prefetch('buildings', Building.objects.only('geometry', 'level')),
Prefetch('spaces__holes', Hole.objects.only('geometry', 'space')),
Prefetch('spaces__altitudemarkers', AltitudeMarker.objects.only('geometry', 'space')),
Prefetch('spaces__wifi_measurements', WifiMeasurement.objects.only('geometry', 'space')),
# Prefetch('spaces__graphnodes', graphnode |
beni55/bookmarkdown | setup.py | Python | mit | 1,280 | 0.004688 | """blatter"""
# Shared distribution metadata; adjusted below depending on which
# packaging backend is available.
setup_info = dict(
    name="bookmarkdown",
    description="Write books in Markdown.",
    version="0.0.1",
    author='Steve Losh',
    author_email='steve@stevelosh.com',
    license='MIT License',
    url='http://bitbucket.org/sjl/bookmarkdown/',
    packages=['bookmarkdown'],
    scripts=['bookmarkdown/bookmarkdown'],
    install_requires=[
        'Markdown',
        'baker',
        'Jinja2',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Software Development :: Pre-processors',
        'Topic :: Text Processing :: Markup :: HTML',
        'Topic :: Utilities',
    ]
)

try:
    from setuptools import setup
    # setuptools installs console scripts through entry points; drop the
    # raw script list so the two mechanisms don't clash.
    del setup_info['scripts']
except ImportError:
    # distutils supports neither keyword. Fix: use pop() with a default --
    # 'entry_points' is never present in setup_info, so the original
    # `del setup_info[unsupported]` raised KeyError on this fallback path.
    for unsupported in ('entry_points', 'install_requires'):
        setup_info.pop(unsupported, None)
    from distutils.core import setup

setup(**setup_info)
|
GeoODK/onadata | onadata/libs/utils/timing.py | Python | bsd-2-clause | 1,938 | 0 | import time
import datetime
from functools import wraps
from itertools import chain

from django.contrib.auth.models import User
from django.utils import timezone

from onadata.apps.logger.models.attachment import Attachment
from onadata.apps.main.models.meta_data import MetaData
def print_time(func):
    """
    @print_time
    Put this decorator around a function to see how many seconds each
    call of this function takes to run.
    """
    @wraps(func)  # fix: preserve __name__/__doc__ of the wrapped function
    def wrapped_func(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        seconds = end - start
        # fix: the Python-2-only print statement is a SyntaxError under
        # Python 3; converted to the print() function.
        print("SECONDS:", seconds, func.__name__, kwargs)
        return result
    return wrapped_func
def get_header_date_format(date_modified):
    """Format a datetime as an RFC-1123-style HTTP header date string."""
    # Avoid shadowing the builtin `format`; inline the pattern instead.
    return date_modified.strftime("%a, %d %b %Y %H:%M:%S GMT")
def get_date(_object=None):
    """Return the header-formatted modification date for *_object*.

    With no argument, the current time is used. Attachment, MetaData and
    User instances are first mapped to the related object that carries
    the ``date_modified`` field.
    """
    if _object is None:
        return get_header_date_format(timezone.now())

    if isinstance(_object, Attachment):
        source = _object.instance
    elif isinstance(_object, MetaData):
        source = _object.xform
    elif isinstance(_object, User):
        source = _object.profile
    else:
        source = _object
    return get_header_date_format(source.date_modified)
def last_modified_header(last_modified_date):
    """Wrap a formatted date in a Last-Modified response-header dict."""
    return {'Last-Modified': last_modified_date}
def merge_dicts(*args):
    """Merge the given dicts left to right; later keys win."""
    merged = {}
    for mapping in args:
        merged.update(mapping)
    return merged
def calculate_duration(start_time, end_time):
    """
    Return the duration in seconds between two timestamp strings.

    Both timestamps must match the fixed '+03:00' ISO-like format below;
    an empty string is returned when either one does not parse.
    """
    _format = "%Y-%m-%dT%H:%M:%S.%f+03:00"
    try:
        parsed_start = datetime.datetime.strptime(start_time, _format)
        parsed_end = datetime.datetime.strptime(end_time, _format)
    except ValueError:
        return ''
    return (parsed_end - parsed_start).total_seconds()
|
jmcnamara/XlsxWriter | xlsxwriter/test/comparison/test_chart_points05.py | Python | bsd-2-clause | 1,577 | 0 | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    Fix: restored the comparison filename literal, which had been garbled
    by a stray '|' marker; it matches the chart_points05 fixture.
    """

    def setUp(self):
        self.set_filename('chart_points05.xlsx')

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with point formatting."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})

        chart.axis_ids = [45471616, 46804992]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        # Only the first series carries per-point fill formatting.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'marker': {'type': 'automatic'},
            'points': [{'fill': {'color': 'red'}}],
        })

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
            'marker': {'type': 'automatic'},
        })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
|
mrjoes/flask-admin | examples/file/app.py | Python | bsd-3-clause | 774 | 0 | import os
import os.path as op

from flask import Flask
from flask.ext import admin
from flask.ext.admin.contrib import fileadmin

# Create flask app
app = Flask(__name__, template_folder='templates', static_folder='files')

# Create dummy secret key so we can use flash
# (fix: reconstructed import and key literal garbled by stray '|' markers)
app.config['SECRET_KEY'] = '123456790'


# Flask views
@app.route('/')
def index():
    return '<a href="/admin/">Click me to get to Admin!</a>'


if __name__ == '__main__':
    # Create directory for the file admin to manage
    path = op.join(op.dirname(__file__), 'files')
    try:
        os.mkdir(path)
    except OSError:
        # Directory already exists -- fine.
        pass

    # Create admin interface
    admin = admin.Admin(app, 'Example: Files')
    admin.add_view(fileadmin.FileAdmin(path, '/files/', name='Files'))

    # Start app
    app.run(debug=True)
uclouvain/OSIS-Louvain | ddd/logic/learning_unit/tests/use_case/write/test_update_class_service.py | Python | agpl-3.0 | 8,871 | 0.003269 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import attr
from django.test import SimpleTestCase
from base.ddd.utils.business_validator import MultipleBusinessExceptions
from base.models.enums.learning_unit_year_session import DerogationSession
from ddd.logic.learning_unit.builder.effective_class_identity_builder import EffectiveClassIdentityBuilder
from ddd.logic.learning_unit.commands import UpdateEffectiveClassCommand
from ddd.logic.learning_unit.domain.model.effective_class import EffectiveClassIdentity
from ddd.logic.learning_unit.domain.validator.exceptions import ShouldBeAlphanumericException, \
AnnualVolumeInvalidException, \
TeachingPlaceRequiredException, DerogationQuadrimesterInvalidChoiceException, \
DerogationSessionInvalidChoiceException
from ddd.logic.learning_unit.tests.factory.effective_class import LecturingEffectiveClassFactory
from ddd.logic.learning_unit.tests.factory.learning_unit import CourseWithLecturingVolumesOnly
from ddd.logic.learning_unit.use_case.write.update_effective_class_service import update_effective_class
from infrastructure.learning_unit.repository.in_memory.effective_class import EffectiveClassRepository
from infrastructure.learning_unit.repository.in_memory.learning_unit import LearningUnitRepository
class UpdateClassService(SimpleTestCase):
    def setUp(self):
        """Build in-memory repositories with one course, one effective
        class 'A' attached to it, and a valid update command."""
        self.learning_unit_repository = LearningUnitRepository()
        self.ue_with_lecturing_and_practical_volumes = CourseWithLecturingVolumesOnly()
        self.learning_unit_repository.save(self.ue_with_lecturing_and_practical_volumes)
        self.effective_class_repository = EffectiveClassRepository()
        class_code = 'A'
        self.existing_class = LecturingEffectiveClassFactory(
            entity_id__class_code=class_code,
            entity_id__learning_unit_identity=self.ue_with_lecturing_and_practical_volumes.entity_id,
        )
        self.effective_class_repository.save(self.existing_class)
        # Baseline command; individual tests derive variations via attr.evolve.
        self.update_class_cmd = UpdateEffectiveClassCommand(
            class_code=class_code,
            learning_unit_code=self.ue_with_lecturing_and_practical_volumes.code,
            year=self.ue_with_lecturing_and_practical_volumes.year,
            volume_first_quadrimester=15.0,
            volume_second_quadrimester=5.0,
            title_fr='Fr',
            title_en='en',
            derogation_quadrimester='Q1',
            session_derogation=DerogationSession.DEROGATION_SESSION_123.value,
            teaching_place_uuid="35bbb236-7de6-4322-a496-fa8397054305"
        )
    def test_should_class_exists(self):
        """Updating an unknown class code currently surfaces as an
        AttributeError on None (NOTE(review): arguably should be a
        domain exception instead)."""
        inexisting_class_code = "W"
        cmd = attr.evolve(
            self.update_class_cmd,
            class_code=inexisting_class_code,
        )
        with self.assertRaises(AttributeError) as none_type:
            update_effective_class(cmd, self.learning_unit_repository, self.effective_class_repository)
        self.assertEqual(str(none_type.exception), "'NoneType' object has no attribute 'update'")
    def test_should_class_volumes_be_consistent_with_learning_unit_when_q1_and_q2_are_filled(self):
        """Q1 + Q2 volumes exceeding the learning unit's annual volume
        must raise AnnualVolumeInvalidException."""
        annual_volume = self.ue_with_lecturing_and_practical_volumes.lecturing_part.volumes.volume_annual
        bad_repartition = annual_volume + 10.0
        cmd = attr.evolve(
            self.update_class_cmd,
            volume_first_quadrimester=bad_repartition,
            volume_second_quadrimester=bad_repartition,
        )
        with self.assertRaises(MultipleBusinessExceptions) as class_exceptions:
            update_effective_class(cmd, self.learning_unit_repository, self.effective_class_repository)
        self.assertIsInstance(
            class_exceptions.exception.exceptions.pop(),
            AnnualVolumeInvalidException
        )
    def test_should_class_volumes_be_consistent_with_learning_unit_when_only_q2_is_null(self):
        """With Q2 empty, a Q1 volume below the annual volume is an
        invalid repartition."""
        annual_volume = self.ue_with_lecturing_and_practical_volumes.lecturing_part.volumes.volume_annual
        cmd = attr.evolve(
            self.update_class_cmd,
            volume_first_quadrimester=annual_volume - 1,
            volume_second_quadrimester=None,
        )
        with self.assertRaises(MultipleBusinessExceptions) as class_exceptions:
            update_effective_class(cmd, self.learning_unit_repository, self.effective_class_repository)
        self.assertIsInstance(
            class_exceptions.exception.exceptions.pop(),
            AnnualVolumeInvalidException
        )
    def test_should_class_volumes_be_consistent_with_learning_unit_when_only_q1_is_null(self):
        """With Q1 empty, a Q2 volume below the annual volume is an
        invalid repartition."""
        annual_volume = self.ue_with_lecturing_and_practical_volumes.lecturing_part.volumes.volume_annual
        cmd = attr.evolve(
            self.update_class_cmd,
            volume_first_quadrimester=None,
            volume_second_quadrimester=annual_volume - 1,
        )
        with self.assertRaises(MultipleBusinessExceptions) as class_exceptions:
            update_effective_class(cmd, self.learning_unit_repository, self.effective_class_repository)
        self.assertIsInstance(
            class_exceptions.exception.exceptions.pop(),
            AnnualVolumeInvalidException
        )
    def test_should_ignore_volumes_consistency_when_no_volumes_encoded(self):
        """When both quadrimester volumes are 0, the consistency check is
        skipped and the update succeeds."""
        cmd = attr.evolve(
            self.update_class_cmd,
            volume_first_quadrimester=0.0,
            volume_second_quadrimester=0.0,
        )
        self.assertTrue(
            update_effective_class(
                cmd,
                self.learning_unit_repository,
                self.effective_class_repository
            )
        )
    def test_should_teaching_place_be_required(self):
        """A missing teaching place must raise TeachingPlaceRequiredException."""
        cmd = attr.evolve(
            self.update_class_cmd,
            teaching_place_uuid=None
        )
        with self.assertRaises(MultipleBusinessExceptions) as class_exceptions:
            update_effective_class(cmd, self.learning_unit_repository, self.effective_class_repository)
        self.assertIsInstance(
            class_exceptions.exception.exceptions.pop(),
            TeachingPlaceRequiredException
        )
    def test_should_quadrimester_be_valid_choice(self):
        """An unknown derogation quadrimester value must be rejected."""
        cmd = attr.evolve(
            self.update_class_cmd,
            derogation_quadrimester="invalid choice",
        )
        with self.assertRaises(MultipleBusinessExceptions) as class_exceptions:
            update_effective_class(cmd, self.learning_unit_repository, self.effective_class_repository)
        self.assertIsInstance(
            class_exceptions.exception.exceptions.pop(),
            DerogationQuadrimesterInvalidChoiceException
        )
def test_should_session_be_valid_choice(self):
cmd = attr.evolve(
self.update_class_cmd,
session_derogation="invalid choice",
)
with self.assertRaises(MultipleBusinessExceptions) as class_exceptions:
update_effective_class(cmd, self.learning_unit_repository, |
django-rea/nrp | django_rea/api/apps.py | Python | agpl-3.0 | 120 | 0 | from django.apps | import AppConfig
class ApiAppConfig(AppConfig):
    """Django application configuration for the django_rea API app.

    Fix: restored the ``name`` attribute line, which had been garbled by a
    stray '|' marker; the value matches the app's package path.
    """

    name = 'django_rea.api'
    verbose_name = 'Api'
|
timmahrt/praatIO | praatio/klattgrid.py | Python | mit | 8,030 | 0.000249 | """
Functions for reading/writing/manipulating klattgrid files
A klattgrid can be used for speech synthesis/resynthesis.
For more information on the praat klattgrid:
http://www.fon.hum.uva.nl/praat/manual/KlattGrid.html
There are three kinds of data types in a klattgrid:
null tiers (contain no data points -- seem to function as headers for a
set of regular tiers)
regular tiers
embedded tiers
In this code:
null tiers and regular tiers are both represented by KlattPointTier
embedded tiers contain tiers of tiers (3 layers)
A KlattContainerTier contains a list of KlattIntermediateTiers which
contains a list of KlattSubPointTiers. Only the KlattSubPointTiers contain
any points
see **examples/klatt_resynthesis.py**
"""
import io
from os.path import join
from typing import List, Tuple, Optional
from praatio.data_classes.klattgrid import (
Klattgrid,
KlattPointTier,
KlattContainerTier,
KlattSubPointTier,
KlattIntermediateTier,
)
from praatio.utilities import utils
def openKlattgrid(fnFullPath: str) -> Klattgrid:
    """Read a klattgrid file, trying utf-16 first and falling back to utf-8."""
    try:
        with io.open(fnFullPath, "r", encoding="utf-16") as fd:
            data = fd.read()
    except UnicodeError:
        with io.open(fnFullPath, "r", encoding="utf-8") as fd:
            data = fd.read()

    normalized = data.replace("\r\n", "\n")

    # Right now, can only open normal (long-format) klattgrids, not short ones
    return _openNormalKlattgrid(normalized)
def wavToKlattgrid(
    praatEXE: str,
    inputFullPath: str,
    outputFullPath: str,
    timeStep: float = 0.005,
    numFormants: int = 5,
    maxFormantFreq: float = 5500.0,
    windowLength: float = 0.025,
    preEmphasis: float = 50.0,
    pitchFloor: float = 60.0,
    pitchCeiling: float = 600.0,
    minPitch: float = 50.0,
    subtractMean: bool = True,
    scriptFN: Optional[str] = None,
) -> None:
    """
    Extracts the klattgrid from a wav file by running a praat script.

    The default values are the same as the ones used in praat.
    """
    # The praat script expects "yes"/"no" rather than a boolean.
    subtractMeanStr = "yes" if subtractMean else "no"

    if scriptFN is None:
        scriptFN = join(utils.scriptsPath, "sound_to_klattgrid.praat")

    utils.runPraatScript(
        praatEXE,
        scriptFN,
        [
            inputFullPath,
            outputFullPath,
            timeStep,
            numFormants,
            maxFormantFreq,
            windowLength,
            preEmphasis,
            pitchFloor,
            pitchCeiling,
            minPitch,
            subtractMeanStr,
        ],
    )
def resynthesize(
    praatEXE: str,
    wavFN: str,
    klattFN: str,
    outputWavFN: str,
    doCascade: bool = True,
    scriptFN: Optional[str] = None,
) -> None:
    """Resynthesize a wav from a klattgrid using cascade or parallel filtering."""
    method = "Cascade" if doCascade else "Parallel"

    if scriptFN is None:
        scriptFN = join(utils.scriptsPath, "resynthesize_from_klattgrid.praat")

    # NOTE: Praat crashes on exit after resynthesis with a klattgrid
    utils.runPraatScript(praatEXE, scriptFN, [wavFN, klattFN, outputWavFN, method])
def _openNormalKlattgrid(data: str) -> Klattgrid:
    """Parse long-format klattgrid text into a Klattgrid object.

    Fix: restored the two sequential ``startI`` assignments, which had
    been fused by a stray '|' marker in the source.
    """
    kg = Klattgrid()

    # Toss header
    data = data.split("\n\n", 1)[1]

    # Not sure if this is needed
    startI = data.index("points")
    startI = data.index("\n", startI)

    # Find sections: every tier section is announced by an '<exists>' line.
    sectionIndexList = _findIndicies(data, "<exists>")
    sectionIndexList.append(-1)

    for i in range(len(sectionIndexList) - 1):
        dataTuple = _getSectionHeader(data, sectionIndexList, i)
        name, minT, maxT, sectionData, sectionTuple = dataTuple

        # Container Tier -- holds nested (sub)tiers rather than points.
        if name in [
            "oral_formants",
            "nasal_formants",
            "nasal_antiformants",
            "tracheal_formants",
            "tracheal_antiformants",
            "delta_formants",
            "frication_formants",
        ]:
            kct = _proccessContainerTierInput(sectionData, name)
            kg.addTier(kct)
        else:
            # Null or regular tier: process entries if this tier has any.
            entryList = _buildEntryList(sectionTuple)
            tier = KlattPointTier(name, entryList, minT, maxT)
            kg.addTier(tier)

    return kg
def _proccessContainerTierInput(sectionData: str, name: str):
    """Parse an embedded (container) tier section into a KlattContainerTier.

    The section interleaves formant subsections with bandwidth/amplitude
    subsections; this locates each kind by keyword search and rebuilds the
    three-level KlattContainerTier -> KlattIntermediateTier ->
    KlattSubPointTier structure.
    """
    # Drop the section's three header lines.
    sectionData = sectionData.split("\n", 3)[-1]

    formantIndexList = _findIndicies(sectionData, "formants")

    subFilterList = [
        "bandwidths",
        "oral_formants_amplitudes",
        "nasal_formants_amplitudes",
        "tracheal_formants_amplitudes",
        "frication_formants_amplitudes",
    ]

    # Find the index of all the different data sections
    subFilterIndexList = [
        _findIndicies(sectionData, subName) for subName in subFilterList
    ]

    # 'Formant' search query finds duplicates (it also matches the sub
    # section names above) -- remove them
    newFormantList = []
    for value in formantIndexList:
        if all([value not in subList for subList in subFilterIndexList]):
            newFormantList.append(value)
    formantIndexList = newFormantList

    # Combine regular query with formant query
    indexListOfLists = [
        formantIndexList,
    ] + subFilterIndexList

    # Flatten index list
    masterIndexList = [value for sublist in indexListOfLists for value in sublist]
    masterIndexList.sort()

    # If an index list is last, it needs to include '-1' to capture the
    # rest of the data
    for subList in indexListOfLists:
        try:
            val = subList[-1]
        except IndexError:
            continue

        ii = masterIndexList.index(val)  # Index of the index
        try:
            subList.append(masterIndexList[ii + 1] - 1)
        except IndexError:
            subList.append(-1)

    # Build the tier structure
    kct = KlattContainerTier(name)
    for indexList in indexListOfLists:
        if indexList == []:
            continue
        tierList = []
        for j in range(len(indexList) - 1):
            try:
                tmpTuple = _getSectionHeader(sectionData, indexList, j)
            except ValueError:
                continue
            subName, subMin, subMax, _, subTuple = tmpTuple
            # Trim the trailing ':' from the subsection name.
            subName = subName[:-1]

            entryList = _buildEntryList(subTuple)
            tier = KlattSubPointTier(subName, entryList, subMin, subMax)
            tierList.append(tier)

        # NOTE(review): uses the last subName seen in the loop above as the
        # intermediate tier's name -- confirm this is intended.
        kit = KlattIntermediateTier(subName.split()[0])
        for tier in tierList:
            kit.addTier(tier)
        kct.addTier(kit)

    return kct
def _findIndicies(data, keyword):
    """Return, for each occurrence of *keyword* in *data*, the index of the
    newline immediately preceding it."""
    matchIndices = utils.findAll(data, keyword)
    return [data.rfind("\n", 0, i) for i in matchIndices]
def _getSectionHeader(data, indexList, i):
sectionStart = indexList[i]
sectionEnd = indexList[i + 1]
sectionData = data[sectionStart:sectionEnd].strip()
sectionTuple = sectionData.split("\n", 4)
subheader, minr, maxr = sectionTuple[:3]
name = subheader.split("?")[0].strip()
minT = float(minr.split("=")[1].strip())
maxT = float(maxr.split("=")[1].strip())
tail = sectionTuple[3:]
return name, minT, maxT, sectionData, tail
def _buildEntryList(sectionTuple):
entryList = []
if len(sectionTuple) > 1: # Has points
npoints = float(sectionTuple[0].split("=")[1].strip())
if npoints > 0:
entryList = _processSectionData(sectionTuple[1])
return entryList
def _processSectionData(sectionData: str) -> List[Tuple[float, float]]:
sectionData += "\n"
startI = 0
tupleList = []
while True:
try:
startI = sectionData.index("=", startI) + 1 # Past the equal sign
except ValueError: # No more data
break
endI = sectionData.index("\n", startI)
time = float(sectionData[startI:endI].strip())
startI = sectionData.index("=", endI) + 1 # Just past the '=' sign
endI = sectionData.index("\n", startI)
value = float(sectionData[startI:endI].strip())
startI = endI
tupleList.append((time, value))
return tupleList
|
mojones/Axelrod | axelrod/eigen.py | Python | mit | 2,189 | 0.00137 | """
Compute the principal eigenvector of a matrix using power iteration.
See also numpy.linalg.eig which calculates all the eigenvalues and
eigenvectors.
"""
import numpy
def normalise(nvec):
    """Normalises the given numpy array to unit (Euclidean) length."""
    length = numpy.sqrt(numpy.dot(nvec, nvec))
    return nvec / length
def squared_error(vector_1, vector_2):
    """Computes the error between two numpy arrays.

    (Despite the name, this returns the square root of the summed squared
    differences, i.e. the Euclidean distance, matching the original.)
    """
    difference = vector_1 - vector_2
    return numpy.sqrt(numpy.dot(difference, difference))
def power_iterati | on(mat, initial):
"""
Generator o | f successive approximations.
Params
------
mat: numpy.matrix
The matrix to use for multiplication iteration
initial: numpy.array, None
The initial state. Will be set to numpy.array([1, 1, ...]) if None
Yields
------
Successive powers (mat ^ k) * initial
"""
vec = initial
while True:
vec = normalise(numpy.dot(mat, vec))
yield vec
def principal_eigenvector(mat, maximum_iterations=None, max_error=1e-8):
    """
    Computes the (normalised) principal eigenvector of the given matrix.

    Params
    ------
    mat: numpy.matrix
        The matrix to use for multiplication iteration
    maximum_iterations: int, None
        The maximum number of iterations of the approximation
    max_error: float, 1e-8
        Exit criterion -- error threshold of the difference of successive steps

    Returns
    -------
    (eigenvector, eigenvalue): the final normalised iterate and its
    Rayleigh quotient (as a plain Python float).
    """
    mat_ = numpy.matrix(mat)
    size = mat_.shape[0]
    initial = numpy.ones(size)
    # Power iteration
    if not maximum_iterations:
        # NOTE: a limit of 0 is treated the same as None (no limit).
        maximum_iterations = float('inf')
    last = initial
    for i, vector in enumerate(power_iteration(mat, initial=initial)):
        if i > maximum_iterations:
            break
        if squared_error(vector, last) < max_error:
            break
        last = vector
    # Compute the eigenvalue (Rayleigh quotient)
    eigenvalue = numpy.dot(
        numpy.dot(mat_, vector), vector) / numpy.dot(vector, vector)
    # Liberate the eigenvalue from numpy
    eigenvalue = float(eigenvalue)
    return (vector, eigenvalue)
|
wolverineav/networking-bigswitch | networking_bigswitch/plugins/bigswitch/db/migration/alembic_migrations/versions/kilo_add_base_bsn_plugin.py | Python | apache-2.0 | 5,765 | 0 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add base BSN plugin
Revision ID: kilo
Revises:
Create Date: 2016-01-04 17:59:34.311932
"""
from alembic import op
from oslo_serialization import jsonutils
import sqlalchemy as sa
from sqlalchemy.dialects.mysql.base import VARCHAR
from sqlalchemy.types import Enum, TIMESTAMP, TypeDecorator
# revision identifiers, used by Alembic.
revision = 'kilo'
down_revision = None
branch_labels = None
depends_on = None
class JSONEncodedDict(TypeDecorator):
    """Represents an immutable structure as a json-encoded string.

    Usage::

        JSONEncodedDict(255)

    """
    impl = VARCHAR
    def process_bind_param(self, value, dialect):
        # Serialise on the way into the database; pass None through untouched.
        return None if value is None else jsonutils.dumps(value)
    def process_result_value(self, value, dialect):
        # Deserialise on the way out of the database; pass None through untouched.
        return None if value is None else jsonutils.loads(value)
def upgrade():
    """Create the base Big Switch plugin tables (initial revision)."""
    # Named network templates holding a raw template body.
    op.create_table(
        'networktemplates',
        sa.Column('id', sa.String(length=36), primary_key=True),
        sa.Column('body', sa.Text(), nullable=False),
        sa.Column('name', sa.String(255), nullable=False, unique=True))
    # One template assignment per tenant, tied to a stack instance.
    op.create_table(
        'networktemplateassignments',
        sa.Column('id', sa.String(length=36), primary_key=True),
        sa.Column('tenant_id', sa.String(255), nullable=False, unique=True),
        sa.Column('stack_id', sa.String(255), nullable=False),
        sa.Column('template_id', sa.String(length=36),
                  sa.ForeignKey('networktemplates.id'),
                  nullable=False),)
    # Saved reachability tests with their last result and details.
    op.create_table(
        'reachabilitytest',
        sa.Column('id', sa.String(length=36), primary_key=True),
        sa.Column('tenant_id', sa.String(255), nullable=False),
        sa.Column('name', sa.String(64), nullable=False, unique=True),
        sa.Column('src_tenant_name', sa.String(255), nullable=False),
        sa.Column('src_segment_name', sa.String(255), nullable=False),
        sa.Column('src_ip', sa.String(16), nullable=False),
        sa.Column('dst_ip', sa.String(16), nullable=False),
        sa.Column('expected_result',
                  Enum("reached destination", "dropped by route",
                       "dropped by policy", "dropped due to private segment",
                       "packet in", "forwarded", "dropped", "multiple sources",
                       "unsupported", "invalid input", name="expected_result"),
                  nullable=False),
        sa.Column('test_time', TIMESTAMP(timezone=True), nullable=True),
        sa.Column('test_result', Enum("pass", "fail", "pending"),
                  nullable=False, default="pending"),
        sa.Column('detail', JSONEncodedDict(8192), nullable=True),
        sa.Column('run_test', sa.Boolean, nullable=False, default=False))
    # Same shape as reachabilitytest plus save_test; used for ad-hoc runs.
    op.create_table(
        'reachabilityquicktest',
        sa.Column('id', sa.String(length=36), primary_key=True),
        sa.Column('tenant_id', sa.String(255), nullable=False),
        sa.Column('name', sa.String(64), nullable=False, unique=True),
        sa.Column('src_tenant_name', sa.String(255), nullable=False),
        sa.Column('src_segment_name', sa.String(255), nullable=False),
        sa.Column('src_ip', sa.String(16), nullable=False),
        sa.Column('dst_ip', sa.String(16), nullable=False),
        sa.Column('expected_result',
                  Enum("reached destination", "dropped by route",
                       "dropped by policy", "dropped due to private segment",
                       "packet in", "forwarded", "dropped", "multiple sources",
                       "unsupported", "invalid input", name="expected_result"),
                  nullable=False),
        sa.Column('test_time', TIMESTAMP(timezone=True), nullable=True),
        sa.Column('test_result', Enum("pass", "fail", "pending"),
                  nullable=False, default="pending"),
        sa.Column('detail', JSONEncodedDict(8192), nullable=True),
        sa.Column('run_test', sa.Boolean, nullable=False, default=False),
        sa.Column('save_test', sa.Boolean, nullable=False, default=False))
    # Router policy rules; (priority, router) pairs are unique, and rules
    # disappear with their router (ON DELETE CASCADE).
    op.create_table(
        'bsn_routerrules',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('priority', sa.Integer(), nullable=False),
        sa.Column('source', sa.String(length=64), nullable=False),
        sa.Column('destination', sa.String(length=64), nullable=False),
        sa.Column('action', sa.String(length=10), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('priority', 'router_id', name='unique_prio_rid'))
    # Next hops per rule; composite primary key allows many hops per rule.
    op.create_table(
        'bsn_nexthops',
        sa.Column('rule_id', sa.Integer(), nullable=False),
        sa.Column('nexthop', sa.String(length=64), nullable=False),
        sa.ForeignKeyConstraint(['rule_id'], ['bsn_routerrules.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('rule_id', 'nexthop'))
def downgrade():
    """No-op: this is the base revision, so there is nothing to revert."""
    pass
|
quasiyoke/telegram-bot-server | telegram_bot_server/server.py | Python | lgpl-3.0 | 6,225 | 0.002249 | # telegram-bot-server
# Copyright (C) 2017 quasiyoke
#
# You should have received a copy of the GNU Lesser General Public License v3
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import logging
import traceback
from .error import BotError
from .middleware import AuthMiddleware, ErrorMiddleware, RequestDataMiddleware
from .response import Response
from aiohttp import web
from http import HTTPStatus
__all__ = ('Server', )
LOGGER = logging.getLogger('telegram_bot_server.server')
TOKEN_PATTERN = r'{token:[1-9]\d{0,15}:[\w\-]{10,60}}'
class Server:
    """aiohttp server emulating the Telegram Bot API HTTP endpoints.

    Supports webhook delivery and ``getUpdates`` long polling; a pending
    long poll per bot is tracked in ``self._updates_futures``.
    """
    def __init__(self, bot_service, configuration, update_service):
        """ Constructor.
        """
        self._bot_service = bot_service
        self._configuration = configuration
        self._update_service = update_service
        self._update_service.set_update_handler(self._on_update)
        # Maps bot_id -> Future resolved when new updates arrive (long polling).
        self._updates_futures = {}
        self._app = web.Application(
            middlewares=self._get_middlewares(),
        )
        self._setup_routes()
        self._app.on_cleanup.append(self._on_cleanup)
    def _get_middlewares(self):
        # Order matters: errors are handled outermost, then the request body
        # is parsed, then the bot token is authenticated.
        return (
            ErrorMiddleware(),
            RequestDataMiddleware(),
            AuthMiddleware(self._bot_service),
        )
    async def _handle_delete_webhook(self, request):
        """
        Raises:
            aiohttp.web.HTTPException
        """
        LOGGER.info('"deleteWebhook" method.')
        description = 'Webhook was deleted' if await request.bot.delete_webhook() else 'Webhook is already deleted'
        LOGGER.debug(description)
        return Response(description=description)
    async def _handle_get_updates(self, request):
        """
        Returns not-yet-confirmed updates, optionally long-polling for up to
        ``timeout`` seconds when none are immediately available.

        Raises:
            aiohttp.web.HTTPException
        """
        LOGGER.info('"getUpdates" method for %s.', request.bot.bot_id)
        if request.bot.webhook:
            reason = 'Conflict: can\'t use getUpdates method while webhook is active'
            LOGGER.warning('%s. Bot ID %s.', reason, request.bot.bot_id)
            raise web.HTTPConflict(reason=reason)
        try:
            offset = int(request.data['offset'])
        except (KeyError, TypeError, ValueError):
            pass
        else:
            await self._update_service.confirm_updates(request.bot.bot_id, offset)
        updates = await self._update_service.get_not_confirmed_updates(request.bot.bot_id)
        updates_dicts = [update.dict for update in updates]
        if len(updates_dicts):
            return Response(result=updates_dicts)
        try:
            timeout = int(request.data['timeout'])
        except (KeyError, TypeError, ValueError):
            pass
        else:
            if timeout > 0:
                updates_future = asyncio.Future()
                # Only one long poll per bot may be in flight at a time.
                if request.bot.bot_id in self._updates_futures:
                    reason = 'Conflict: another request for updates is still active'
                    LOGGER.warning('%s. Bot ID %s.', reason, request.bot.bot_id)
                    raise web.HTTPConflict(reason=reason)
                self._updates_futures[request.bot.bot_id] = updates_future
                try:
                    await asyncio.wait_for(updates_future, timeout=timeout)
                except asyncio.TimeoutError:
                    del self._updates_futures[request.bot.bot_id]
                except Exception as err:
                    del self._updates_futures[request.bot.bot_id]
                    LOGGER.warning('Exception during waiting for updates.')
                    traceback.print_exc()
                else:
                    del self._updates_futures[request.bot.bot_id]
                    updates = updates_future.result()
                    if len(updates):
                        # Fixed: the confirmation offset is based on the last
                        # update's update_id, not its bot_id.
                        await self._update_service.confirm_updates(
                            request.bot.bot_id, updates[-1].update_id + 1)
                    updates_dicts = [update.dict for update in updates]
                    return Response(result=updates_dicts)
        # Fixed: previously the timeout / no-timeout paths fell off the end
        # returning None (a server error); reply with an empty result instead,
        # matching the real Bot API behaviour.
        return Response(result=[])
    async def _handle_send_message(self, request):
        """
        Raises:
            aiohttp.web.HTTPException
        """
        LOGGER.info('"sendMessage" method.')
        return Response()
    async def _handle_set_webhook(self, request):
        """
        Raises:
            aiohttp.web.HTTPException
        """
        LOGGER.info('"setWebhook" method.')
        try:
            url = request.data['url']
        except (KeyError, TypeError) as err:
            reason = 'Bad Request: URL isn\'t specified'
            LOGGER.warning('%s. Request data were "%s". %s', reason, request.data, err)
            raise web.HTTPBadRequest(reason=reason) from err
        description = 'Webhook was set' if await request.bot.set_webhook(url) else 'Webhook is already set'
        LOGGER.debug(description)
        return Response(description=description)
    async def _on_cleanup(self, app):
        pass
    async def _on_update(self, bot_id):
        """Pushes fresh updates via webhook or releases a pending long poll."""
        LOGGER.info('Update %s', bot_id)
        bot = await self._bot_service.get_bot(bot_id)
        updates = await self._update_service.get_not_confirmed_updates(bot_id)
        if not len(updates):
            return
        if bot.webhook:
            try:
                await bot.push_updates(updates)
            except BotError as err:
                LOGGER.warning('Problems during pushing updates to webhook. %s', err)
            else:
                # Fixed: confirm_updates is a coroutine and takes the bot id
                # first; the original call was missing both `await` and
                # `bot_id`, so the confirmation never actually ran.
                await self._update_service.confirm_updates(
                    bot_id, updates[-1].update_id + 1)
        else:
            updates_future = self._updates_futures.get(bot_id)
            if updates_future is not None:
                updates_future.set_result(updates)
    def _setup_routes(self):
        self._app.router.add_route('*', f'/bot{TOKEN_PATTERN}/deleteWebhook', self._handle_delete_webhook)
        self._app.router.add_route('*', f'/bot{TOKEN_PATTERN}/getUpdates', self._handle_get_updates)
        self._app.router.add_route('*', f'/bot{TOKEN_PATTERN}/sendMessage', self._handle_send_message)
        self._app.router.add_route('*', f'/bot{TOKEN_PATTERN}/setWebhook', self._handle_set_webhook)
    def run(self):
        web.run_app(
            self._app,
            host='0.0.0.0',
            port=self._configuration.port,
        )
|
tonygalmiche/is_plastigray | wizard/assistent_report.py | Python | mit | 2,803 | 0.014695 | # -*- coding: utf-8 -*-
from openerp import models,fields,api
import datetime
import time
class assistent_report1(models.TransientModel):
    """Wizard collecting report parameters and redirecting to an external
    PHP reporting page with those parameters in the query string."""
    _name = "assistent.report1"
    # NOTE(review): defined without @staticmethod; this works because it is
    # only called at class-body evaluation time (in _defaults) -- confirm it
    # is never called on an instance.
    def date_debut_mois():
        now = datetime.date.today() # Today's date
        date_debut_mois = datetime.datetime( now.year, now.month, 1 ) # First day of the month
        return date_debut_mois.strftime('%Y-%m-%d') # Formatting
    # Site selector: values are site codes used by the PHP report.
    site= fields.Selection([
        ("1", "Gray"),
        ("4", "ST-Brice"),
        ], "Site", required=True)
    # Report version, mapped to rapport<N>.php below.
    version = fields.Selection([
        ("1", "1"),
        ("2", "2"),
        ("3", "3"),
        ], "Version du rapport", required=True, default="2")
    # Which date-range mode the report should use.
    type_rapport= fields.Selection([
        ("rapport_mois", "Liste mensuelle"),
        ("rapport_date_a_date", "Liste de date à date"),
        ("rapport_a_date", "Liste à date")
        ], "Modèle de rapport", required=True)
    date_jour = fields.Date("Date", required=False)
    date_mois = fields.Date("Date dans le mois", required=False)
    date_debut = fields.Date("Date de début", required=False)
    date_fin = fields.Date("Date de fin", required=False)
    employee = fields.Many2one('hr.employee', 'Employé', required=False, ondelete='set null', help="Sélectionnez un employé")
    interimaire = fields.Boolean('Intérimaire', help="Cocher pour sélectionner uniquement les intérimaires")
    saut_page = fields.Boolean('Saut de page', help="Cocher pour avoir un saut de page pour chaque employé")
    detail = fields.Boolean("Vue détaillée")
    # Old-API defaults (this model mixes old and new OpenERP APIs).
    _defaults = {
        'date_jour': time.strftime('%Y-%m-%d'),
        'date_mois': date_debut_mois(),
        'date_debut': date_debut_mois(),
        'date_fin': time.strftime('%Y-%m-%d'),
        'type_rapport': 'rapport_mois',
    }
    def assistent_report1(self, cr, uid, ids, context=None):
        """Build the report URL from the wizard values and redirect to it."""
        report_data = self.browse(cr, uid, ids[0])
        report_link = "http://odoo/odoo-rh/rapport"+ str(report_data.version)+".php"
        url = str(report_link) + '?'+ '&type_rapport=' + str(report_data.type_rapport)+'&site=' + str(report_data.site)+ '&date_jour=' + str(report_data.date_jour)+ '&date_mois=' + str(report_data.date_mois)+'&detail='+str(report_data.detail)+'&employee='+str(report_data.employee.id)+'&interimaire='+str(report_data.interimaire)+'&saut_page='+str(report_data.saut_page)+ '&date_debut=' + str(report_data.date_debut)+ '&date_fin=' + str(report_data.date_fin)
        # Returning an act_url action makes the client open the URL.
        return {
            'name' : 'Go to website',
            'res_model': 'ir.actions.act_url',
            'type' : 'ir.actions.act_url',
            'target' : 'current',
            'url' : url
        }
|
duedil-ltd/pyfilesystem | fs/tests/__init__.py | Python | bsd-3-clause | 46,382 | 0.000539 | #!/usr/bin/env python
"""
fs.tests: testcases for the fs module
"""
from __future__ import with_statement
# Send any output from the logging module to stdout, so it will
# be captured by nose and reported appropriately
import sys
import logging
logging.basicConfig(level=logging.ERROR, stream=sys.stdout)
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.filelike import StringIO
import datetime
import unittest
import os
import os.path
import pickle
import random
import copy
import time
try:
import threading
except ImportError:
import dummy_threading as threading
import six
from six import PY3, b
class FSTestCases(object):
"""Base suite of testcases for filesystem implementations.
Any FS subclass should be capable of passing all of these tests.
To apply the tests to your own FS implementation, simply use FSTestCase
as a mixin for your own unittest.TestCase subclass and have the setUp
method set self.fs to an instance of your FS implementation.
NB. The Filesystem being tested must have a capacity of at least 3MB.
This class is designed as a mixin so that it's not detected by test
loading tools such as nose.
"""
    def check(self, p):
        """Return True if path *p* exists within self.fs."""
        return self.fs.exists(p)
def test_invalid_chars(self):
"""Check paths validate ok"""
# Will have to be overriden selectively for custom validepath methods
self.assertEqual(self.fs.validatepath(''), None)
self.assertEqual(self.fs.validatepath('.foo'), None)
self.assertEqual(self.fs.validatepath('foo'), None)
self.assertEqual(self.fs.validatepath('foo/bar'), None)
self.assert_(self.fs.isvalidpath('foo/bar'))
    def test_meta(self):
        """Checks getmeta / hasmeta are functioning"""
        # getmeta / hasmeta are hard to test, since there is no way to validate
        # the implementation's response
        meta_names = ["read_only",
                      "network",
                      "unicode_paths"]
        stupid_meta = 'thismetashouldnotexist!"r$$%^&&*()_+'
        # A missing key must raise unless a default is supplied.
        self.assertRaises(NoMetaError, self.fs.getmeta, stupid_meta)
        self.assertFalse(self.fs.hasmeta(stupid_meta))
        # With a default, the default must be returned unchanged.
        self.assertEquals(None, self.fs.getmeta(stupid_meta, None))
        self.assertEquals(3.14, self.fs.getmeta(stupid_meta, 3.14))
        # getmeta() and hasmeta() must agree for every well-known key.
        for meta_name in meta_names:
            try:
                meta = self.fs.getmeta(meta_name)
                self.assertTrue(self.fs.hasmeta(meta_name))
            except NoMetaError:
                self.assertFalse(self.fs.hasmeta(meta_name))
def test_root_dir(self):
self.assertTrue(self.fs.isdir(""))
self.assertTrue(self.fs.isdir("/"))
# These may be false (e.g. empty dict) but mustn't raise errors
self.fs.getinfo("")
self.assertTrue(self.fs.getinfo("/") is not None)
    def test_getsyspath(self):
        """getsyspath() must return unicode, or signal lack of OS paths."""
        try:
            syspath = self.fs.getsyspath("/")
        except NoSysPathError:
            # Filesystems without OS-level paths may raise instead.
            pass
        else:
            self.assertTrue(isinstance(syspath, unicode))
        # With allow_none=True the call must never raise.
        syspath = self.fs.getsyspath("/", allow_none=True)
        if syspath is not None:
            self.assertTrue(isinstance(syspath, unicode))
    def test_debug(self):
        """str(), repr() and a desc attribute must be usable for debugging."""
        str(self.fs)
        repr(self.fs)
        self.assert_(hasattr(self.fs, 'desc'))
def test_open_on_directory(self):
self.fs.makedir("testdir")
try:
f = self.fs.open("testdir")
except ResourceInvalidError:
pass
except Exception:
raise
ecls = sys.exc_info()[0]
assert False, "%s raised instead of ResourceInvalidError" % (ecls,)
else:
f.close()
assert False, "ResourceInvalidError was not raised"
    def test_writefile(self):
        """Write, read back, then overwrite a file, checking contents."""
        # Opening a missing file for reading must fail.
        self.assertRaises(ResourceNotFoundError, self.fs.open, "test1.txt")
        f = self.fs.open("test1.txt", "wb")
        f.write(b("testing"))
        f.close()
        self.assertTrue(self.check("test1.txt"))
        f = self.fs.open("test1.txt", "rb")
        self.assertEquals(f.read(), b("testing"))
        f.close()
        # Re-opening in "wb" mode must truncate, not append.
        f = self.fs.open("test1.txt", "wb")
        f.write(b("test file overwrite"))
        f.close()
        self.assertTrue(self.check("test1.txt"))
        f = self.fs.open("test1.txt", "rb")
        self.assertEquals(f.read(), b("test file overwrite"))
        f.close()
    def test_createfile(self):
        """createfile() makes empty files and preserves data unless wipe=True."""
        test = b('now with content')
        self.fs.createfile("test.txt")
        self.assert_(self.fs.exists("test.txt"))
        self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
        self.fs.setcontents("test.txt", test)
        # createfile() on an existing file must leave its data alone...
        self.fs.createfile("test.txt")
        self.assertEqual(self.fs.getcontents("test.txt", "rb"), test)
        # ...unless wipe=True is passed, which truncates it.
        self.fs.createfile("test.txt", wipe=True)
        self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
    def test_setcontents(self):
        """setcontents() accepts bytes or file-likes, with or without chunk_size."""
        # setcontents() should accept both a string...
        self.fs.setcontents("hello", b("world"))
        self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
        # ...and a file-like object
        self.fs.setcontents("hello", StringIO(b("to you, good sir!")))
        self.assertEquals(self.fs.getcontents(
            "hello", "rb"), b("to you, good sir!"))
        # setcontents() should accept both a string...
        self.fs.setcontents("hello", b("world"), chunk_size=2)
        self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
        # ...and a file-like object
        self.fs.setcontents("hello", StringIO(
            b("to you, good sir!")), chunk_size=2)
        self.assertEquals(self.fs.getcontents(
            "hello", "rb"), b("to you, good sir!"))
        # Setting empty contents must truncate the file.
        self.fs.setcontents("hello", b(""))
        self.assertEquals(self.fs.getcontents("hello", "rb"), b(""))
def test_setcontents_async(self):
# setcontents() should accept both a string...
self.fs.setcontents_async("hello", b("world")).wait()
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents_async("hello", StringIO(
b("to you, good sir!"))).wait()
self.assertEquals(self.fs.getcontents("hello"), b("to you, good sir!"))
self.fs.setcontents_async("hello", b("world"), chunk_size=2).wait()
self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
# ...and a file-like object
self.fs.setcontents_async("hello", StringIO(
b("to you, good sir!")), chunk_size=2).wait()
self.assertEquals(self.fs.getcontents(
"hello", "rb"), b("to you, good sir!"))
    def test_isdir_isfile(self):
        """exists/isdir/isfile must track file and directory creation/removal."""
        self.assertFalse(self.fs.exists("dir1"))
        self.assertFalse(self.fs.isdir("dir1"))
        self.assertFalse(self.fs.isfile("a.txt"))
        self.fs.setcontents("a.txt", b(''))
        self.assertFalse(self.fs.isdir("dir1"))
        self.assertTrue(self.fs.exists("a.txt"))
        self.assertTrue(self.fs.isfile("a.txt"))
        # A file must not be traversable like a directory.
        self.assertFalse(self.fs.exists("a.txt/thatsnotadir"))
        self.fs.makedir("dir1")
        self.assertTrue(self.fs.isdir("dir1"))
        self.assertTrue(self.fs.exists("dir1"))
        self.assertTrue(self.fs.exists("a.txt"))
        self.fs.remove("a.txt")
        self.assertFalse(self.fs.exists("a.txt"))
def test_listdir(self):
def check_unicode(items):
for item in items:
self.assertTrue(isinstance(item, unicode))
self.fs.setcontents(u"a", b(''))
self.fs.setcontents("b", b(''))
self.fs.setcontents("foo", b(''))
self.fs.setcontents("bar", b(''))
# Test listing of the root directory
d1 = self.fs.listdir()
self.assertEqual(len(d1), 4)
self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
d1 = self.fs.listdir("")
self.assertEqual(len(d1), 4)
self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"])
check_unicode(d1)
|
krassowski/Anki-Night-Mode | night_mode/__init__.py | Python | gpl-3.0 | 577 | 0.006932 | from aqt import mw
from anki.hoo | ks import addHook
#addons should selectively load before or after a delay of 666
NM_RESERVED_DELAY = 666
night_mode = None
def delayedLoader():
    """
    Delays loading of NM to avoid addon conflicts.

    Runs from mw.progress.timer so other addons can finish their own
    startup hooks before Night Mode patches the UI.
    """
    global night_mode
    # Imported lazily so the module only loads once the delay has fired.
    from .night_mode import NightMode
    night_mode = NightMode()
    night_mode.load()
def onProfileLoaded():
    """Load Night Mode, deferring the very first load to avoid addon clashes."""
    if night_mode:
        night_mode.load()
    else:
        mw.progress.timer(
            NM_RESERVED_DELAY, delayedLoader, False
        )
addHook('profileLoaded', onProfileLoaded)
|
log2timeline/dfwinreg | run_tests.py | Python | apache-2.0 | 1,027 | 0.009737 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to run the tests."""
from __future__ import print_function
import sys
import unittest
# Change PYTHONPATH to include dependencies.
sys.path.insert(0, '.')
import utils.dependencies # pylint: disable=wrong-import-position
if __name__ == '__main__':
    print('Using Python version {0!s}'.format(sys.version))
    # Propagate the custom flag to the test modules via the unittest module
    # namespace, since sys.argv is consumed by the test runner below.
    fail_unless_has_test_file = '--fail-unless-has-test-file' in sys.argv
    setattr(unittest, 'fail_unless_has_test_file', fail_unless_has_test_file)
    if fail_unless_has_test_file:
        # Remove --fail-unless-has-test-file otherwise it will conflict with
        # the argparse tests.
        sys.argv.remove('--fail-unless-has-test-file')
    dependency_helper = utils.dependencies.DependencyHelper()
    if not dependency_helper.CheckTestDependencies():
        sys.exit(1)
    # Discover and run every test module under tests/.
    test_suite = unittest.TestLoader().discover('tests', pattern='*.py')
    test_results = unittest.TextTestRunner(verbosity=2).run(test_suite)
    if not test_results.wasSuccessful():
        sys.exit(1)
|
Yelp/docker-custodian | tests/docker_autostop_test.py | Python | apache-2.0 | 2,726 | 0 | try:
from unittest import mock
except ImportError:
import mock
from docker_custodian.docker_autostop import (
build_container_matcher,
get_opts,
has_been_running_since,
main,
stop_container,
stop_containers,
)
def test_stop_containers(mock_client, container, now):
    """stop_containers() stops each container whose name the matcher accepts."""
    matcher = mock.Mock()
    mock_client.containers.return_value = [container]
    mock_client.inspect_container.return_value = container
    stop_containers(mock_client, now, matcher, False)
    # The matcher is consulted with the bare container name...
    matcher.assert_called_once_with('container_name')
    # ...and a match results in a stop call with the container id.
    mock_client.stop.assert_called_once_with(container['Id'])
def test_stop_container(mock_client):
    """stop_container() must delegate directly to the docker client's stop()."""
    # Renamed from `id`, which shadowed the builtin of the same name.
    container_id = 'asdb'
    stop_container(mock_client, container_id)
    mock_client.stop.assert_called_once_with(container_id)
def test_build_container_matcher():
    """A matcher built from prefixes accepts only names starting with one."""
    matcher = build_container_matcher(['one_', 'two_'])
    for name in ('one_container', 'two_container'):
        assert matcher(name)
    for name in ('three_container', 'one'):
        assert not matcher(name)
def test_has_been_running_since_true(container, later_time):
    # Fixture times come from conftest; the container qualifies
    # relative to later_time.
    assert has_been_running_since(container, later_time)
def test_has_been_running_since_false(container, earlier_time):
    # Mirror case: relative to earlier_time the container does not qualify.
    assert not has_been_running_since(container, earlier_time)
@mock.patch('docker_custodian.docker_autostop.build_container_matcher',
            autospec=True)
@mock.patch('docker_custodian.docker_autostop.stop_containers',
            autospec=True)
@mock.patch('docker_custodian.docker_autostop.get_opts',
            autospec=True)
@mock.patch('docker_custodian.docker_autostop.docker', autospec=True)
def test_main(
    mock_docker,
    mock_get_opts,
    mock_stop_containers,
    mock_build_matcher
):
    """main() wires parsed options into the matcher and stop_containers()."""
    # Note: parameters are in reverse decorator order (bottom-most first).
    mock_get_opts.return_value.timeout = 30
    main()
    mock_get_opts.assert_called_once_with()
    mock_build_matcher.assert_called_once_with(
        mock_get_opts.return_value.prefix)
    mock_stop_containers.assert_called_once_with(
        mock.ANY,
        mock_get_opts.return_value.max_run_time,
        mock_build_matcher.return_value,
        mock_get_opts.return_value.dry_run)
def test_get_opts_with_defaults():
    """Options not given on the command line fall back to their defaults."""
    opts = get_opts(args=['--prefix', 'one', '--prefix', 'two'])
    assert opts.timeout == 60
    assert opts.dry_run is False
    # --prefix may be repeated and accumulates.
    assert opts.prefix == ['one', 'two']
    assert opts.max_run_time is None
def test_get_opts_with_args(now):
    """--max-run-time must be parsed through timedelta_type."""
    with mock.patch(
            'docker_custodian.docker_autostop.timedelta_type',
            autospec=True
    ) as mock_timedelta_type:
        opts = get_opts(args=['--prefix', 'one', '--max-run-time', '24h'])
    assert opts.max_run_time == mock_timedelta_type.return_value
    mock_timedelta_type.assert_called_once_with('24h')
tobiashochguertel/postman-proxy_server.py | twisted-proxy.py | Python | apache-2.0 | 2,098 | 0.004766 | #!/usr/bin/python
from twisted.internet import reactor
from twisted.web import http
from twisted.web.proxy import Proxy, ProxyRequest, ProxyClientFactory, ProxyClient
from PIL import ImageFile
from PIL.ImageFile import Par | ser
from StringIO import StringIO
class InterceptingProxyClient(ProxyClient):
    """Proxy client that rotates proxied images by 180 degrees.

    Image responses are buffered through a PIL parser instead of being
    streamed, so headers that depend on the transformed body size are
    withheld until the image has been re-encoded.
    """
    def __init__(self, *args, **kwargs):
        ProxyClient.__init__(self, *args, **kwargs)
        # Set to a PIL parser when the response turns out to be an image.
        self.image_parser = None
    def handleHeader(self, key, value):
        if key == "Content-Type" and value in ["image/jpeg", "image/gif", "image/png"]:
            self.image_parser = Parser()
        if key == "Content-Length" and self.image_parser:
            # Suppressed: the length changes once the image is re-encoded.
            pass
        else:
            ProxyClient.handleHeader(self, key, value)
    def handleEndHeaders(self):
        if self.image_parser:
            pass  # Need to calculate and send Content-Length first
        else:
            ProxyClient.handleEndHeaders(self)
    def handleResponsePart(self, buffer):
        if self.image_parser:
            self.image_parser.feed(buffer)
        else:
            ProxyClient.handleResponsePart(self, buffer)
    def handleResponseEnd(self):
        if self.image_parser:
            image = self.image_parser.close()
            try:
                format = image.format
                image = image.rotate(180)
                s = StringIO()
                image.save(s, format)
                buffer = s.getvalue()
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. On failure fall back to an
                # empty body rather than breaking the response.
                buffer = ""
            # NOTE(review): twisted header values are usually strings;
            # len(buffer) is an int here -- confirm upstream accepts it.
            ProxyClient.handleHeader(self, "Content-Length", len(buffer))
            ProxyClient.handleEndHeaders(self)
            ProxyClient.handleResponsePart(self, buffer)
            ProxyClient.handleResponseEnd(self)
        else:
            # Fixed: non-image responses previously never delegated here,
            # so they were never finished/forwarded.
            ProxyClient.handleResponseEnd(self)
class InterceptingProxyClientFactory(ProxyClientFactory):
    # Factory wired to produce the image-rotating client above.
    protocol = InterceptingProxyClient
class InterceptingProxyRequest(ProxyRequest):
    # Route plain-http requests through the intercepting client factory.
    protocols = {'http': InterceptingProxyClientFactory}
class InterceptingProxy(Proxy):
    # Proxy protocol that builds InterceptingProxyRequest objects.
    requestFactory = InterceptingProxyRequest
factory = http.HTTPFactory()
factory.protocol = InterceptingProxy
# Listen on all interfaces, port 8000, until the reactor is stopped.
reactor.listenTCP(8000, factory)
reactor.run()
|
google-research/runtime-error-prediction | core/models/ipagnn.py | Python | apache-2.0 | 6,218 | 0.004986 | """IPA-GNN models."""
from typing import Any
from flax import linen as nn
import jax
import jax.numpy as jnp
from core.data import error_kinds
from core.modules.ipagnn import compressive_ipagnn
from core.modules.ipagnn import encoder
from core.modules.ipagnn import ipagnn
from core.modules.ipagnn import logit_math
from core.modules.ipagnn import spans
from core.modules.ipagnn import raise_contributions as raise_contributions_lib
from third_party.flax_examples import transformer_modules
class IPAGNN(nn.Module):
  """IPA-GNN wrapper: span-encodes node tokens, optionally conditions on the
  docstring (FiLM or cross-attention), runs the IPA-GNN core, and projects
  the result to per-example error-class logits."""
  config: Any
  info: Any
  transformer_config: transformer_modules.TransformerConfig
  docstring_transformer_config: transformer_modules.TransformerConfig
  def setup(self):
    """Builds the span encoder, optional docstring encoders, and the core."""
    config = self.config
    vocab_size = self.info.vocab_size
    max_tokens = config.max_tokens
    max_num_nodes = config.max_num_nodes
    max_num_edges = config.max_num_edges  # NOTE(review): unused here -- confirm
    max_steps = config.max_steps
    self.node_span_encoder = spans.NodeSpanEncoder(
        info=self.info,
        config=config,
        transformer_config=self.transformer_config,
        max_tokens=max_tokens,
        max_num_nodes=max_num_nodes,
        use_span_index_encoder=False,
        use_span_start_indicators=False,
    )
    if config.use_film or config.use_cross_attention:
      # Docstring conditioning needs its own token + transformer encoders.
      self.docstring_token_encoder = encoder.TokenEncoder(
          transformer_config=self.docstring_transformer_config,
          num_embeddings=vocab_size,
          features=config.hidden_size,
      )
      self.docstring_encoder = encoder.TransformerEncoder(
          config=self.docstring_transformer_config)
    if config.use_compressive_ipagnn:
      self.ipagnn = compressive_ipagnn.SkipIPAGNN(
          config=config,
          info=self.info,
          max_steps=max_steps,
      )
    else:
      self.ipagnn = ipagnn.IPAGNNModule(
          info=self.info,
          config=config,
          max_steps=max_steps,
      )
  @nn.compact
  def __call__(self, x):
    """Runs the model on batch dict `x`; returns (logits, ipagnn_output)."""
    config = self.config
    info = self.info
    tokens = x['tokens']
    docstring_tokens = x['docstring_tokens']
    # tokens.shape: batch_size, max_tokens
    batch_size = tokens.shape[0]
    encoded_inputs = self.node_span_encoder(
        tokens, x['node_token_span_starts'], x['node_token_span_ends'],
        x['num_nodes'])
    # encoded_inputs.shape: batch_size, max_num_nodes, hidden_size
    if config.use_film or config.use_cross_attention:
      docstring_token_embeddings = self.docstring_token_encoder(
          docstring_tokens)
      # Mask out padding (token id 0) positions in the docstring.
      docstring_mask = docstring_tokens > 0
      docstring_encoder_mask = nn.make_attention_mask(
          docstring_mask, docstring_mask, dtype=jnp.float32)
      # docstring_token_embeddings.shape: batch_size, max_tokens, hidden_size
      docstring_embeddings = self.docstring_encoder(
          docstring_token_embeddings,
          encoder_mask=docstring_encoder_mask)
    else:
      docstring_embeddings = None
      docstring_mask = None
    ipagnn_output = self.ipagnn(
        node_embeddings=encoded_inputs,
        docstring_embeddings=docstring_embeddings,
        docstring_mask=docstring_mask,
        edge_sources=x['edge_sources'],
        edge_dests=x['edge_dests'],
        edge_types=x['edge_types'],
        true_indexes=x['true_branch_nodes'],
        false_indexes=x['false_branch_nodes'],
        raise_indexes=x['raise_nodes'],
        start_node_indexes=x['start_index'],
        exit_node_indexes=x['exit_index'],
        post_domination_matrix=x['post_domination_matrix'],
        step_limits=x['step_limit'],
    )
    # ipagnn_output['exit_node_embeddings'].shape: batch_size, hidden_size
    # ipagnn_output['raise_node_embeddings'].shape: batch_size, hidden_size
    # ipagnn_output['exit_node_instruction_pointer'].shape: batch_size
    # ipagnn_output['raise_node_instruction_pointer'].shape: batch_size
    exit_node_embeddings = ipagnn_output['exit_node_embeddings']
    # exit_node_embeddings.shape: batch_size, hidden_size
    exit_node_instruction_pointer = ipagnn_output['exit_node_instruction_pointer']
    # exit_node_instruction_pointer.shape: batch_size
    num_classes = info.num_classes
    if config.raise_in_ipagnn:
      raise_node_embeddings = ipagnn_output['raise_node_embeddings']
      # raise_node_embeddings.shape: batch_size, hidden_size
      raise_node_instruction_pointer = ipagnn_output['raise_node_instruction_pointer']
      # raise_node_instruction_pointer.shape: batch_size
      if len(info.no_error_ids) == 1:
        # Multiple error classes; only one No-Error class.
        no_error_id = info.no_error_ids[0]
        logits = nn.Dense(
            features=num_classes, name='output'
        )(raise_node_embeddings)  # P(e | yes exception)
        # logits.shape: batch_size, num_classes
        logits = logits.at[:, no_error_id].set(-jnp.inf)
        # Derive the No-Error logit from the exit/raise probability mass.
        no_error_logits = jax.vmap(logit_math.get_additional_logit)(
            exit_node_instruction_pointer + 1e-9,
            raise_node_instruction_pointer + 1e-9,
            logits)
        # no_error_logits.shape: batch_size
        logits = logits.at[:, no_error_id].set(no_error_logits)
      elif len(info.no_error_ids) > 1:
        # Multiple No-Error classes; only one error class.
        if len(info.error_ids) > 1:
          raise NotImplementedError('Multiple error classes and multiple no-error classes.')
        assert len(info.error_ids) == 1
        error_id = info.error_ids[0]
        logits = nn.Dense(
            features=num_classes, name='output'
        )(exit_node_embeddings)  # P(e | no exception)
        # logits.shape: batch_size, num_classes
        logits = logits.at[:, error_id].set(-jnp.inf)
        # Derive the error logit from the raise/exit probability mass.
        error_logits = jax.vmap(logit_math.get_additional_logit)(
            raise_node_instruction_pointer + 1e-9,
            exit_node_instruction_pointer + 1e-9,
            logits)
        # error_logits.shape: batch_size
        logits = logits.at[:, error_id].set(error_logits)
      else:
        raise ValueError('Tried using Exception IPA-GNN on data with no errors.')
    else:
      logits = nn.Dense(
          features=num_classes, name='output'
      )(exit_node_embeddings)
    # logits.shape: batch_size, num_classes
    return logits, ipagnn_output
|
hyesun03/k-board | kboard/board/models.py | Python | mit | 3,211 | 0.000626 | from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from core.models import TimeStampedModel
from accounts.models import Account
class Board(models.Model):
    """A forum board; posts are grouped under a board and addressed by slug."""
    def __str__(self):
        return 'Board Name: ' + self.name
    def get_absolute_url(self):
        # Canonical URL is the board's post list, looked up by slug.
        return reverse('board:post_list', args=[self.slug])
    # URL-safe unique identifier used in routes (see get_absolute_url).
    slug = models.CharField(default='', unique=True, max_length=100)
    # Human-readable board title.
    name = models.CharField(default='', max_length=100)
    # Pagination tuning: posts per page and pages shown in the nav widget.
    posts_chunk_size = models.IntegerField(default=10)
    post_pages_nav_chunk_size = models.IntegerField(default=10)
    # Same tuning for comments under a post.
    comments_chunk_size = models.IntegerField(default=5)
    comment_pages_nav_chunk_size = models.IntegerField(default=10)
class PostQuerySet(models.QuerySet):
    """QuerySet with post-specific filters: text search, soft-delete, board scoping.

    Chainable, e.g. ``Post.objects.board(b).remain().search('TITLE', q)``.
    """

    def search(self, search_flag, query):
        """Filter by *query* against title, content, or both.

        ``search_flag`` is one of ``Post.SEARCH_FLAG`` keys ('TITLE',
        'CONTENT', 'BOTH'); any other value returns the unfiltered queryset.
        """
        if search_flag == 'TITLE':
            return self.filter(title__contains=query)
        elif search_flag == 'CONTENT':
            return self.filter(content__contains=query)
        elif search_flag == 'BOTH':
            return self.filter(Q(title__contains=query) | Q(content__contains=query))
        else:
            return self.all()

    def remain(self):
        """Exclude soft-deleted posts (rows are flagged, never removed)."""
        return self.filter(is_deleted=False)

    def board(self, board):
        """Restrict to posts belonging to *board*."""
        return self.filter(board=board)
class PostManager(models.Manager):
    """Manager exposing PostQuerySet's filters at the ``Post.objects`` level."""
    def get_queryset(self):
        # Route all manager access through the custom queryset class.
        return PostQuerySet(self.model, using=self._db)
    def search(self, search_flag, query):
        return self.get_queryset().search(search_flag, query)
    def remain(self):
        return self.get_queryset().remain()
    def board(self, board):
        return self.get_queryset().board(board)
class Post(TimeStampedModel):
    """A user-authored post on a board; timestamps come from TimeStampedModel."""
    def __str__(self):
        return 'Post Title: ' + self.title
    # Choices for search scope; values are the Korean UI labels.
    SEARCH_FLAG = [
        ('TITLE', '제목'),
        ('CONTENT', '내용'),
        ('BOTH', '제목+내용')
    ]
    # Custom manager providing search/remain/board filters.
    objects = PostManager()
    title = models.CharField(blank=False, max_length=100)
    content = models.TextField(default='')
    board = models.ForeignKey(Board, null=True)
    # Soft-delete flag; deleted posts are filtered out via objects.remain().
    is_deleted = models.BooleanField(default=False)
    page_view_count = models.IntegerField(default=0)
    like_count = models.IntegerField(default=0)
    # Author; nullable, presumably for anonymous posts — confirm against views.
    account = models.ForeignKey(Account, null=True)
    # IP the post was created from.
    ip = models.GenericIPAddressField(null=True, default='')
    def get_absolute_url(self):
        return reverse('board:view_post', args=[self.id])
class EditedPostHistory(TimeStampedModel):
    """Snapshot of a post's title/content taken when the post is edited."""
    post = models.ForeignKey(Post, null=False, default=None)
    title = models.CharField(default='', max_length=100)
    content = models.TextField(default='')
    # IP the edit was made from.
    ip = models.GenericIPAddressField(null=True, default='')
class Attachment(models.Model):
    """File attached either to a live post or to one of its edit snapshots."""
    post = models.ForeignKey(Post, null=True)
    # Set instead of `post` when the attachment belongs to a historical revision.
    editedPostHistory = models.ForeignKey(EditedPostHistory, null=True, default=None)
    attachment = models.FileField(blank=True, null=True)
class Comment(TimeStampedModel):
    """A comment under a post; soft-deleted via is_deleted like Post."""
    content = models.TextField(default='')
    post = models.ForeignKey(Post, null=True)
    is_deleted = models.BooleanField(default=False)
    account = models.ForeignKey(Account, null=True)
    ip = models.GenericIPAddressField(null=True, default='')
|
qedsoftware/commcare-hq | corehq/ex-submodules/casexml/apps/phone/const.py | Python | bsd-3-clause | 738 | 0 | # how long a cached payload sits around for (in seconds).
INITIAL_SYNC_CACHE_TIMEOUT = 60 * 60 # 1 hour
# the threshold for setting a cached payload on initial sync (in seconds).
# restores that take less than this time will not be cached to allow
# for rapid iteration on fixtures/cases/etc.
INITIAL_SYNC_CACHE_THRESHOLD = 60 # 1 minute
# if a sync is happening asynchronously, we wait for this long for a result to
# initially be returned, otherwise we return a 202
INITIAL_ASYNC_TIMEOUT_THRESHOLD = 10
# The Retry-After header parameter. Ask the phone to retry in this many seconds
# to see if the task is done.
ASYNC_RETRY_AFTER = 30
# Cache key prefixes for async-restore task state and OTA restore payloads.
ASYNC_RESTORE_CACHE_KEY_PREFIX = "async-restore-task"
RESTORE_CACHE_KEY_PREFIX = "ota-restore"
quequino/Revolution | plugin.program.vpnicity/yt.py | Python | gpl-2.0 | 16,924 | 0.012527 |
#
# Copyright (C) 2013 Sean Poyser
#
#
# This code is a derivative of the YouTube plugin for XBMC
# released under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3
# Copyright (C) 2010-2012 Tobias Ussing And Henrik Mosgaard Jensen
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
# 5: "240p h263 flv container",
# 18: "360p h264 mp4 container | 270 for rtmpe?",
# 22: "720p h264 mp4 container",
# 26: "???",
# 33: "???",
# 34: "360p h264 flv container",
# 35: "480p h264 flv container",
# 37: "1080p h264 mp4 container",
# 38: "720p vp8 webm container",
# 43: "360p h264 flv container",
# 44: "480p vp8 webm container",
# 45: "720p vp8 webm container",
# 46: "520p vp8 webm stereo",
# 59: "480 for rtmpe",
# 78: "seems to be around 400 for rtmpe",
# 82: "360p h264 stereo",
# 83: "240p h264 stereo",
# 84: "720p h264 stereo",
# 85: "520p h264 stereo",
# 100: "360p vp8 webm stereo",
# 101: "480p vp8 webm stereo",
# 102: "720p vp8 webm stereo",
# 120: "hd720",
# 121: "hd1080"
import re
import urllib2
import urllib
import cgi
import HTMLParser
try: import simplejson as json
except ImportError: import json
MAX_REC_DEPTH = 5
def Clean(text):
    """Normalize scraped HTML text: map typographic punctuation to ASCII,
    strip <b> tags and the BOM.

    NOTE(review): the replace() source literals appear to have been
    HTML-entity-decoded in this copy (e.g. the '&'->'&' and "'"->"'" lines
    are no-ops as written); originals were presumably entity strings such
    as '&amp;' and '&#39;' — verify against upstream.
    """
    text = text.replace('–', '-')
    text = text.replace('’', '\'')
    text = text.replace('“', '"')
    text = text.replace('”', '"')
    text = text.replace(''', '\'')
    text = text.replace('<b>', '')
    text = text.replace('</b>', '')
    text = text.replace('&', '&')
    text = text.replace('\ufeff', '')
    return text
def PlayVideo(id, forcePlayer=False):
    """Resolve YouTube video *id* and play it inside Kodi/XBMC.

    Returns False when no playable URL could be resolved, True otherwise.
    When called outside a plugin context (or with forcePlayer), plays via a
    fresh playlist; otherwise hands the URL back via setResolvedUrl.
    """
    # Kodi modules are imported lazily so this file can load outside Kodi.
    import xbmcgui
    import sys
    import utils
    busy = utils.showBusy()
    video, links = GetVideoInformation(id)
    if busy:
        busy.close()
    # 'best' is only set when resolution succeeded.
    if 'best' not in video:
        return False
    url = video['best']
    title = video['title']
    image = video['thumbnail']
    liz = xbmcgui.ListItem(title, iconImage=image, thumbnailImage=image)
    liz.setInfo( type="Video", infoLabels={ "Title": title} )
    # sys.argv[1] is the plugin handle; -1/absent means no plugin context.
    if forcePlayer or len(sys.argv) < 2 or int(sys.argv[1]) == -1:
        import xbmc
        pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        pl.clear()
        pl.add(url, liz)
        xbmc.Player().play(pl)
    else:
        import xbmcplugin
        liz.setPath(url)
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, liz)
    return True
def GetVideoInformation(id):
    """Best-effort wrapper around GetVideoInfo.

    Returns ``(video, links)``; on any scrape/network failure both are
    returned empty instead of raising, so callers only need to check for
    the 'best' key in *video*.
    """
    video = {}
    links = []
    try:
        video, links = GetVideoInfo(id)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; scrape failures remain silent by design.
        pass
    return video, links
def GetVideoInfo(id):
    """Fetch the YouTube watch page for *id* and scrape playable stream URLs.

    Returns ``(video, links)`` where video holds metadata ('videoid',
    'thumbnail', 'title', and 'best' when resolvable) and links is a list
    of ``[itag, url]`` pairs.
    """
    url = 'http://www.youtube.com/watch?v=%s&safeSearch=none' % id
    html = FetchPage(url)
    video, links = Scrape(html)
    video['videoid'] = id
    video['thumbnail'] = "http://i.ytimg.com/vi/%s/0.jpg" % video['videoid']
    video['title'] = GetVideoTitle(html)
    if len(links) == 0:
        # No muxed streams: fall back to the HLS playlist if present.
        # NOTE(review): if 'hlsvp' is also missing, 'best' stays unset and
        # callers must handle that (PlayVideo checks for it).
        if 'hlsvp' in video:
            video['best'] = video['hlsvp']
    else:
        # NOTE(review): Scrape leaves its sort commented out, so links[0]
        # is merely the first itag encountered, not necessarily the best.
        video['best'] = links[0][1]
    return video, links
def GetVideoTitle(html):
    """Extract the <meta name="title"> content from a watch page,
    falling back to a generic title when absent or unparsable."""
    match = re.search('<meta name="title" content="(.+?)">', html)
    if match is not None:
        try:
            return Clean(match.group(1))
        except:
            pass
    return 'YouTube Video'
def Scrape(html):
    """Parse the flashvars of a YouTube watch page into stream links.

    Returns ``(video, links)``: video may carry 'ttsurl' (subtitles) and
    'hlsvp' (HLS playlist); links is a list of ``[itag, url]`` pairs with
    stereo/3D itags filtered out. Python 2 code (has_key, cgi.parse_qs).

    This copy repairs two tokens corrupted in transit: the u"hlsvp" key
    lookup and the `continue` statement.
    """
    # itags for stereoscopic/3D formats, which we never want to play.
    stereo = [82, 83, 84, 85, 100, 101, 102]
    video = {}
    links = []
    flashvars = ExtractFlashVars(html)
    if not flashvars.has_key(u"url_encoded_fmt_stream_map"):
        return video, links
    if flashvars.has_key(u"ttsurl"):
        video[u"ttsurl"] = flashvars[u"ttsurl"]
    if flashvars.has_key(u"hlsvp"):
        video[u"hlsvp"] = flashvars[u"hlsvp"]
    for url_desc in flashvars[u"url_encoded_fmt_stream_map"].split(u","):
        url_desc_map = cgi.parse_qs(url_desc)
        # Skip descriptors that carry neither a direct URL nor an RTMP stream.
        if not (url_desc_map.has_key(u"url") or url_desc_map.has_key(u"stream")):
            continue
        key = int(url_desc_map[u"itag"][0])
        url = u""
        if url_desc_map.has_key(u"url"):
            url = urllib.unquote(url_desc_map[u"url"][0])
        elif url_desc_map.has_key(u"conn") and url_desc_map.has_key(u"stream"):
            # RTMP: join connection base and stream path with a single '/'.
            url = urllib.unquote(url_desc_map[u"conn"][0])
            if url.rfind("/") < len(url) - 1:
                url = url + "/"
            url = url + urllib.unquote(url_desc_map[u"stream"][0])
        elif url_desc_map.has_key(u"stream") and not url_desc_map.has_key(u"conn"):
            url = urllib.unquote(url_desc_map[u"stream"][0])
        if url_desc_map.has_key(u"sig"):
            # Plain signature, usable as-is.
            url = url + u"&signature=" + url_desc_map[u"sig"][0]
        elif url_desc_map.has_key(u"s"):
            # Encrypted signature: decrypt using the player JS.
            # NOTE(review): DecryptSignatureNew is defined elsewhere in this
            # file/project — confirm it exists before relying on this path.
            sig = url_desc_map[u"s"][0]
            flashvars = ExtractFlashVars(html, assets=True)
            js = flashvars[u"js"]
            url += u"&signature=" + DecryptSignatureNew(sig, js)
        if key not in stereo:
            links.append([key, url])
    #links.sort(reverse=True)  # left disabled upstream: links stay unsorted
    return video, links
def DecryptSignature(s):
    ''' use decryption solution by Youtube-DL project '''
    # Each supported signature length has its own fixed permutation of
    # characters (reverse slices + single transpositions), reverse-engineered
    # from YouTube's player JS. Implicitly returns None for any other length.
    if len(s) == 88:
        return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1] + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]
    elif len(s) == 87:
        return s[62] + s[82:62:-1] + s[83] + s[61:52:-1] + s[0] + s[51:2:-1]
    elif len(s) == 86:
        return s[2:63] + s[82] + s[64:82] + s[63]
    elif len(s) == 85:
        return s[76] + s[82:76:-1] + s[83] + s[75:60:-1] + s[0] + s[59:50:-1] + s[1] + s[49:2:-1]
    elif len(s) == 84:
        return s[83:36:-1] + s[2] + s[35:26:-1] + s[3] + s[25:3:-1] + s[26]
    elif len(s) == 83:
        return s[6] + s[3:6] + s[33] + s[7:24] + s[0] + s[25:33] + s[53] + s[34:53] + s[24] + s[54:]
    elif len(s) == 82:
        return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1] + s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34]
    elif len(s) == 81:
        return s[6] + s[3:6] + s[33] + s[7:24] + s[0] + s[25:33] + s[2] + s[34:53] + s[24] + s[54:81]
    elif len(s) == 92:
        return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83];
    #else:
    #    print ('Unable to decrypt signature, key length %d not supported; retrying might work' % (len(s)))
def ExtractFlashVars(data, assets=False):
    """Locate the ``;ytplayer.config = {...};`` blob in page HTML and return
    its 'args' dict (or 'assets' dict when *assets* is True).

    Returns an empty dict when the config line is not found.
    """
    flashvars = {}
    found = False
    for line in data.split("\n"):
        if line.strip().find(";ytplayer.config = ") > 0:
            found = True
            # Slice out the JSON object between the marker and trailing ';'.
            p1 = line.find(";ytplayer.config = ") + len(";ytplayer.config = ") - 1
            p2 = line.rfind(";")
            if p1 <= 0 or p2 <= 0:
                continue
            data = line[p1 + 1:p2]
            break
    # Helper defined elsewhere in this module; strips any stray delimiter.
    data = RemoveAdditionalEndingDelimiter(data)
    if found:
        data = json.loads(data)
        if assets:
            flashvars = data['assets']
        else:
            flashvars = data['args']
    return flashvars
def FetchPage(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 |
class Edge():
    """A graph edge between two vertices, with drawing caches.

    This copy repairs tokens corrupted in transit (the class header line
    and the ``isDouble`` assignment).
    """

    def __init__(self):
        self.origin = None    # source vertex
        self.goal = None      # destination vertex
        self.value = None     # edge weight/label
        self.isDouble = False # True when the edge is bidirectional
        self.pX = []  # cached x coordinates used when drawing
        self.pY = []  # cached y coordinates used when drawing
|
mtmail/Nominatim | test/python/mock_legacy_word_table.py | Python | gpl-2.0 | 3,310 | 0.003625 | """
Legacy word table for testing with functions to prefil and test contents
of the table.
"""
class MockLegacyWordTable:
    """ A word table for testing using legacy word table structure.

    Wraps a live DB connection; helpers prefill rows and read back
    distinct result sets for assertions. This copy repairs two SQL
    strings corrupted in transit (the trailing `operator TEXT` column
    and the `word_token` column name in add_special).
    """

    def __init__(self, conn):
        self.conn = conn
        with conn.cursor() as cur:
            cur.execute("""CREATE TABLE word (word_id INTEGER,
                                              word_token text,
                                              word text,
                                              class text,
                                              type text,
                                              country_code varchar(2),
                                              search_name_count INTEGER,
                                              operator TEXT)""")
        conn.commit()

    def add_special(self, word_token, word, cls, typ, oper):
        """Insert a special-phrase row (class != 'place')."""
        with self.conn.cursor() as cur:
            cur.execute("""INSERT INTO word (word_token, word, class, type, operator)
                              VALUES (%s, %s, %s, %s, %s)
                        """, (word_token, word, cls, typ, oper))
        self.conn.commit()

    def add_country(self, country_code, word_token):
        with self.conn.cursor() as cur:
            cur.execute("INSERT INTO word (word_token, country_code) VALUES(%s, %s)",
                        (word_token, country_code))
        self.conn.commit()

    def add_postcode(self, word_token, postcode):
        with self.conn.cursor() as cur:
            cur.execute("""INSERT INTO word (word_token, word, class, type)
                              VALUES (%s, %s, 'place', 'postcode')
                        """, (word_token, postcode))
        self.conn.commit()

    def count(self):
        with self.conn.cursor() as cur:
            return cur.scalar("SELECT count(*) FROM word")

    def count_special(self):
        with self.conn.cursor() as cur:
            return cur.scalar("SELECT count(*) FROM word WHERE class != 'place'")

    def get_special(self):
        """Return special rows as a set of tuples, asserting no duplicates."""
        with self.conn.cursor() as cur:
            cur.execute("""SELECT word_token, word, class, type, operator
                           FROM word WHERE class != 'place'""")
            result = set((tuple(row) for row in cur))
            assert len(result) == cur.rowcount, "Word table has duplicates."
            return result

    def get_country(self):
        with self.conn.cursor() as cur:
            cur.execute("""SELECT country_code, word_token
                           FROM word WHERE country_code is not null""")
            result = set((tuple(row) for row in cur))
            assert len(result) == cur.rowcount, "Word table has duplicates."
            return result

    def get_postcodes(self):
        with self.conn.cursor() as cur:
            cur.execute("""SELECT word FROM word
                           WHERE class = 'place' and type = 'postcode'""")
            return set((row[0] for row in cur))

    def get_partial_words(self):
        """Return (token, count) for partial-word rows (no class/country,
        token does not start with a space)."""
        with self.conn.cursor() as cur:
            cur.execute("""SELECT word_token, search_name_count FROM word
                           WHERE class is null and country_code is null
                           and not word_token like ' %'""")
            return set((tuple(row) for row in cur))
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/gdk/WindowType.py | Python | gpl-2.0 | 832 | 0.007212 | # encoding: utf-8
# module gtk.gdk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
from exceptions import Warning
import gio as __gio
import gobj | ect as __gobject
import gobject._gobject as __gobject__gobject
import pango as __pango
import pangocairo as __pangocairo
class WindowType(__gobject.GEnum):
    """Generated stub for the gtk.gdk ``WindowType`` enum.

    Auto-generated by a PyCharm skeleton generator; real values live in the
    compiled _gtk module. This copy repairs the ``*args`` token corrupted
    in transit.
    """
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""
    __dict__ = None  # (!) real value is ''
    # Enum members 0..6 — presumably GDK_WINDOW_ROOT..GDK_WINDOW_OFFSCREEN;
    # TODO confirm against the Gdk documentation.
    __enum_values__ = {
        0: 0,
        1: 1,
        2: 2,
        3: 3,
        4: 4,
        5: 5,
        6: 6,
    }
    __gtype__ = None  # (!) real value is ''
OpenEntityMap/oem-client-anidb | examples/anidb_incremental_local_server.py | Python | bsd-3-clause | 2,697 | 0.001112 | from argparse import ArgumentParser
from flask import Flask, abort, send_from_directory
import json
import logging
import os
app = Flask(__name__)
log = logging.getLogger(__name__)
packages = {}
@app.route('/<package>/<version>/<database>/<collection>/index.<extension>')
def serve_index(package, version, database, collection, extension):
    """Serve a collection's index file for a registered package version.

    404s when the package/version is unknown or the index file is missing.
    This copy repairs the path-join line corrupted in transit.
    """
    # Find database
    if package not in packages or version not in packages[package]:
        abort(404)
    path = os.path.join(packages[package][version], database, collection)
    # Ensure index exists
    filename = 'index.%s' % extension
    if not os.path.exists(os.path.join(path, filename)):
        abort(404)
    # Serve file with an explicit JSON mimetype for .json/.mjson etc.
    mimetype = None
    if extension.endswith('json'):
        mimetype = 'application/json'
    return send_from_directory(path, filename, mimetype=mimetype)
@app.route('/<package>/<version>/<database>/<collection>/items/<key>.<extension>')
def serve_item(package, version, database, collection, key, extension):
    """Serve a single item file from a registered package version,
    404ing on unknown package/version or a missing file."""
    versions = packages.get(package)
    if versions is None or version not in versions:
        abort(404)

    item_dir = os.path.join(versions[version], database, collection, 'items')
    filename = '%s.%s' % (key, extension)
    if not os.path.exists(os.path.join(item_dir, filename)):
        abort(404)

    # JSON-ish extensions get an explicit mimetype; others are guessed.
    mimetype = 'application/json' if extension.endswith('json') else None
    return send_from_directory(item_dir, filename, mimetype=mimetype)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Parse arguments: each -p/--package may be given multiple times.
    parser = ArgumentParser()
    parser.add_argument('-p', '--package', action='append')
    args = parser.parse_args()
    for path in args.package:
        name = os.path.basename(path)
        # Python module name uses underscores where the package name has dashes.
        module = name.replace('-', '_')
        # Ensure package has a "package.json" file
        details_path = os.path.join(path, module, 'package.json')
        if not os.path.exists(details_path):
            # NOTE(review): log.warn is the deprecated alias of log.warning.
            log.warn('[%s] No package details found at %r', name, details_path)
            continue
        # Read package details
        with open(details_path, 'rb') as fp:
            details = json.load(fp)
        # Retrieve package version
        version = details.get('version')
        if not version:
            log.warn('Package %r has an invalid version defined (%r)', path, version)
            continue
        log.info('[%s] (v%s) - Found package at: %r', name, version, path)
        # Update `packages` dictionary (name -> version -> path), which the
        # route handlers above consult when serving files.
        if name not in packages:
            packages[name] = {}
        packages[name][version] = path
    # Run server
    app.run(debug=False)
|
xhava/hippyvm | hippy/objects/closureobject.py | Python | mit | 2,726 | 0.001834 | from hippy.objects.instanceobject import W_InstanceObject
from hippy.klass import W_InvokeCall, def_class
from hippy.builtin import wrap_method, ThisUnwrapper
from hippy.function import Function
class W_CallClosure(W_InvokeCall):
    """Callable wrapper that invokes a closure's function with its
    captured (bound) closure arguments."""
    def __init__(self, klass, call_func, w_obj, closure):
        W_InvokeCall.__init__(self, klass, call_func, w_obj)
        # The closure-cell values captured at bind time (see W_ClosureObject).
        self.closure = closure
    def call_args(self, interp, args_w, w_this=None, thisclass=None,
                  closureargs=None):
        # The incoming closureargs/w_this/thisclass parameters are
        # deliberately ignored: a bound closure always supplies its own.
        closureargs = self.closure
        return self.call_func.call_args(interp, args_w, w_this=self.w_obj,
                                        thisclass=self.klass,
                                        closureargs=closureargs)
class W_ClosureObject(W_InstanceObject):
    """Interpreter-level object backing a PHP Closure instance."""
    def __init__(self, func, klass, storage_w, w_this=None, static=False):
        assert isinstance(func, Function)
        self._func = func
        # One slot per `use (...)` declaration; filled by put_closure().
        self.closure_args = [None] * len(func.closuredecls)
        W_InstanceObject.__init__(self, klass, storage_w)
        self.w_this = w_this
        # Static closures have no bound $this (see get_callable).
        self.static = static
    def setattr(self, interp, attr, w_value, contextclass, unique_item=False):
        # PHP forbids properties on Closure objects.
        interp.catchable_fatal("Closure object cannot have properties")
    def setattr_ref(self, interp, attr, w_value, contextclass):
        interp.catchable_fatal("Closure object cannot have properties")
    def clone(self, interp, contextclass):
        w_res = W_ClosureObject(self._func, k_Closure,
                                self.storage_w[:], self.w_this, self.static)
        # Copy captured cells so the clone is independent.
        w_res.closure_args = self.closure_args[:]
        return w_res
    def get_callable(self):
        # Static closures drop $this; otherwise bind to the stored object.
        w_this = self.w_this if self.static is False else None
        thisclass = w_this.getclass() if w_this is not None else None
        return W_CallClosure(thisclass, self._func, w_this, self.closure_args)
    def put_closure(self, args_w):
        # Captured args arrive in reverse order, so store back-to-front.
        n = len(self.closure_args)
        for i, w_arg in enumerate(args_w):
            self.closure_args[n - i - 1] = w_arg
    def getmeth(self, space, name, contextclass=None):
        # `$c->__invoke(...)` dispatches to the closure itself.
        if name.lower() == "__invoke":
            return self.get_callable()
        return W_InstanceObject.getmeth(self, space, name, contextclass)
@wrap_method(['interp', ThisUnwrapper(W_ClosureObject), 'args_w'],
             name='Closure::__invoke')
def closure_invoke(interp, this, args_w):
    """Built-in Closure::__invoke(): forward all arguments to the wrapped
    function. (Repairs tokens corrupted in transit in the decorator line.)"""
    return this._func.call_args(interp, args_w)
# The interpreter-level class object for PHP's built-in Closure class.
k_Closure = def_class('Closure', [closure_invoke])
def new_closure(space, func, w_this, static=False):
    """Create a W_ClosureObject wrapping *func*, optionally bound to
    *w_this* (static closures carry no $this)."""
    w_res = W_ClosureObject(func, k_Closure,
                            k_Closure.get_initial_storage_w(space)[:],
                            w_this=w_this, static=static)
    return w_res
|
hzeller/rpi-rgb-led-matrix | bindings/python/samples/rotating-block-generator.py | Python | gpl-2.0 | 2,383 | 0.001259 | #!/usr/bin/env python
from samplebase import SampleBase
import math
def scale_col(val, lo, hi):
    """Clamp *val* into [lo, hi] and map it linearly onto the 0-255
    colour range. (Repairs the `return 255` token corrupted in transit.)"""
    if val < lo:
        return 0
    if val > hi:
        return 255
    # True division keeps fractional colour intensities.
    return 255 * (val - lo) / (hi - lo)
def rotate(x, y, sin, cos):
    """Rotate the point (x, y) about the origin, given the rotation
    angle's precomputed sine and cosine."""
    rotated_x = x * cos - y * sin
    rotated_y = x * sin + y * cos
    return rotated_x, rotated_y
class RotatingBlockGenerator(SampleBase):
    """Continuously draws a rotating, colour-graded square at the centre of
    the LED matrix. (Repairs the `self.matrix.height` token corrupted in
    transit.)"""
    def __init__(self, *args, **kwargs):
        super(RotatingBlockGenerator, self).__init__(*args, **kwargs)

    def run(self):
        cent_x = self.matrix.width / 2
        cent_y = self.matrix.height / 2

        # The rotation bounding box is sqrt(2) times the display size so the
        # square's corners never get clipped while spinning.
        rotate_square = min(self.matrix.width, self.matrix.height) * 1.41
        min_rotate = cent_x - rotate_square / 2
        max_rotate = cent_x + rotate_square / 2

        display_square = min(self.matrix.width, self.matrix.height) * 0.7
        min_display = cent_x - display_square / 2
        max_display = cent_x + display_square / 2

        deg_to_rad = 2 * 3.14159265 / 360
        rotation = 0

        # Pre calculate colors
        # NOTE(review): col_table is indexed by raw x below, which can be
        # negative for small matrices — relies on Python negative indexing.
        col_table = []
        for x in range(int(min_rotate), int(max_rotate)):
            col_table.insert(x, scale_col(x, min_display, max_display))

        offset_canvas = self.matrix.CreateFrameCanvas()

        while True:
            rotation += 1
            rotation %= 360

            # calculate sin and cos once for each frame
            angle = rotation * deg_to_rad
            sin = math.sin(angle)
            cos = math.cos(angle)

            for x in range(int(min_rotate), int(max_rotate)):
                for y in range(int(min_rotate), int(max_rotate)):
                    # Our rotate center is always offset by cent_x
                    rot_x, rot_y = rotate(x - cent_x, y - cent_x, sin, cos)

                    if x >= min_display and x < max_display and y >= min_display and y < max_display:
                        x_col = col_table[x]
                        y_col = col_table[y]
                        offset_canvas.SetPixel(rot_x + cent_x, rot_y + cent_y, x_col, 255 - y_col, y_col)
                    else:
                        # Outside the visible square: clear the pixel.
                        offset_canvas.SetPixel(rot_x + cent_x, rot_y + cent_y, 0, 0, 0)

            offset_canvas = self.matrix.SwapOnVSync(offset_canvas)
# Main function: run the sample; on bad CLI args, print usage and exit.
if __name__ == "__main__":
    rotating_block_generator = RotatingBlockGenerator()
    if (not rotating_block_generator.process()):
        rotating_block_generator.print_help()
|
AndroidOpenSourceXperia/android_scripts | devhost.py | Python | gpl-2.0 | 15,780 | 0.002155 | #!/usr/bin/env python2
# dev-host-cl Copyright (c) 2013 by GermainZ <germanosz@gmail.om>
# Requirements: python2
# python2-requests
#
# Dev-Host API documentation
# http://d-h.st/api
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import xml.etree.ElementTree as ET
from getpass import getpass
import os
import binascii
import argparse
import time
import json
import threading
import signal
import sys
try:
from requests import get, post
import requests.exceptions
except ImportError:
print "The requests module is required to use this script."
exit(1)
def arg_parser():
"""Parse command line arguments, and return a dict containing them."""
# Create the top level parser
parser = argparse.ArgumentParser(description=("d-h.st (Dev-Host) command"
"line tool"))
parser.add_argument('-u', "--username",
help=("Username. If none is provided, uploads are"
" done anonymously, and only public files are"
" accessible"))
parser.add_argument('-p', "--password",
help=("Password. If only a username is provided, the"
" user will be prompted for one without it"
" appearing on the screen"))
subparsers = parser.add_subparsers(metavar="ACTION", dest="action",
help="Use %(prog)s ACTION -h for help")
# Parent parsers
# We use a parent parser to get the user's info again so that args can be
# after the actions too. For example, this would raise an unknown arg error
# otherwise:
# devhost.py upload file.txt -u myusername -p mypassword
parser_u = argparse.ArgumentParser(add_help=False)
parser_u.add_argument('-u', "--username",
help=("Username. If none is provided, uploads are"
" done anonymously, and only public files are"
" accessible"))
parser_u.add_argument('-p', "--password",
help=("Password. If only a username is provided, the"
" user will be prompted for one without it"
" appearing on the screen"))
# Other parent parsers
parser_c = argparse.ArgumentParser(add_help=False)
parser_c.add_argument("file_code", metavar="file-code",
help=("File code of an existing file. Multiple file"
" code may be specified for certain commands"
" (separated by commas, without spaces)"))
parser_fo = argparse.ArgumentParser(add_help=False)
parser_fo.add_argument("folder_id", metavar="folder-id",
help="ID of an existing folder")
# Create the parser for the "upload" command
parser_upload = subparsers.add_parser("upload", parents=[parser_u],
help="Upload file")
parser_upload.add_argument("my_file", type=argparse.FileType('rb'),
metavar="file", help="File to upload")
parser_upload.add_argument('-d', "--file-desc", help="Description of file")
parser_upload.add_argument('-c', "--file-code", help=("File code of an"
"existing file to update/replace"))
parser_upload.add_argument('-pb', "--public", choices=['0', '1'],
default='0', help=("File is public or private,"
" 0 - private, 1 - public"))
parser_upload.add_argument('-f', "--upload-folder", dest="uploadfolder",
default='0',
help=("Folder id to upload file to. The root"
" folder is chosen by default"))
# Create the parser for the "get-file-info" command
parser_getf = subparsers.add_parser("file-get-info",
parents=[parser_c, parser_u],
help="Return file info")
# Create the parser for the "set-file-info" command
parser_setf = subparsers.add_parser("file-set-info",
parents=[parser_c, parser_u],
help="Set file info")
parser_setf.add_argument('-n', "--file-name", dest="name",
help=h_empty("name"))
parser_setf.add_argument('-d', "--file-desc", dest="description",
help=h_empty("description"))
parser_setf.add_argument('-pb', "--public", choices=['0', '1'],
default='0', help=h_empty("public status, 0 -"
" private, 1 - public"))
parser_setf.add_argument('-f', "--folder-id",
help="Use to change the file's folder")
# Create the parser for the "file | -delete" command
parser_delf = subparsers.add_parser("file-delete",
parents=[parser_c, parser_u],
help="Delete file")
# Create the parser for the "file-move" command
parser_mvf = subparsers.add_parser("file-move",
parents=[parser_c, parser_u],
help="Move file")
parser_mvf.add_argument('-f', "--folder-id",
he | lp=("Use if you want to change the folder."
" Specify folder_id or 0 for root directory"))
# Create the parser for the "get-folder-info" command
parser_getfo = subparsers.add_parser("folder-get-info",
parents=[parser_fo, parser_u],
help="Return folder info")
# Create the parser for the "set-folder-info" command
parser_setfo = subparsers.add_parser("folder-set-info",
parents=[parser_fo, parser_u],
help="Set folder info")
parser_setfo.add_argument('-n', "--folder-name", dest="name",
help=h_empty("name"))
parser_setfo.add_argument('-d', "--folder-desc", dest="description",
help=h_empty("description"))
parser_setfo.add_argument('-f', "--parent-folder-id",
help=("Use to change the parent folder. Specify"
"the folder ID or 0 for root directory"))
# Create the parser for the "folder-delete" command
parser_delfo = subparsers.add_parser("folder-delete",
parents=[parser_fo, parser_u],
help="Delete folder")
# Create the parser for the "folder-move" command
parser_mvfo = subparsers.add_parser("folder-move",
parents=[parser_fo, parser_u],
help="Move folder")
parser_mvfo.add_argument('-f', "--parent-folder-id",
help=("Use if you want to change the folder."
" Specify the folder ID or 0 for root directory"))
# Create the parser for the "folder-create" command
parser_cfo = subparsers.add_parser("folder-create",
parents=[parser_u],
help="Create folder")
parser_cfo.add_argument("name", metav |
DVS-P2P/bubblestorm | testbed/src/testbed/models/HostInteraction.py | Python | gpl-3.0 | 2,749 | 0.04438 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import logging
from ..base import Utils
from ..base.Constants import *
from .Base import *
log = logging.getLogger(__name__)
class CopyDataWrapper(DB_Obj):
    '''
    file/folder wrapper

    Base class for files/folders that get copied to testbed hosts; field
    values come from the DB via DB_Obj.getDB/setDB. Validation failures
    terminate the process via quit(1).
    '''
    def checkLocalFileAccess(self):
        # Expand the (possibly glob) path and verify every match exists.
        # NOTE(review): glob/os are presumably provided via the star imports
        # in this module's header — confirm.
        for path in glob.glob(self.path()):
            if not os.path.exists(path):
                log.error("cant access file: %s", path)
                quit(1)
    def checkTarget(self):
        # Target sub-directory must be relative (no leading '/').
        if self.targetSubDir() != None and self.targetSubDir()[0] == '/':
            log.error('subfolder should be relative; %s',self.getDB('target_sub_dir'))
            quit(1)
        # A recursive copy cannot also specify a single target file name.
        if self.isRecursive() == True and self.targetPath()!=None:
            log.error('combination: recursiveCopy + targetPath; not allowed')
            quit(1)
    def path(self):
        return self.getDB('path')
    def targetPath(self):
        return self.getDB('target_file_name')
    def targetSubDir(self):
        return self.getDB('target_sub_dir')
    def isRecursive(self):
        return self.getDB('recursive') == True
    def isNodegroupData(self):
        # Overridden by NodeData.
        return False
    def isPrototypeData(self):
        # Overridden by PrototypeData.
        return False
class PrototypeData(CopyDataWrapper):
    '''
    a prototype file/folder, i.e. bin, libfolder, datafolder
    '''
    # DB columns populated from the constructor's valuesDict.
    FIELDS = ['path', 'target_sub_dir', 'recursive', 'target_file_name']
    def __init__(self, valuesDict):
        for key in self.FIELDS:
            self.setDB(key, valuesDict[key])
        self.identName = 'PrototypeData "{0}"'.format(self.getDB('path'))
        # Fail fast on an invalid target or inaccessible local file.
        self.checkTarget()
        self.checkLocalFileAccess()
    def isPrototypeData(self):
        return True
    def setPrototypeID(self, prototypeID):
        self.prototypeID = prototypeID
    def getPrototypeID(self):
        return self.prototypeID
class ExperimentData(CopyDataWrapper):
    '''
    a experiment file/folder, i.e. liveexp/*.py, exp_database, ...

    (Repairs a stray token corrupted in transit on the __init__ line.)
    '''
    # DB columns populated from the constructor's valuesDict.
    FIELDS = ['path', 'target_sub_dir', 'recursive', 'target_file_name']

    def __init__(self, valuesDict):
        for key in self.FIELDS:
            self.setDB(key, valuesDict[key])
        self.identName = 'ExperimentData "{0}"'.format(self.getDB('path'))
        # Fail fast on an invalid target or inaccessible local file.
        self.checkTarget()
        self.checkLocalFileAccess()
class NodeData(CopyDataWrapper):
    '''
    a node data file/folder, i.e. protoype_config_files, keyValueTestFile, ...

    (Repairs stray tokens corrupted in transit on the __init__ signature
    and the final return line.)
    '''
    # DB columns populated from the constructor's valuesDict.
    FIELDS = ['id', 'path', 'target_sub_dir', 'recursive', 'node_group', 'target_file_name']
    TABLE = 'node_data_view'

    def __init__(self, valuesDict):
        for key in self.FIELDS:
            self.setDB(key, valuesDict[key])
        self.identName = 'NodeData "{0}"'.format(self.getDB('path'))
        # Fail fast on an invalid target or inaccessible local file.
        self.checkTarget()
        self.checkLocalFileAccess()

    def isNodegroupData(self):
        return True

    def matchNodeGroup(self, nodeGroup):
        # A null node_group means "applies to every node group".
        return (self.getDB('node_group') == None or
                self.getDB('node_group') == nodeGroup.dbID())

    def nodeGroupID(self):
        return self.getDB('node_group')
otsaloma/nfoview | nfoview/errors.py | Python | gpl-3.0 | 1,118 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) 2008 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ("AffirmationError",)
class AffirmationError(Exception):

    """
    Raised when a value expected to be ``True`` turns out to be ``False``.

    Works like :exc:`AssertionError`, except it does not depend on
    :const:`__debug__` or interpreter optimization flags. Use it for
    essential boolean sanity checks that must survive ``-O``, rather than
    for optional debug-only assertions.
    """

    pass
|
patrick-kidger/equinox | equinox/nn/composed.py | Python | apache-2.0 | 3,841 | 0.002083 | import typing
from typing import Any, Callable, List, Optional, Sequence
import jax
import jax.nn as jnn
import jax.random as jrandom
from ..custom_types import Array
from ..module import Module, static_field
from .linear import Linear
def _identity(x):
    # Pass the argument straight through; used as MLP's default
    # `final_activation`.
    return x
# When building the documentation, replace jnn.relu with a bare stub so the
# docs show a clean name instead of the jax-internal wrapper.
if getattr(typing, "GENERATING_DOCUMENTATION", False):
    def relu(_):
        pass
    jnn.relu = relu
_identity.__qualname__ = "identity"  # Renamed for nicer documentation.
class MLP(Module):
    """Standard Multi-Layer Perceptron; also known as a feed-forward network."""
    # Trainable linear layers; everything below marked static_field is
    # treated as static (non-trainable) metadata by the pytree machinery.
    layers: List[Linear]
    activation: Callable
    final_activation: Callable
    in_size: int = static_field()
    out_size: int = static_field()
    width_size: int = static_field()
    depth: int = static_field()
    def __init__(
        self,
        in_size: int,
        out_size: int,
        width_size: int,
        depth: int,
        activation: Callable = jnn.relu,
        final_activation: Callable = _identity,
        *,
        key: "jax.random.PRNGKey",
        **kwargs
    ):
        """**Arguments**:
        - `in_size`: The size of the input layer.
        - `out_size`: The size of the output layer.
        - `width_size`: The size of each hidden layer.
        - `depth`: The number of hidden layers.
        - `activation`: The activation function after each hidden layer. Defaults to
            ReLU.
        - `final_activation`: The activation function after the output layer. Defaults
            to the identity.
        - `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
            initialisation. (Keyword only argument.)
        """
        super().__init__(**kwargs)
        # One key per Linear layer (depth hidden layers + output layer).
        keys = jrandom.split(key, depth + 1)
        layers = []
        if depth == 0:
            # Degenerate case: a single linear map, no hidden layers.
            layers.append(Linear(in_size, out_size, key=keys[0]))
        else:
            layers.append(Linear(in_size, width_size, key=keys[0]))
            for i in range(depth - 1):
                layers.append(Linear(width_size, width_size, key=keys[i + 1]))
            layers.append(Linear(width_size, out_size, key=keys[-1]))
        self.layers = layers
        self.in_size = in_size
        self.out_size = out_size
        self.width_size = width_size
        self.depth = depth
        self.activation = activation
        self.final_activation = final_activation
    def __call__(
        self, x: Array, *, key: Optional["jax.random.PRNGKey"] = None
    ) -> Array:
        """**Arguments:**
        - `x`: A JAX array with shape `(in_size,)`.
        - `key`: Ignored; provided for compatibility with the rest of the Equinox API.
            (Keyword only argument.)
        **Returns:**
        A JAX array with shape `(out_size,)`.
        """
        # Hidden layers get `activation`; the output layer gets
        # `final_activation` only.
        for layer in self.layers[:-1]:
            x = layer(x)
            x = self.activation(x)
        x = self.layers[-1](x)
        x = self.final_activation(x)
        return x
class Sequential(Module):
    """A sequence of [`equinox.Module`][]s applied in order."""

    layers: Sequence[Module]

    def __call__(self, x: Any, *, key: Optional["jax.random.PRNGKey"] = None) -> Any:
        """**Arguments:**

        - `x`: Argument passed to the first member of the sequence.
        - `key`: A `jax.random.PRNGKey`, which will be split and passed to
            every layer to provide any desired randomness. (Optional. Keyword
            only argument.)

        **Returns:**

        The output of the last member of the sequence.
        """
        # Each layer receives its own subkey (or None when no key was given).
        count = len(self.layers)
        subkeys = [None] * count if key is None else jrandom.split(key, count)
        for layer, subkey in zip(self.layers, subkeys):
            x = layer(x, key=subkey)
        return x


Sequential.__init__.__doc__ = """**Arguments:**

- `layers`: A sequence of [`equinox.Module`][]s.
"""
|
Micronaet/micronaet-bom | csv_setup_inventory_delta/delta.py | Python | agpl-3.0 | 7,673 | 0.007429 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import pickle
import xlrd
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from opene | rp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class ClassNameCamelCase(orm.Model):
    """ Model name: ClassNameCamelCase

    Extend ``product.product.import.inventory`` with an action that reads an
    XLS file of counted quantities and writes each product's
    ``inventory_delta`` field with the difference between the counted value
    and the computed stock movements.
    """
    _inherit = 'product.product.import.inventory'

    # Folder where the inventory XLS files are uploaded:
    filename = '/home/administrator/photo/xls/inventory'  # TODO parametrize

    def action_correct_delta_from_csv(self, cr, uid, ids, context=None):
        ''' Generate report status for delta inventory.

            Read the XLS file row by row, look up each product by default
            code, compare the counted quantity against the computed movement
            record and store the resulting delta on the product. Per-row
            problems are accumulated in `error`/`note` and written back on
            the wizard record instead of aborting the whole import.
        '''
        if context is None:
            context = {}

        # Pools used:
        product_pool = self.pool.get('product.product')
        mrp_pool = self.pool.get('mrp.production')

        error = ''
        note = ''
        current_proxy = self.browse(cr, uid, ids, context=context)[0]

        # ---------------------------------------------------------------------
        # Read parameters:
        # ---------------------------------------------------------------------
        fullname = current_proxy.fullname
        max_line = current_proxy.max_line

        # Pickle part for speedup during debug:
        use_pickle = False  # TODO change
        pickle_file = os.path.expanduser('~/pickle.store')

        # Init check:
        if not fullname:
            raise osv.except_osv(
                _('Import error'),
                _('Need a file name to import in path %s' % fullname),
                )

        # Log activity:
        _logger.info('Start import delta product form: %s' % self.filename)

        # ---------------------------------------------------------------------
        # Generate movement database:
        # ---------------------------------------------------------------------
        if use_pickle:
            # BUGFIX: read the cache back with mode 'rb'; the original code
            # opened the file with 'wb', truncating it before loading.
            product_movement = pickle.load(
                open(pickle_file, 'rb'))
        else:
            _logger.info('Read halfworked data type')
            # Call report for halfwork:
            data = {
                'mode': 'halfwork',
                'for_inventory_delta': True,
                }
            product_movement = mrp_pool.get_explode_report_object(
                cr, uid, data=data, context=context)

            # Call report for component:
            _logger.info('Read component data type')
            data['type'] = 'component'
            product_movement.update(
                mrp_pool.get_explode_report_object(
                    cr, uid, data=data, context=context))

            # Save the computed movements so a later debug run can reuse them
            # through the `use_pickle` switch above:
            pickle.dump(
                product_movement,
                open(pickle_file, 'wb'),
                )

        # Read excel filename:
        try:
            filename = os.path.join(self.filename, fullname)
            wb = xlrd.open_workbook(filename)
            ws = wb.sheet_by_index(0)
        except Exception:
            raise osv.except_osv(
                _('Open file error'),
                _('Cannot found file: %s (or file not in correct format' % \
                    filename),
                )

        # Loop on lines:
        for i in range(0, max_line):
            try:
                row = ws.row(i)  # generate error at end
            except Exception:
                # Out of range error ends import:
                note += _('Import end at line: %s\n') % i
                break

            try:
                # Column 0: product default code (strip xlrd's float suffix).
                try:
                    default_code = str(row[0].value).replace('.0', '')
                except Exception:
                    # BUGFIX: assign `default_code` here; the original code
                    # assigned an unused `default` variable, so a conversion
                    # failure left `default_code` undefined and the outer
                    # error handler raised NameError.
                    default_code = ''

                # Search product with code:
                if not default_code:
                    error += _('%s. No default code on file found\n') % i
                    continue  # jump

                # Column 1: counted quantity (0 when missing/unparsable).
                try:
                    product_qty = float(row[1].value)
                except Exception:
                    product_qty = 0

                product_ids = product_pool.search(cr, uid, [
                    ('default_code', '=', default_code)], context=context)
                if not product_ids:
                    error += _(
                        '%s. Error code not found, code: %s\n') % (
                            i, default_code)
                    continue  # jump
                elif len(product_ids) > 1:
                    # More than one product with this code: warn, use first.
                    error += _(
                        '%s. Warning more code (take first), code: %s\n') % (
                            i, default_code)

                record = product_movement.get(default_code, False)
                if record:
                    # record layout: [start, OC, OF, SAL, previous delta]
                    inventory_delta = product_qty - \
                        sum((
                            record[3],  # SAL value
                            - record[1],  # negative OC value
                            - record[2],  # positive OF value
                            #- record[0], # XXX no inventory start (yet delta)
                            )) + record[4]  # Delta yet present
                    note += '%s | %s | %s (previous: %s)\n' % (
                        i, default_code, inventory_delta,
                        record[4])
                else:
                    # No movement data: counted quantity becomes the delta.
                    inventory_delta = product_qty
                    note += '%s. %s NO DATA (set as start)!!!\n' % (
                        i, default_code)

                product_pool.write(cr, uid, product_ids[0], {
                    'inventory_delta': inventory_delta,
                    }, context=context)
            except Exception:
                error += _('%s. Import error code: %s [%s]\n') % (
                    i, default_code, sys.exc_info())

        # Persist the outcome on the wizard record for the user to review:
        self.write(cr, uid, ids, {
            'error': error,
            'note': 'File: %s\n%s' % (
                filename, note),
            }, context=context)
        _logger.info('End import Delta product: %s' % fullname)
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anthonylife/TaobaoCompetition2014 | src/mbtalmf/train.py | Python | gpl-2.0 | 3,123 | 0.008966 | #!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/4/10 #
# Call model.py to do time-aware multi-beahvior matrix #
# factorization #
###################################################################
import sys, csv, json, argparse
sys.path.append("../")
from data_io import write_submission
from model import MBTALMF
settings = json.loads(open("../../SETTINGS.json").read())
def main():
    """Command line entry point.

    Parse the command line options, optionally (re)train the MBTALMF model,
    generate the recommendation result and write it as a submission file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-init', type=str, action='store', dest='init_choice',
            help='specify which method to initialize model parameters')
    parser.add_argument('-retrain', type=str, action='store',
            dest='retrain_choice',
            help='specify whether to retrain the model ("True" to retrain)')
    parser.add_argument('-tv', type=float, action='store', dest='threshold_val',
            help='specify the threshold value to generate recommendations')
    parser.add_argument('-topk', type=int, action='store',
            dest='topk', help='topk for recommendation result')
    parser.add_argument('-t', type=int, action='store',
            dest='target', help='for validation or test dataset')

    if len(sys.argv) != 11:
        print('Command e.g.: python train.py -retrain True -init zero(gaussian) '
            + '-tv 0.8 -topk 5 -t 0(1)')
        # BUGFIX: stop here; the original code printed the usage message but
        # fell through to parse_args() with an incomplete command line.
        sys.exit(1)

    para = parser.parse_args()
    # Target selects which dataset file is used (0=train, 1=test):
    if para.target == 0:
        data_file = settings["TRAIN_DATA_FILE"]
    elif para.target == 1:
        data_file = settings["TAR_DATA_FILE"]
    else:
        print('Choice of file invalid!')
        sys.exit(1)

    mbtalmf = MBTALMF()
    if para.retrain_choice == "True":
        # Full training run followed by threshold-based recommendation:
        mbtalmf.model_init(data_file, para.init_choice, para.target)
        mbtalmf.train()
        recommend_result = mbtalmf.genRecommendResult(True, data_file,
                para.init_choice, para.threshold_val, para.target)
        #recommend_result = mbtalmf.genTopkRecommendResult(False, data_file,
        #        para.init_choice, para.topk, para.target)
        write_submission(recommend_result)
    else:
        # Reuse previously trained parameters; top-k recommendation:
        #recommend_result = mbtalmf.genRecommendResult(False, data_file,
        #        para.init_choice, para.threshold_val, para.target)
        recommend_result = mbtalmf.genTopkRecommendResult(False, data_file,
                para.init_choice, para.topk, para.target)
        write_submission(recommend_result)


if __name__ == "__main__":
    main()
|
coati-00/nepi | nepi/activities/urls.py | Python | gpl-2.0 | 1,778 | 0 | from django.conf.urls import url, patterns
'''Want to switch to class based views but not sure how'''
from nepi.activities.views import (UpdateConversationView,
| DeleteConversationView,
ScenarioListView,
| ScenarioDetailView,
ScenarioDeleteView,
CreateConverstionView,
SaveResponse,
LastResponse,
CreateCalendar,
SaveRetentionResponse,
SaveCalendarResponse)
# URL routes for the activities app (Django 1.x `patterns` syntax with an
# empty view-module prefix; every route maps to a class based view).
urlpatterns = patterns(
    '',
    # Conversation create / update / delete:
    url(r'^class_create_conversation/(?P<pk>\d+)/$',
        CreateConverstionView.as_view(),
        name='create_conversation'),
    url(r'^update_conversation/(?P<pk>\d+)/$',
        UpdateConversationView.as_view(),
        name='update_conversation'),
    url(r'^delete_conversation/(?P<pk>\d+)/$',
        DeleteConversationView.as_view()),
    # Scenario list / delete / detail:
    url(r'^classview_scenariolist/$',
        ScenarioListView.as_view()),
    url(r'^delete_scenario/(?P<pk>\d+)/$',
        ScenarioDeleteView.as_view()),
    url(r'^scenario_display/(?P<pk>\d+)/$',
        ScenarioDetailView.as_view()),
    # Endpoints that record user responses / fetch the last one:
    url(r'^get_click/$',
        SaveResponse.as_view(),
        name='save-response'),
    url(r'^get_last/$',
        LastResponse.as_view()),
    url(r'^create_calendar/$',
        CreateCalendar.as_view(),
        name='create_calendar'),
    url(r'^retention_click/$',
        SaveRetentionResponse.as_view(),
        name='retention_click'),
    url(r'^calendar_click/$',
        SaveCalendarResponse.as_view(),
        name='calendar_click'),
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.