repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
MyRobotLab/pyrobotlab | home/brotherbrown831/old_py/HomeAutomation.py | Python | apache-2.0 | 1,156 | 0.016436 | from org.myrobotlab.net import BareBonesBrowserLaunch
def outsideLights(value):
if value = 1
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")
else
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum= | 6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=0")
def garageLights(value):
if value = 1
BareBonesBrowserLaunch.o | penURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")
else
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=0")
def alarmOn(value):
BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")
|
ziirish/burp-ui | burpui/misc/auth/ldap.py | Python | bsd-3-clause | 11,183 | 0.001162 | # -*- coding: utf8 -*-
from flask_login import AnonymousUserMixin
from .interface import BUIhandler, BUIuser, BUIloader
from ...utils import __
import ssl
try:
from ldap3 import (
Server,
Connection,
Tls,
ALL,
RESTARTABLE,
AUTO_BIND_TLS_BEFORE_BIND,
AUTO_BIND_NONE,
SIMPLE,
)
except ImportError:
raise ImportError("Unable to load 'ldap3' module")
class LdapLoader(BUIloader):
"""The :class:`burpui.misc.auth.ldap.LdapLoader` handles searching for and
binding as a :class:`burpui.misc.auth.ldap.LdapUser` user.
"""
section = name = "LDAP:AUTH"
def __init__(self, app=None, handler=None):
""":func:`burpui.misc.auth.ldap.LdapLoader.__init__` establishes a
connection to the LDAP server.
:param app: Instance of the app we are running in
:type app: :class:`burpui.engines.server.BUIServer`
"""
self.app = app
conf = self.app.conf
handler.name = self.name
defaults = {
"LDAP:AUTH": {
"host": "localhost",
"port": None,
"encryption": None,
"binddn": None,
"bindpw": None,
"filter": None,
"base": None,
"searchattr": "uid",
"validate": "none",
"cafile": None,
}
}
mapping = {
"host": "host",
"port": "port",
"encryption": "encryption",
"filt": "filter",
"base": "base",
"attr": "searchattr",
"binddn": "binddn",
"bindpw": "bindpw",
"validate": "validate",
"cafile": "cafile",
}
conf.update_defaults(defaults)
# Maybe the handler argument is None, maybe the 'priority'
# option is missing. We don't care.
try:
handler.priority = (
conf.safe_get("priority", "integer", section=self.section)
| or handler.priority
)
except:
pass
for (opt, key) in mapping.items():
setattr(self, opt, conf.safe_get(key, "force_string", section=self.section))
if self.validate and self.validate.lower() in ["none", "optional", "required"]:
self.validate = getattr(s | sl, "CERT_{}".format(self.validate.upper()))
else:
self.validate = None
self.version = ssl.OP_NO_SSLv3
self.users = []
self.tls = None
self.ssl = False
self.auto_bind = AUTO_BIND_NONE
if self.encryption == "ssl":
self.ssl = True
elif self.encryption == "tls":
self.tls = Tls(
local_certificate_file=self.cafile,
validate=self.validate,
version=self.version,
)
self.auto_bind = AUTO_BIND_TLS_BEFORE_BIND
if self.port:
try:
self.port = int(self.port)
except ValueError:
self.logger.error("LDAP port must be a valid integer")
self.port = None
self.logger.info("LDAP host: {0}".format(self.host))
self.logger.info("LDAP port: {0}".format(self.port))
self.logger.info("LDAP encryption: {0}".format(self.encryption))
self.logger.info("LDAP filter: {0}".format(self.filt))
self.logger.info("LDAP base: {0}".format(self.base))
self.logger.info("LDAP search attr: {0}".format(self.attr))
self.logger.info("LDAP binddn: {0}".format(self.binddn))
self.logger.info("LDAP bindpw: {0}".format("*****" if self.bindpw else "None"))
self.logger.info("TLS object: {0}".format(self.tls))
try:
self.server = Server(
host=self.host,
port=self.port,
use_ssl=self.ssl,
get_info=ALL,
tls=self.tls,
)
self.logger.debug("LDAP Server = {0}".format(str(self.server)))
if self.binddn:
self.ldap = Connection(
self.server,
user=self.binddn,
password=self.bindpw,
raise_exceptions=True,
client_strategy=RESTARTABLE,
auto_bind=self.auto_bind,
authentication=SIMPLE,
)
else:
self.ldap = Connection(
self.server,
raise_exceptions=True,
client_strategy=RESTARTABLE,
auto_bind=self.auto_bind,
)
okay = False
with self.ldap:
self.logger.debug("LDAP Connection = {0}".format(str(self.ldap)))
self.logger.info("OK, connected to LDAP")
okay = True
if not okay:
raise Exception("Not connected")
self._prefetch()
except Exception as e:
self.logger.error("Could not connect to LDAP: {0}".format(str(e)))
self.server = None
self.ldap = None
def __exit__(self, exc_type, exc_value, traceback):
""":func:`burpui.misc.auth.ldap.LdapLoader.__exit__` closes the
connection to the LDAP server.
"""
if self.ldap and self.ldap.bound:
self.ldap.unbind()
def fetch(self, searchval=None, uniq=True):
""":func:`burpui.misc.auth.ldap.LdapLoader.fetch` searches for a user
object in the LDAP server.
:param searchval: attribute value to search for
:type searchval: str
:param uniq: only return one result
:type uniq: bool
:returns: dictionary of `distinguishedName` and `commonName` attributes for the
user if found, otherwise None.
"""
try:
if self.filt:
query = self.filt.format(self.attr, searchval)
else:
query = "({0}={1})".format(self.attr, searchval)
self.logger.info("filter: {0} | base: {1}".format(query, self.base))
r = None
with self.ldap:
self.logger.debug("LDAP Connection = {0}".format(str(self.ldap)))
self.ldap.search(self.base, query, attributes=["cn", self.attr])
r = self.ldap.response
if not r:
raise ValueError("no results")
except Exception as e:
self.logger.error("Ooops, LDAP lookup failed: {0}".format(str(e)))
return None
if not uniq:
return r
for record in r:
attrs = record["attributes"]
if self.attr in attrs and searchval in attrs[self.attr]:
self.logger.info("Found DN: {0}".format(record["dn"]))
return {"dn": record["dn"], "cn": attrs["cn"][0]}
def _prefetch(self):
"""Prefetch all users that match the filter/base"""
self.users = []
results = self.fetch("*", False) or []
for record in results:
attrs = record["attributes"]
if self.attr in attrs:
self.users.append(attrs[self.attr][0])
self.logger.debug(self.users)
def check(self, dn=None, passwd=None):
""":func:`burpui.misc.auth.ldap.LdapLoader.check` authenticates a user
against the LDAP server.
:param dn: canonical `dn` of the user to authenticate as
:type dn: str
:param passwd: password of the user to authenticate as
:type passwd: str
:returns: True if bind was successful, otherwise False
"""
try:
with Connection(
self.server,
user="{0}".format(dn),
password=passwd,
raise_exceptions=True,
auto_bind=self.auto_bind,
authentication=SIMPLE,
) as con:
self.logger.debug("LDAP Connection = {0}".format(str(con)))
self.logger.info("Bound as user: {0}".format(dn))
return con.bind()
|
ARMmbed/greentea | src/htrun/host_tests_plugins/host_test_plugins.py | Python | apache-2.0 | 12,644 | 0.00174 | #
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Base class for plugins."""
import os
import sys
import platform
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
from mbed_lstools.main import create
from ..host_tests_logger import HtrunLogger
class HostTestPluginBase:
"""Base class for all plugins used with host tests."""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, CopyMethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
required_parameters = (
[]
) # Parameters required for 'kwargs' in plugin APIs: e.g. self.execute()
stable = False # Determine if plugin is stable and can be used
def __init__(self):
"""Initialise the object."""
# Setting Host Test Logger instance
ht_loggers = {
"BasePlugin": HtrunLogger("PLGN"),
"CopyMethod": HtrunLogger("COPY"),
"ResetMethod": HtrunLogger("REST"),
}
self.plugin_logger = ht_loggers.get(self.type, ht_loggers["BasePlugin"])
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
"""Configure plugin.
This function should be called before plugin execute() method is used.
"""
return False
def execute(self, capability, *args, **kwargs):
"""Execute plugin 'capability' by name.
Each capability may directly just call some command line program or execute a
function.
Args:
capability: Capability name.
args: Additional arguments.
kwargs: Additional arguments.
Returns:
Capability call return value.
"""
return False
def is_os_supported(self, os_name=None):
"""Check if the OS is supported by this plugin.
In some cases a plugin will not work under a particular OS. Usually because the
command line tool used to implement the plugin functionality is not available.
Args:
os_name: String describing OS. See self.host_os_support() and
self.host_os_info()
Returns:
True if plugin works under certain OS.
"""
return True
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
"""Print error messages to the console.
Args:
text: Text to print.
"""
self.plugin_logger.prn_err(text)
return False
def print_plugin_info(self, text, NL=True):
"""Print notifications to the console.
Args:
text: Text to print.
NL: (Deprecated) Newline will be added behind text if this flag is True.
"""
self.plugin_logger.prn_inf(text)
return True
def print_plugin_char(self, char):
"""Print a char to st | dout."""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(
| self,
destination_disk,
init_delay=0.2,
loop_delay=0.25,
target_id=None,
timeout=60,
):
"""Wait until destination_disk is ready and can be accessed.
Args:
destination_disk: Mount point (disk) which will be checked for readiness.
init_delay: Initial delay time before first access check.
loop_delay: Polling delay for access check.
timeout: Polling timeout in seconds.
Returns:
True if mount point was ready in given time, otherwise False.
"""
if target_id:
# Wait for mount point to appear with mbed-ls
# and if it does check if mount point for target_id changed
# If mount point changed, use new mount point and check if its ready.
new_destination_disk = destination_disk
# Sometimes OSes take a long time to mount devices (up to one minute).
# Current pooling time: 120x 500ms = 1 minute
self.print_plugin_info(
"Waiting up to %d sec for '%s' mount point (current is '%s')..."
% (timeout, target_id, destination_disk)
)
timeout_step = 0.5
timeout = int(timeout / timeout_step)
for i in range(timeout):
# mbed_lstools.main.create() should be done inside the loop.
# Otherwise it will loop on same data.
mbeds = create()
mbed_list = mbeds.list_mbeds() # list of mbeds present
# get first item in list with a matching target_id, if present
mbed_target = next(
(x for x in mbed_list if x["target_id"] == target_id), None
)
if mbed_target is not None:
# Only assign if mount point is present and known (not None)
if (
"mount_point" in mbed_target
and mbed_target["mount_point"] is not None
):
new_destination_disk = mbed_target["mount_point"]
break
sleep(timeout_step)
if new_destination_disk != destination_disk:
# Mount point changed, update to new mount point from mbed-ls
self.print_plugin_info(
"Mount point for '%s' changed from '%s' to '%s'..."
% (target_id, destination_disk, new_destination_disk)
)
destination_disk = new_destination_disk
result = True
# Check if mount point we've promoted to be valid one (by optional target_id
# check above)
# Let's wait for 30 * loop_delay + init_delay max
if not access(destination_disk, F_OK):
self.print_plugin_info(
"Waiting for mount point '%s' to be ready..." % destination_disk,
NL=False,
)
sleep(init_delay)
for i in range(30):
if access(destination_disk, F_OK):
result = True
break
sleep(loop_delay)
self.print_plugin_char(".")
else:
self.print_plugin_error(
"mount {} is not accessible ...".format(destination_disk)
)
result = False
return (result, destination_disk)
def check_serial_port_ready(self, serial_port, target_id=None, timeout=60):
"""Check and update serial port name information for DUT.
If no target_id is specified return the old serial port name.
Args:
serial_port: Current serial port name.
target_id: Target ID of a device under test.
timeout: Serial port pooling timeout in seconds.
Returns:
Tuple with result (always True) and serial port read from mbed-ls.
"""
# If serial port changed (check using mbed-ls), use new serial port
new_serial_port = None
if target_id:
# Sometimes OSes take a long time to mount devices (up to one minute).
# Current pooling time: 120x 500ms = 1 minute
|
googleapis/python-compute | google/cloud/compute_v1/services/region_commitments/transports/__init__.py | Python | apache-2.0 | 1,127 | 0.000887 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the Lic | ense.
#
from collections import OrderedDict
from typing import Dict, Type
from .base | import RegionCommitmentsTransport
from .rest import RegionCommitmentsRestTransport
from .rest import RegionCommitmentsRestInterceptor
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[RegionCommitmentsTransport]]
_transport_registry["rest"] = RegionCommitmentsRestTransport
__all__ = (
"RegionCommitmentsTransport",
"RegionCommitmentsRestTransport",
"RegionCommitmentsRestInterceptor",
)
|
karlind/ewu-v4 | products/migrations/0004_auto_20160319_1100.py | Python | gpl-3.0 | 566 | 0.001767 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-19 03:00
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('products', '0003_auto_20160319_1021'),
]
operations = [
migrations.AlterField(
model_name='product',
name='pub_date',
fiel | d=models.DateTimeField(default=datetime.datetime(2016, 3, 19, 3, 0, 38, 97125, tz | info=utc)),
),
]
|
ajbouh/tfi | src/tfi/parse/iterators.py | Python | mit | 7,857 | 0.000255 | # -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.iterators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A collection of helpful iterators.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import collections
class peek_iter(object):
"""An iterator object that supports peeking ahead.
Parameters
----------
o : iterable or callable
`o` is interpreted very differently depending on the presence of
`sentinel`.
If `sentinel` is not given, then `o` must be a collection object
which supports either the iteration protocol or the sequence protocol.
If `sentinel` is given, then `o` must be a callable object.
sentinel : any value, optional
If given, the iterator will call `o` with no arguments for each
call to its `next` method; if the value returned is equal to
`sentinel`, :exc:`StopIteration` will be raised, otherwise the
value will be returned.
See Also
--------
`peek_iter` can operate as a drop in replacement for the built-in
`iter <https://docs.python.org/2/library/functions.html#iter>`_ function.
Attributes
----------
sentinel
The value used to indicate the iterator is exhausted. If `sentinel`
was not given when the `peek_iter` was instantiated, then it will
be set to a new object instance: ``object()``.
"""
def __init__(self, *args):
# type: (Any) -> None
"""__init__(o, sentinel=None)"""
self._iterable = iter(*args) # type: Iterable
self._cache = collections.deque() # type: collections.deque
if len(args) == 2:
self.sentinel = args[1]
else:
self.sentinel = object()
def __iter__(self):
# type: () -> peek_iter
return self
def __next__(self, n=None):
# type: (int) -> Any
# note: prevent 2to3 to transform self.next() in next(self) which
# causes an infinite loop !
return getattr(self, 'next')(n)
def _fillcache(self, n):
# type: (int) -> None
"""Cache `n` items. If `n` is 0 or None, then 1 item is cached."""
if not n:
n = 1
try:
while len(self._cache) < n:
self._cache.append(next(self._iterable)) # type: ignore
except StopIteration:
while len(self._cache) < n:
self._cache.append(self.sentinel)
def has_next(self):
# type: () -> bool
"""Determine if iterator is exhausted.
Returns
-------
bool
True if iterator has more items, False otherwise.
Note
----
Will never raise :exc:`StopIteration`.
"""
return self.peek() != self.sentinel
def next(self, n=None):
# type: (int) -> Any
"""Get the next item or `n` items of the iterator.
Parameters
----------
n : int or None
The number of items to retrieve. Defaults to None.
Returns
-------
item or list of items
The next item or `n` items of the iterator. If `n` is None, the
item itself is returned. If `n` is an int, the items will be
returned in a list. If `n` is 0, an empty list is returned.
Raises
------
StopIteration
Raised if the iterator is exhausted, even if `n` is 0.
"""
self._fillcache(n)
if not n:
if self._cache[0] == self.sentinel:
raise StopIteration
if n is None:
result = self._cache.popleft()
else:
result = []
else:
if self._cache[n - 1] == self.sentinel:
raise StopIteration
result = [self._cache.popleft() for i in range(n)]
return result
def peek(self, n=None):
# type: (int) -> Any
"""Preview the next item or `n` items of the iterator.
The iterator is not advanced when peek is called.
Returns
-------
item or list of items
The next item or `n` items of the iterator. If `n` is None, the
item itself is returned. If `n` is an int, the items will be
returned in a list. If `n` is 0, an empty list is returned.
If the iterator is exhausted, `peek_iter.sentinel` is returned,
or placed as the last item in the returned list.
Note
----
Will never raise :exc:`StopIteration`.
"""
self._fillcache(n)
if n is None:
result = self._cache[0]
else:
result = [self._cache[i] for i in range(n)]
return result
class modify_iter(peek_iter):
"""An iterator object that supports modifying items as they are returned.
Parameters
----------
o : iterable or callable
`o` is interpreted very differently depending on the presence of
`sentinel`.
If `sentinel` is not given, then `o` must be a collection object
which supports either the iteration protocol or the sequence protocol.
If `sentinel` is given, then `o` must be a callable object.
sentinel : any value, optional
If given, the iterator will call `o` with no arguments for each
call to its `next` method; if the value returned is equal to
`sentinel`, :exc:`StopIteration` will be raised, otherwise the
value will be returned.
modifier : callable, optional
The function that will be used to modify each item returned by the
iterator. `modifier` should take a single argument and return a
single value. Defaults to ``lambda x: x``.
If `sentinel` is not given, `modifier` must be passed as a keyword
argument.
Attributes
----------
modifier : callable
`modifier` is called with each item in `o` as it is iterated. The
return value of `modifier` is returned in lieu of the item.
Values returned by `peek` as well as `next` are affected by
`modifier`. However, `modify_iter.sentinel` is never passed through
`modifier`; it will always be returned from `peek` unmodified.
Example
-------
>>> a = [" A list ",
... " of strings ",
... " with ",
... " extra ",
... " whitespace. "]
>>> modifier = lambda s: s.strip().replace('with', 'without')
>>> for s in modify_iter(a, modifier=modifier):
... print('"%s"' % s)
"A list"
"of strings"
"without"
"extra"
"whitespace."
"""
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
"""__init__(o, sentinel=N | one, modifier=lambda x: x)"""
if 'modifier' in kwargs:
self.modifier = kwargs['modifier']
elif len(args) > 2:
self.modifier = args[2]
args = args[:2]
else:
self.modifier = lambda x: x
if not callable(self.modifier):
raise TypeError('modify_iter(o, modifier): '
'modifier must be callable')
super(modify_iter, self).__ini | t__(*args)
def _fillcache(self, n):
# type: (int) -> None
"""Cache `n` modified items. If `n` is 0 or None, 1 item is cached.
Each item returned by the iterator is passed through the
`modify_iter.modified` function before being cached.
"""
if not n:
n = 1
try:
while len(self._cache) < n:
self._cache.append(self.modifier(next(self._iterable))) # type: ignore
except StopIteration:
while len(self._cache) < n:
self._cache.append(self.sentinel)
|
Phyks/libbmc | libbmc/tests/test_fetcher.py | Python | mit | 483 | 0 | import unittest
from libbmc.fetcher import *
class TestFetcher(unittest.Test | Case):
def test_download(self):
dl, contenttype = download('http://arxiv.org/pdf/1312.4006.pdf')
self.assertIn(contenttype, ['pdf', 'djvu'])
self.assertNotEqual(dl, '')
def test_download_invalid_type(self):
self.assertEqual(download('http://phyks.me/'), (None, None))
def test_download_invalid_url(self):
self.asse | rtEqual(download('a'), (None, None))
|
mitsuhiko/sentry | tests/sentry/models/test_file.py | Python | bsd-3-clause | 1,674 | 0 | from __future__ import absolute_import
from django.core.files.base import ContentFile
from sentry.models import File, FileBlob
f | rom sentry.testutils import TestCase
class FileBlobTest(TestCase):
def test_from_file(self):
fileobj = ContentFile("foo bar")
my_file1 = FileBlob.from_file(fileobj)
assert my_file1.path
my_file2 = FileBlob.from_file(fileobj)
# deep check
assert my_file1.id == my_file2.id
assert my_file1.checksum == my_file2.checksum
assert my_file1.path | == my_file2.path
class FileTest(TestCase):
def test_file_handling(self):
fileobj = ContentFile("foo bar")
file1 = File.objects.create(
name='baz.js',
type='default',
size=7,
)
results = file1.putfile(fileobj, 3)
assert len(results) == 3
assert results[0].offset == 0
assert results[1].offset == 3
assert results[2].offset == 6
fp = None
with file1.getfile() as fp:
assert fp.read() == 'foo bar'
fp.seek(2)
fp.tell() == 2
assert fp.read() == 'o bar'
fp.seek(0)
fp.tell() == 0
assert fp.read() == 'foo bar'
fp.seek(4)
fp.tell() == 4
assert fp.read() == 'bar'
fp.seek(1000)
fp.tell() == 1000
with self.assertRaises(IOError):
fp.seek(-1)
with self.assertRaises(ValueError):
fp.seek(0)
with self.assertRaises(ValueError):
fp.tell()
with self.assertRaises(ValueError):
fp.read()
|
rrooij/youtube-dl | youtube_dl/extractor/asiancrush.py | Python | unlicense | 3,951 | 0.001012 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .kaltura import KalturaIE
from ..utils import (
extract_attributes,
remove_end,
)
class AsianCrushIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?asiancrush\.com/video/(?:[^/]+/)?0+(?P<id>\d+)v\b'
_TESTS = [{
'url': 'https://www.asiancrush.com/video/012869v/women-who-flirt/',
'md5': 'c3b740e48d0ba002a42c0b72857beae6',
'info_dict': {
'id': '1_y4tmjm5r',
'ext': 'mp4',
'title': 'Women Who Flirt',
'description': 'md5:3db14e9186197857e7063522cb89a805',
'timestamp': 1496936429,
'upload_date': '20170608',
'uploader_id': 'craig@crifkin.com',
},
}, {
'url': 'https://www.asiancrush.com/video/she-was-pretty/011886v-pretty-episode-3/',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
entry_id, partner_id, title = [None] * 3
vars = self._parse_json(
self._search_regex(
r'iEmbedVars\s*=\s*({.+?})', webpage, 'embed vars',
default='{}'), video_id, fatal=False)
if vars:
entry_id = vars.get('entry_id')
partner_id = vars.get('partner_id')
title = vars.get('vid_label')
if not entry_id:
entry_id = self._search_regex(
r'\bentry_id["\']\s*:\s*["\'](\d+)', webpage, 'entry id')
player = self._download_webpage(
'https://api.asiancrush.com/embeddedVideoPlayer', video_id,
query={'id': entry_id})
kaltura_id = self._search_regex(
r'entry_id["\']\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1', player,
'kaltura id', group='id')
if not partner_id:
partner_id = self._search_regex(
r'/p(?:artner_id)?/(\d+)', player, 'partner id',
default='513551')
return self.url_result(
'kaltura:%s:%s' % (partner_id, kaltura_id),
ie=KalturaIE.ie_key(), video_id=kaltura_id,
video_title=title)
class AsianCrushPlaylistIE(InfoExtractor):
_VALID_UR | L = r'https?://(?:www\.)?asiancrush\.com/series/0+(?P<id>\d+)s\b'
_TEST = {
'url': 'https://www.asiancrush.com/series/012481s/scholar-walks-night/',
'info_dict': {
'id': '12481',
'title': 'Scholar Who Walks the Night',
| 'description': 'md5:7addd7c5132a09fd4741152d96cce886',
},
'playlist_count': 20,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = []
for mobj in re.finditer(
r'<a[^>]+href=(["\'])(?P<url>%s.*?)\1[^>]*>' % AsianCrushIE._VALID_URL,
webpage):
attrs = extract_attributes(mobj.group(0))
if attrs.get('class') == 'clearfix':
entries.append(self.url_result(
mobj.group('url'), ie=AsianCrushIE.ie_key()))
title = remove_end(
self._html_search_regex(
r'(?s)<h1\b[^>]\bid=["\']movieTitle[^>]+>(.+?)</h1>', webpage,
'title', default=None) or self._og_search_title(
webpage, default=None) or self._html_search_meta(
'twitter:title', webpage, 'title',
default=None) or self._search_regex(
r'<title>([^<]+)</title>', webpage, 'title', fatal=False),
' | AsianCrush')
description = self._og_search_description(
webpage, default=None) or self._html_search_meta(
'twitter:description', webpage, 'description', fatal=False)
return self.playlist_result(entries, playlist_id, title, description)
|
ldong/vim_youcompleteme | cpp/ycm/.ycm_extra_conf.py | Python | gpl-3.0 | 6,515 | 0.02287 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, f | lag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return | new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
dtrip/weevely3 | tests/test_file_grep.py | Python | gpl-3.0 | 6,721 | 0.013837 | from testfixtures import log_capture
from tests.base_test import BaseTest
from tests import config
from core.sessions import SessionURL
from core import modules
import utils
from core import messages
import subprocess
import os
import tempfile
import random
def setUpModule():
subprocess.check_output("""
BASE_FOLDER="{config.base_folder}/test_file_grep/"
rm -rf "$BASE_FOLDER"
mkdir -p "$BASE_FOLDER/dir1/dir2/dir3/dir4"
echo string1 > "$BASE_FOLDER/dir1/string1"
echo string12 > "$BASE_FOLDER/dir1/dir2/string12"
echo 'string3\nSTR33' > "$BASE_FOLDER/dir1/dir2/dir3/string3"
echo string4 > "$BASE_FOLDER/dir1/dir2/dir3/dir4/string4"
chmod 0111 "$BASE_FOLDER/dir1/dir2/dir3/dir4/string4"
chown www-data: -R "$BASE_FOLDER/"
""".format(
config = config
), shell=True)
class FileGrep(BaseTest):
folders_rel = [
'test_file_grep/dir1',
'test_file_grep/dir1/dir2',
'test_file_grep/dir1/dir2/dir3',
'test_file_grep/dir1/dir2/dir3/dir4',
]
files_rel = [
'test_file_grep/dir1/string1',
'test_file_grep/dir1/dir2/string12',
'test_file_grep/dir1/dir2/dir3/string3',
'test_file_grep/dir1/dir2/dir3/dir4/string4',
]
def setUp(self):
self.session = SessionURL(
self.url,
self.password,
volatile = True
)
modules.load_modules(self.session)
self.vector_list = modules.loaded['file_grep'].vectors.get_names()
self.run_argv = modules.loaded['file_grep'].run_argv
def test_file_grep(self):
for vect in self.vector_list:
# grep string1 -> string[0]
self.assertEqual(
self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring1' ])[0],
{
self.files_rel[0] : ['string1'],
self.files_rel[1] : ['string12']
}
)
# grep string3 -> []
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring4' ])[0],{})
# grep string[2-9] -> string[3]
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring[2-9]' ])[0],{ self.files_rel[2] : ['string3'] })
# grep rpath=folder2 string -> string[3]
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[2], 'string.*' ])[0],{ self.files_rel[2] : ['string3'] })
def test_file_grep_invert(self):
for vect in self.vector_list:
# grep -v string1 -> string3
self.assertEqual(
self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring1', '-v' ])[0],
{
self.files_rel[2] : ['string3', 'STR33'],
# self.files_rel[3] : ['string4'] # String 4 is 0111
}
)
# grep -v bogus -> string1,2,3
self.assertEqual(
self.run_argv([ '-vector', vect, self.folders_rel[0], 'bogus', '-v' ])[0],
{
self.files_rel[0] : ['string1'],
self.files_rel[1] : ['string12'],
self.files_rel[2] : ['string3', 'STR33']
}
)
# grep -v -i STR from string[2] -> string3
self.assertEqual(self.run_argv([ '-vector', vect, self.files_rel[2], '-v', '-case', 'STR' ])[0],{ self.files_rel[2] : ['string3'] })
def test_file_grep_output_remote(self):
for vect in self.vector_list:
output_path = os.path.join(config.base_folder, 'test_file_grep', 'test_%s_%i' % (vect, random.randint(1,99999)))
# grep string3 -> []
self.assertTrue(self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring4', '-output', output_path ])[1])
self.assertEqual(subprocess.check_output(
'cat "%s"' % (output_path),
shell=True
), ''
)
subprocess.check_output(
'rm -f %s' % (output_path),
shell=True)
# grep rpath=folder2 string -> string[3]
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[2], 'string.*', '-output', output_path ])[0],{ self.files_rel[2] : ['string3'] })
self.assertEqual(subprocess.check_output(
'cat "%s"' % (output_path),
shell=True), 'string3'
)
subprocess.check_output(
'rm -f %s' % (output_path),
shell=True)
def test_file_grep_output_local(self):
for vect in self.vector_list:
temp_file = tempfile.NamedTemporaryFile()
# grep string3 -> []
self.assertTrue(self.run_argv([ '-vector', vect, self.folders_rel[0], 'tring4', '-output', temp_file.name, '-local' ])[1])
self.assertEqual('', open(temp_file.name,'r').read())
temp_file.truncate()
# grep rpath=folder2 string -> string[3]
self.assertEqual(self.run_argv([ '-vector', vect, self.folders_rel[2], 'string.*', '-output', temp_file.name, '-local' ])[0],{ self.files_rel[2] : ['string3'] })
self.assertEqual('string3', open(temp_file.name,'r').read())
temp_file.close()
@log_capture()
def test_php_err(self, log_captured):
# wrong rpath generate None and warning print
self.assertEqual(self.run_argv([ 'bogus', 'tring4' ])[0], None)
self.assertEqual(messages. | module_file_grep.failed_retrieve_info,
log_captured.records[-1].msg)
# wrong regex generate None and warning print
self.assertEqual(self.run_argv([ '\'', 'tring4' ])[0], None)
self.assertEqual(messages.module_file_grep.failed_retrieve_info,
log_captured.records[-1].msg)
@log_capture()
def test_sh_err(self, log_captured):
# wrong rpath generate None and warning print
self.assertEqual(self.run_arg | v([ '-vector', 'grep_sh', 'bogus', 'tring4' ])[0], None)
self.assertEqual(messages.module_file_grep.failed_retrieve_info,
log_captured.records[-1].msg)
# wrong regex generate None and warning print
self.assertEqual(self.run_argv([ '-vector', 'grep_sh', '\'', 'tring4' ])[0], None)
self.assertEqual(messages.module_file_grep.failed_retrieve_info,
log_captured.records[-1].msg)
|
anovak10/plots | DDTmethod/CutCounter.py | Python | mit | 2,167 | 0.031841 | #
import os
import math
from array import array
import optparse
import ROOT
from ROOT import *
import scipy
import Plotting_Header
from Plotting_Header import *
def cutcount(varname):
VAR = [varname, 50, 0, 100 ]
YT = "events / "+str((VAR[3]-VAR[2])/VAR[1])+" GeV"
#YT = "events"
XT = varname+" (GeV)"
H = "Type 1 (e) control region"
Cut = "(LepPt<50.)"
treename="tree_T1"
lumi = str(12.7)
Data = TH1F("DATA", "", VAR[1], VAR[2], VAR[3])
Data.Sumw2()
Data.SetLineColor(1)
Data.SetFillColor(0)
Data.SetMarkerColor(1)
Data.SetMarkerStyle(20)
quickplot("/home/storage | /andrzejnovak/March/SE.root", treename, Data, VAR[0], Cut, "(1.0)")
quickplot("/home/storage/andrzejnovak/March/SM.root", treename, Data, VAR[0], Cut, "(1.0)")
d = Data.GetEntries()
W = TH1F("W", "", VAR[1], VAR[2], VAR[3])
W.SetLineColor(kGreen-6)
W.SetLineWidth(2)
quickplot("/home/storage/andrzejnovak/March/WJetsToQQ.root", treename, W, VAR[0], Cut, "("+lumi+"*weight)")
f | or w in ["100To200", "200To400", "400To600", "600To800", "800To1200", "1200To2500", "2500ToInf"]:
quickplot("/home/storage/andrzejnovak/March/WJetsToLNu_HT-"+w+".root", treename, W, VAR[0], Cut, "("+lumi+"*weight)")
w = W.GetEntries()
QCD = TH1F("QCD", "", VAR[1], VAR[2], VAR[3])
QCD.SetLineColor(kYellow)
QCD.SetLineWidth(2)
for q in ["300to500", "500to700", "700to1000", "1000to1500", "1500to2000", "2000toInf"]:
quickplot("/home/storage/andrzejnovak/March/QCD_HT"+q+".root", treename, QCD, VAR[0], Cut, "("+lumi+"*weight)")
q = QCD.GetEntries()
TT = TH1F("TT", "", VAR[1], VAR[2], VAR[3])
TT.SetLineColor(kRed-4)
TT.SetLineWidth(2)
quickplot("/home/storage/andrzejnovak/March/TT.root", treename,TT, VAR[0], Cut, "("+lumi+"*weight)")
t = TT.GetEntries()
ST = TH1F("ST", "", VAR[1], VAR[2], VAR[3])
ST.SetLineColor(kBlue)
for s in ["ST_s", "ST_t", "ST_at", "ST_tW", "ST_atW"]:
quickplot("/home/storage/andrzejnovak/March/"+s+".root", treename, ST, VAR[0], Cut, "("+lumi+"*weight)")
s = DSTGetEntries()
return d, q, w, t, s
for varname in ["LepPt","METPt", "TAGPt", "TPRIMEM", "WPt", "lepJetPt","ZPRIMEM"]:
d, q, w, t, s = cutcount(varname)
print d, q,w,t,s
|
tobiagru/ML | src/lib_IO.py | Python | gpl-3.0 | 3,590 | 0.032033 | import numpy as np
import pandas as pd
import h5py
import sys
import traceback
import logging
logging.basicConfig(stream=sys.stdout,level=logging.DEBUG)
# import/export functions --------------------------------------------------------------------
def load_Y(fname, usecols = [1], asNpArray = False):
if asNpArray:
return np.loadtxt(fname,
dtype = np.int32,
delimiter = ',',
skiprows = 1,
usecols = usecols)
else:
return pd.read_csv(fname,
index_col=0,
dtype=np.int32,
header=0,
usecols = [0] + list(usecols))
def load_X_train(fname, usecols = range(2,17,1), asNpArray = False):
if asNpArray:
return np.loadtxt(fname,
dtype = np.float32,
delimiter = ',',
skiprows = 1,
usecols = list(usecols))
else:
return pd.read_csv(fname,
index_col=0,
dtype=np.int16,
header=0,
usecols = [0] + list(usecols))
def load_X_test(fname, usecols = range(1,16,1), asNpArray = False):
if asNpArray:
return np.loadtxt(fname,
dtype = np.float32,
delimiter = ',',
skiprows = 1,
usecols = list(usecols))
else:
return pd.read_csv(fname,
index_col=0,
dtype=np.float32,
header=0,
usecols = list(usecols))
def load_Ids_test(fname):
return np.loadtxt(fname,
dtype = np.float32,
delimiter = ',',
skiprows = 1,
usecols = [0])
def load_h5_train(fname):
f = h5py.File(fname, 'r+')
ids = np.zeros(f["train/axis1"].shape, dtype=np.int32)
f["train/axis1"].read_direct(ids)
X_train = np.zeros(f["train/axis1"].shape, dtype=np.int32)
f["train/axis1"].read_direct(ids)
y_train = np.zeros(f["train/axis1"].shape, dtype=np.int32)
f["train/axis1"].read_direct(ids)
def write_Y(fname, Y_pred, X_test = 0, Ids = 0):
if X_test is not 0:
if Y_pred.shape[0] != X_test.as_matrix().shape[0]:
print("error X_test- dimension of y matrix does not match number of expected predictions")
print('y: {0} - expected: {1}'.format(Y_pred.shape,X_test.as_matrix().shape))
else:
data = pd.DataFrame(data = Y_p | red, index = X_test.index, columns = ['y'])
| f = open(fname, 'w+')
data.to_csv(f, header=["Id","Prediction"])
f.close()
elif Ids is not 0:
if Y_pred.shape[0] != Ids.shape[0]:
print("error Ids- dimension of y matrix does not match number of expected predictions")
print('y: {0} - expected: {1}'.format(Y_pred.shape,Ids.shape))
else:
f = open(fname, 'w+')
np.savetxt(fname=f,X= np.column_stack([Ids,Y_pred]),
fmt=['%d', '%d'],delimiter=',',header='Id,Prediction',comments='')
def log_best_param_score( date_time, clf_name, score, best_param):
logging.info('{0} - {1} - score: {2:.4f} - param: {3}\n'.format(date_time,clf_name,score,best_param))
def log_score(date_time, clf_name, score):
logging.info('{0} - {1} - score: {2:.4f}\n'.format(date_time,clf_name,score)) |
johnmgregoire/PythonCompositionPlots | quaternary_binary_lines.py | Python | bsd-3-clause | 2,643 | 0.020431 | import matplotlib.cm as cm
import numpy
import pylab
import operator, copy, os
#os.chdir('C:/Users/Gregoire/Documents/PythonCode/ternaryplot')
from myquaternaryutility import QuaternaryPlot
class binarylines:
def __init__(self, ax, insetax, ellabels=['A', 'B', 'C', 'D'], offset=0.02, numcomppts=21, view_azim=-159, view_elev=30, **kwargs):
self.ax=ax
self.insetax=insetax
self.ellabels=ellabels
self.stpq=QuaternaryPlot(insetax, ellabels=ellabe | ls, offset=offset)
comppairs=[]
a=numpy.linspace(0, 1, 21)
count=-1
for i in range(4):
for j in range(i+1, 4):
| count+=1
b=numpy.zeros((numcomppts, 4), dtype='float64')
b[:, i]=a
b[:, j]=1.-a
comppairs+=[(c1, c2) for c1, c2 in zip(b[:-1], b[1:])]
for (c1, c2) in comppairs:
self.stpq.line(c1, c2, fmt='-', c=self.stpq.rgb_comp([(c1+c2)/2.])[0], **kwargs)
self.stpq.set_projection(azim=view_azim, elev=view_elev)
self.stpq.label()
def plotbinaryfom(self, comps, fom, **kwargs):
cb=comps>.001
ms=['<','>','^','v','s','D']
count=-1
for i in range(4):
for j in range(i+1, 4):
count+=1
k, l=tuple(set(range(4))-set([i, j]))
barr=numpy.array([numpy.logical_not(b[k]|b[l]) for b in cb]) #numpy.logical_xor(b[i], b[j])&
if not numpy.any(barr):
continue
cmps=comps[barr]
inds=numpy.argsort(cmps[:, j])
cmps=cmps[inds]
cols=self.stpq.rgb_comp(cmps)
ys=fom[barr][inds]
for count2, (c, col, y) in enumerate(zip(cmps, cols, ys)):
if count2==len(ys)//2:
self.ax.plot(c[j], y, marker=ms[count], c=col, markeredgecolor=col, label='%s,%s' %(self.ellabels[i], self.ellabels[j]), **kwargs)
else:
self.ax.plot(c[j], y, marker=ms[count], c=col, markeredgecolor=col, **kwargs)
#self.ax.plot(c[j], y, marker=ms[count], c=col, markeredgecolor='None')
for count3, (c1, col1, y1, c2, col2, y2) in enumerate(zip(cmps[:-1], cols[:-1], ys[:-1], cmps[1:], cols[1:], ys[1:])):
col=numpy.array([col1, col2]).mean(axis=0)
self.ax.plot([c1[j], c2[j]], [y1, y2], '-', c=col, **kwargs)
def binarylineslegend(self, **kwargs):
try:
self.ax.legend(**kwargs)
except:
pass
|
citrix/netscaler-ansible-modules | ansible-collections/adc/plugins/modules/citrix_adc_appfw_settings.py | Python | gpl-3.0 | 12,977 | 0.002543 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Citrix Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: citrix_adc_appfw_settings
short_description: Manage Citrix ADC Web Application Firewall settings.
description:
- Manage Citrix ADC Web Application Firewall settings.
- The module uses the NITRO API to make configuration changes to WAF settings on the target Citrix ADC.
- The NITRO API reference can be found at https://developer-docs.citrix.com/projects/netscaler-nitro-api/en/latest
- Note that due to NITRO API limitations this module will always report a changed status even when configuration changes have not taken place.
version_added: "1.0.0"
author:
- George Nikolopoulos (@giorgos-nikolopoulos)
- Sumanth Lingappa (@sumanth-lingappa)
options:
defaultprofile:
description:
- >-
Profile to use when a connection does not match any policy. Default setting is APPFW_BYPASS, which
unmatched connections back to the Citrix ADC without attempting to filter them further.
- "Minimum length = 1"
type: str
undefaction:
description:
- "Profile to use when an application firewall policy evaluates to undefined (UNDEF)."
- >-
An UNDEF event indicates an internal error condition. The APPFW_BLOCK built-in profile is the default
You can specify a different built-in or user-created profile as the UNDEF profile.
- "Minimum length = 1"
type: str
sessiontimeout:
description:
- >-
Timeout, in seconds, after which a user session is terminated. Before continuing to use the protected
site, the user must establish a new session by opening a designated start URL.
- "Minimum value = C(1)"
- "Maximum value = C(65535)"
type: str
learnratelimit:
description:
- >-
Maximum number of connections per second that the application firewall learning engine examines to
new relaxations f | or learning-enabled security checks. The application firewall drops any connections
this limit from the list of connections used by the learning engine.
- "Minimum value = C(1)"
- "Maxim | um value = C(1000)"
type: str
sessionlifetime:
description:
- >-
Maximum amount of time (in seconds) that the application firewall allows a user session to remain
regardless of user activity. After this time, the user session is terminated. Before continuing to
the protected web site, the user must establish a new session by opening a designated start URL.
- "Minimum value = C(0)"
- "Maximum value = C(2147483647)"
type: str
sessioncookiename:
description:
- "Name of the session cookie that the application firewall uses to track user sessions."
- >-
Must begin with a letter or number, and can consist of from 1 to 31 letters, numbers, and the hyphen
and underscore (_) symbols.
- "The following requirement applies only to the Citrix ADC CLI:"
- >-
If the name includes one or more spaces, enclose the name in double or single quotation marks (for
"my cookie name" or 'my cookie name').
- "Minimum length = 1"
type: str
clientiploggingheader:
description:
- >-
Name of an HTTP header that contains the IP address that the client used to connect to the protected
site or service.
type: str
importsizelimit:
description:
- >-
Cumulative total maximum number of bytes in web forms imported to a protected web site. If a user
to upload files with a total byte count higher than the specified limit, the application firewall
the request.
- "Minimum value = C(1)"
- "Maximum value = C(268435456)"
type: str
signatureautoupdate:
description:
- "Flag used to enable/disable auto update signatures."
type: bool
signatureurl:
description:
- "URL to download the mapping file from server."
type: str
cookiepostencryptprefix:
description:
- "String that is prepended to all encrypted cookie values."
- "Minimum length = 1"
type: str
logmalformedreq:
description:
- "Log requests that are so malformed that application firewall parsing doesn't occur."
type: bool
geolocationlogging:
description:
- "Enable Geo-Location Logging in CEF format logs."
type: bool
ceflogging:
description:
- "Enable CEF format logs."
type: bool
entitydecoding:
description:
- "Transform multibyte (double- or half-width) characters to single width characters."
type: bool
useconfigurablesecretkey:
description:
- "Use configurable secret key in AppFw operations."
type: bool
sessionlimit:
description:
- >-
Maximum number of sessions that the application firewall allows to be active, regardless of user
After the max_limit reaches, No more user session will be created .
- "Minimum value = C(0)"
- "Maximum value = C(500000)"
type: str
malformedreqaction:
elements: str
choices:
- 'none'
- 'block'
- 'log'
- 'stats'
description:
- "flag to define action on malformed requests that application firewall cannot parse."
type: list
extends_documentation_fragment: citrix.adc.citrixadc
'''
EXAMPLES = '''
- name: setup basic settings
delegate_to: localhost
citrix_adc_appfw_settings:
nitro_user: nsroot
nitro_pass: nsroot
nsip: 172.18.0.2
state: present
defaultprofile: APPFW_BYPASS
undefaction: APPFW_BLOCK
sessiontimeout: "1000"
learnratelimit: "500"
sessionlifetime: "2000"
sessioncookiename: cookie_name
clientiploggingheader: header_name
importsizelimit: "268435456"
signatureautoupdate: on
signatureurl: http://signature.url
cookiepostencryptprefix: prepend
logmalformedreq: on
geolocationlogging: on
ceflogging: on
entitydecoding: on
useconfigurablesecretkey: on
sessionlimit: "10000"
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detail |
JayTeeGeezy/pypunters | pypunters/html_utils.py | Python | mit | 5,608 | 0.018545 | import re
def get_attributes(parent, selector, attribute):
"""Get a list of attribute values for child elements of parent matching the given CSS selector"""
return [child.get(attribute) for child in parent.cssselect(selector)]
def get_attribute(parent, selector, attribute, index=0):
"""Get the attribute value for the child element of parent matching the given CSS selector
If index is specified, return the attribute value for the matching child element with the specified zero-based index; otherwise, return the attribute value for the first matching child element.
If selector is None, return the attribute value for parent instead.
"""
if selector is None:
return parent.get(attribute)
else:
values = get_attributes(parent, selector, attribute)
if (index < 0 and len(values) >= abs(index)) or (index >= 0 and len(values) > index):
return values[index]
def parse_attributes(parent, selector, attribute, parser):
"""Parse a list of attribute values for child elements of parent matching the given CSS selector"""
values = []
for value in get_attributes(parent, selector, attribute):
try:
values.append(parser(value))
except ValueError:
values.append(None)
return values
def parse_attribute(parent, selector, attribute, parser, index=0):
"""Parse the attribute value for the child element of parent matching the given CSS selector
If index is specified, parse the attribute value for the matching child element with the specified zero-based index; otherwise, parse the attribute value for the first matching child element.
If selector is None, parse the attribute value for parent instead.
"""
value = get_attribute(parent, selector, attribute, index)
if value is not None:
try:
return parser(value)
except ValueError:
return None
def get_child(parent, selector, index=0):
"""Get the child element of parent as specified by the given CSS selector
If index is specified, return the matching child element at the specified zero-based index; otherwise, return the first matching child element.
"""
children = parent.cssselect(selector)
if (index < 0 and len(children) >= abs(index)) or (index >= 0 and len(children) > index):
return children[index]
def get_child_text(parent, selector, index=0):
"""Get the text content of the child element of parent as specified by the given CSS selector
If index is specified, return the text content of the matching child element at the specified zero-based index; otherwise, return the text content of the first matching child element.
"""
child = get_child(parent, selector, index)
if child is not None:
return child.text_content().strip()
def parse_child_text(parent, selector, parser, index=0):
"""Parse the text content of the child element of parent as specified by the given CSS selector
If index is specified, parse the text content of the matching child element at the specified zero-based index; otherwise, parse the text content of the first matching child element.
"""
text = get_child_text(parent, selector, index)
if text is not None:
try:
return parser(text)
except ValueError:
return None
def get_child_match(parent, selector, pattern, index=0):
"""Get a regex match for the text content of the child element of parent as specified by the given CSS selector
If index is specified, return the regex match for the text content of the matching child element at the specified zero-based index; otherwise, return the regex match for the text content of the first matching child element.
"""
text = get_child_text(parent, selector, index)
if text is not None:
return re.search(pattern, text)
def get_child_match_groups(parent, selector, pattern, index=0):
"""Get regex match groups for the text content of the child element of parent as specified by the given CSS selector
If index is specified, return the regex match groups for the text content of the matching child element at the specified zero-based index; otherwise, return the regex match groups for the text content of the first matching child element.
"""
match = get_child_match(parent, selector, pattern, index)
if match is not None:
return match.groups()
def get_child_match_group(parent, selector, pattern, child_index=0, group_index=0):
"""Get the regex match group with group_index for the text content of the child element of parent as specified by the given CSS selector
If child_index is specified, return the r | egex match | group with group_index for the text content of the child element at the specified zero-based index; otherwise, return the regex match group with group_index for the text content of the first matching child element.
"""
groups = get_child_match_groups(parent, selector, pattern, child_index)
if groups is not None and ((group_index < 0 and len(groups) >= abs(group_index)) or (group_index >= 0 and len(groups) > group_index)):
return groups[group_index]
def parse_child_match_group(parent, selector, pattern, parser, child_index=0, group_index=0):
"""Parse the regex match group with group_index for the text content of the child element of parent as specified by the given CSS selector
If child_index is specified, parse the regex match group with group_index for the text content of the child element at the specified zero-based index; otherwise, parse the regex match group with group_index for the text content of the first matching child element.
"""
value = get_child_match_group(parent, selector, pattern, child_index, group_index)
if value is not None:
try:
return parser(value)
except ValueError:
return None |
zerotk/terraformer | zerotk/fifo.py | Python | mit | 1,072 | 0.00653 | from __future__ import unicode_literals
from collections import OrderedDict
class FIFO(OrderedDict):
"""
This is a First in, First out cache, so, when the maximum size is reached, the first item added
is removed.
"""
def __init__(self, maxsize):
"""
:param int maxsize:
The maximum size of this cache.
"""
OrderedDict.__init__(self)
self._maxsize = maxsize
def __setitem__ | (self, key, value):
"""
Sets an item in the cache. Pops items as needed so that the max size is never passed.
:param object key:
Key to be set
:param ob | ject value:
Corresponding value to be set for the given key
"""
l = len(self)
# Note, we must pop items before adding the new one to the cache so that
# the size does not exceed the maximum at any time.
while l >= self._maxsize:
l -= 1
# Pop the first item created
self.popitem(0)
OrderedDict.__setitem__(self, key, value)
|
CloudHeads/lambda_utils | lambda_utils/response_handlers/__init__.py | Python | mit | 305 | 0 | import logging
class BaseResponseHandler:
def on_execution(self, event):
logging.debug(event)
return event
def on_exception(self, ex):
logging.exception(str(ex) | )
raise
def on_response(self, response):
logging.debug(response)
re | turn response
|
joshua-cogliati-inl/raven | tests/framework/ensembleModelTests/EM_A_dummy.py | Python | apache-2.0 | 681 | 0.002937 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the Li | cense at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable | law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def run(self, Input):
self.sumTemperature = self.leftTemperature + self.rightTemperature
|
OrhanOdabasi/PixPack | pixpack/process.py | Python | mit | 3,262 | 0.003372 | #!/usr/bin/env python3
# process.py
# This script consists of all core functions.
# Author: Orhan Odabasi (0rh.odabasi[at]gmail.com)
import locale
import csv
import os
from PIL import Image
import re
from collections import Counter
def scanDir(path):
# scan the path and collect media data for copy process
while os.path.exists(path) and os.path.isdir(path):
photos_dataset, totalsize, folder_count, videos_dataset = listphotos(path)
p_count = len(photos_dataset)
p_size = "{:.2f} MB".format(float(totalsize/1000000))
return p_count, p_size, folder_count, photos_dataset, videos_dataset
def saveReport(photo_datas, video_datas, target_path):
# save summary data to a csv file
report_dest_p = os.path.join(target_path, "photo_list.csv")
report_dest_v = os.path.join(target_path, "video_list.csv")
with open(report_dest_p, "w") as f:
w = csv.writer(f, delimiter="\t")
w.writerows(photo_datas)
| f.close()
with open(report_dest_v, "w") as f:
w = csv.writer(f, delimiter="\t")
w.writerows(video_datas)
f.close()
def listphotos(path):
# Listing all files in target directory
pho | tos_dataset = []
videos_dataset = []
for root, dirs, files in os.walk(path):
for name in files:
p_data_list = []
v_data_list = []
# filename name [0]
file_name = name
# file path [1]
file_path = os.path.join(root, file_name)
# file size [2]
file_size = os.path.getsize(file_path)
try:
# date taken [3]
date_taken = Image.open(file_path)._getexif()[36867]
# year/month/day format required
ymd_format = re.match("(\d{4}):(\d{2}):(\d{2})", date_taken)
# year taken [4]
year = ymd_format.group(1)
# month taken [5]
month = ymd_format.group(2)
# day taken [6]
day = ymd_format.group(3)
# date info will be our new folder name
date_info = "{0}-{1}".format(year, month)
except:
date_taken = "NOT_FOUND"
day = "NOT_FOUND"
year = "NOT_FOUND"
month = "NOT_FOUND"
# destination folder name [7]
date_info = "NOT_FOUND"
if name.lower().endswith((".jpeg", ".jpg", ".png", ".dng")):
p_data_list.extend([file_name, file_path, file_size, date_taken, year, month, day, date_info])
photos_dataset.append(p_data_list)
elif name.lower().endswith((".mov", ".mkv", ".mp4", ".3gp", ".wmv", ".avi")):
v_data_list.extend([file_name, file_path, file_size, date_taken, year, month, day, date_info])
videos_dataset.append(v_data_list)
# total size of photos archive (only jpeg and png files)
totalsize = 0
for s in photos_dataset:
totalsize += int(s[2])
#total file count
dirs = []
for x in photos_dataset:
dirs.append(x[7])
foldercount = len(Counter(dirs).most_common())
return photos_dataset, totalsize, foldercount, videos_dataset
|
wuga214/FullyConnectedDeepNeuralNetwork | ANN/src/utils/tools.py | Python | mit | 135 | 0.037037 | '''
Created on Feb 6, 2016
@author: Wuga
'''
import numpy as np
| def add_ones(A):
| return np.hstack(( np.ones((A.shape[0],1)), A )) |
SPP1665DataAnalysisCourse/elephant | elephant/surrogates.py | Python | bsd-3-clause | 15,562 | 0.000771 | #--------------------------------------------------------------------------
# NAME : spiketrains_utils.py:
# DESCRIPTION : routines for generating and binning time series, extracting
# information and generating surrogates
# AUTHOR : Emiliano Torre
# CREATED : September 12, 2012
#--------------------------------------------------------------------------
import numpy as np
import quantities as pq
import neo
def spi | ke_dithering(x, dither, n=1, decimals=None, edges='['):
"""
Generates surrogates of a spike train by spike dithering.
The surrogates are obtained by uniformly dithering times around the
| original position. The dithering is performed independently for each
surrogate.
The surrogates retain the t_start and t_stop of the original spike train.
Spikes moved beyond this range are lost or moved to the range's ends,
depending on the parameter edge.
Parameters
----------
x : SpikeTrain
the spike train from which to generate the surrogates
dither : Quantity
amount of dithering. A spike at time t is placed randomly within
]t-dither, t+dither[.
n : int (optional)
number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
edges : str (optional)
For surrogate spikes falling outside the range [x.t_start, x.t_stop),
whether to drop them out (for edges = '[' or 'cliff') or set
that to the range's closest end (for edges = ']' or 'wall').
Default: '['
Returns
-------
list of SpikeTrain
a list of spike trains, each obtained from x by randomly dithering
its spikes. The range of the surrogate spike trains is the same as x.
Example
-------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print spike_dithering(st, dither = 20*pq.ms)
[<SpikeTrain(array([ 96.53801903, 248.57047376, 601.48865767,
815.67209811]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_dithering(st, dither = 20*pq.ms, n=2)
[<SpikeTrain(array([ 104.24942044, 246.0317873 , 584.55938657,
818.84446913]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 111.36693058, 235.15750163, 618.87388515,
786.1807108 ]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_dithering(st, dither = 20*pq.ms, decimals=0)
[<SpikeTrain(array([ 81., 242., 595., 799.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Transform x into a Quantity object (needed for matrix algebra)
data = x.view(pq.Quantity)
# Main: generate the surrogates
surr = data.reshape((1, len(data))) + 2 * dither * \
np.random.random_sample((n, len(data))) - dither
# Round the surrogate data to decimal position, if requested
if decimals is not None:
surr = surr.round(decimals)
if edges in (']', 'wall'):
# Move all spikes outside [x.t_start, x.t_stop] to the range's ends
surr = np.minimum(np.maximum(surr.base,
(x.t_start / x.units).base), (x.t_stop / x.units).base) * x.units
elif edges in ('[', 'cliff'):
# Leave out all spikes outside [x.t_start, x.t_stop]
Tstart, Tstop = (x.t_start / x.units).base, (x.t_stop / x.units).base
surr = [s[np.all([s >= Tstart, s < Tstop], axis=0)] * x.units
for s in surr.base]
# Return the surrogates as SpikeTrains
return [neo.SpikeTrain(s, t_start=x.t_start, t_stop=x.t_stop).rescale(
x.units) for s in surr]
def spike_time_rand(x, n=1, decimals=None):
"""
Generates surrogates of a spike trains by spike time randomisation.
The surrogates are obtained by keeping the spike count of the original
spike train x, but placing them randomly into the interval
[x.t_start, x.t_stop].
This generates independent Poisson SpikeTrains (exponentially distributed
inter-spike intervals) while keeping the spike count as in x.
Parameters
----------
x : SpikeTrain
the spike train from which to generate the surrogates
n : int (optional)
number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
Returns
-------
list of SpikeTrain
a list of spike trains, each obtained from x by randomly dithering
its spikes. The range of the surrogate spike trains is the same as x.
Example
-------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print spike_time_rand(st)
[<SpikeTrain(array([ 131.23574603, 262.05062963, 549.84371387,
940.80503832]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_time_rand(st, n=2)
[<SpikeTrain(array([ 84.53274955, 431.54011743, 733.09605806,
852.32426583]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 197.74596726, 528.93517359, 567.44599968,
775.97843799]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_time_rand(st, decimals=0)
[<SpikeTrain(array([ 29., 667., 720., 774.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Create surrogate spike trains as rows of a Quantity array
sts = ((x.t_stop - x.t_start) * np.random.random(size=(n, len(x))) + \
x.t_start).rescale(x.units)
# Round the surrogate data to decimal position, if requested
if decimals is not None:
sts = sts.round(decimals)
# Convert the Quantity array to a list of SpikeTrains, and return them
return [neo.SpikeTrain(np.sort(st), t_start=x.t_start, t_stop=x.t_stop)
for st in sts]
def isi_shuffling(x, n=1, decimals=None):
"""
Generates surrogates of a spike trains by inter-spike-interval (ISI)
shuffling.
The surrogates are obtained by keeping the randomly sorting the ISIs of
the original spike train x.
This generates independent SpikeTrains with same ISI distribution
and spike count as in x, while destroying temporal dependencies and
firing rate profile.
Parameters
----------
x : SpikeTrain
the spike train from which to generate the surrogates
n : int (optional)
number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
Returns
-------
list of SpikeTrain
a list of spike trains, each obtained from x by randomly ISI shuffling.
The range of the surrogate spike trains is the same as x.
Example
-------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print isi_shuffling(st)
[<SpikeTrain(array([ 200., 350., 700., 800.]) * ms,
[0.0 ms, 1000.0 ms])>]
>>> print isi_shuffling(st, n=2)
[<SpikeTrain(array([ 100., 300., 450., 800.]) * ms,
[0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 200., 350., 700., 800.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Compute ISIs of x as a numpy array (meant in units of x)
x_dl = x.magnitude
if len(x) > 0:
isi0 = x[0] - x.t_start
ISIs = np.hstack([isi0.magnitude, np.diff(x_dl)])
# Round the ISIs to decimal position, if requested
if decimals is not None:
ISIs = ISIs.round(decimals)
# Create list of surrogate spike trains by random ISI permutation
sts = []
for i in xrange(n):
surr_times = np.cumsum(np.random.permutation(ISIs)) * x.units + \
x.t_start
sts.append(neo.SpikeTrain(
surr_times, t_start=x.t_start, t_stop=x.t_sto |
kaedroho/wagtail | wagtail/admin/tests/pages/test_workflow_history.py | Python | bsd-3-clause | 2,629 | 0.004184 | from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from wagtail.core.models import Page
from wagtail.tests.utils import WagtailTestUtils
class TestWorkflowHistoryDetail(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.user = self.create_test_user | ()
self.login(self.user)
self.christmas_event = Page.objects.get(url_path='/home/events/christmas/')
self.christmas_event.save_revision()
workflow = self.christmas_event.get_workflow()
self.workflow_state = workflow.start(self.christmas_event, self.user)
def test_get_index(self):
response = self.clie | nt.get(
reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id])
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('wagtailadmin_pages:edit', args=[self.christmas_event.id]))
self.assertContains(response, reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id]))
def test_get_index_with_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id])
)
self.assertEqual(response.status_code, 302)
def test_get_detail(self):
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id])
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('wagtailadmin_pages:edit', args=[self.christmas_event.id]))
self.assertContains(response, reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id]))
def test_get_detail_with_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id])
)
self.assertEqual(response.status_code, 302)
|
svox1/e2openplugin-OpenWebif | plugin/controllers/models/info.py | Python | gpl-2.0 | 22,829 | 0.030492 | # -*- coding: utf-8 -*-
##############################################################################
# 2011 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
from Plugins.Extensions.OpenWebif.__init__ import _
from Components.About import about
from Components.config import config
from Components.NimManager import nimmanager
from Components.Harddisk import harddiskmanager
from Components.Network import iNetwork
from Components.Language import language
from RecordTimer import parseEvent
from Screens.Standby import inStandby
from timer import TimerEntry
from Tools.Directories import fileExists, pathExists
from time import time, localtime, strftime
from enigma import eDVBVolumecontrol, eServiceCenter, eServiceReference, eEnv
from twisted.web import version
from socket import has_ipv6, AF_INET6, AF_INET, inet_ntop, inet_pton, getaddrinfo
try:
from boxbranding import getBoxType, getMachineBuild, getMachineBrand, getMachineName, getImageDistro, getImageVersion, getImageBuild, getOEVersion, getDriverDate
from enigma import getEnigmaVersionString
except:
from owibranding import getBoxType, getMachineBuild, getMachineBrand, getMachineName, getImageDistro, getImageVersion, getImageBuild, getOEVersion, getDriverDate
def getEnigmaVersionString():
return about.getEnigmaVersionString()
import NavigationInstance
import os
import sys
import time
import string
OPENWEBIFVER = "OWIF 1.0.2"
STATICBOXINFO = None
def getOpenWebifVer():
return OPENWEBIFVER
def getFriendlyImageDistro():
dist = getImageDistro().replace("openatv","OpenATV").replace("openhdf","OpenHDF")
return dist
def getIPMethod(iface):
# iNetwork.getAdapterAttribute is crap and not portable
ipmethod = _("SLAAC")
if fileExists('/etc/network/interfaces'):
ifaces = '/etc/network/interfaces'
for line in file(ifaces).readlines():
if not line.startswith('#'):
if line.startswith('iface') and "inet6" in line and iface in line:
if "static" in line:
ipmethod = _("static")
if "dhcp" in line:
ipmethod = _("DHCP")
if "manual" in line:
ipmethod = _("manual/disabled")
if "6to4" in line:
ipmethod = "6to4"
return ipmethod
def getIPv4Method(iface):
# iNetwork.getAdapterAttribute is crap and not portable
ipv4method = _("static")
if fileExists('/etc/network/interfaces'):
ifaces = '/etc/network/interfaces'
for line in file(ifaces).readlines():
if not line.startswith('#'):
if line.startswith('iface') and "inet " in line and iface in line:
if "static" in line:
ipv4method = _("static")
if "dhcp" in line:
ipv4method = _("DHCP")
if "manual" in line:
ipv4method = _("manual/disabled")
return ipv4method
def getLinkSpeed(iface):
speed = _("unknown")
try:
speed = os.popen('ethtool ' + iface + ' | grep Speed: | awk \'{ print $2 }\'').read().strip()
except:
pass
speed = str(speed)
speed = speed.replace("Mb/s"," MBit/s")
speed = speed.replace("10000 MBit/s","10 GBit/s")
speed = speed.replace("1000 MBit/s","1 GBit/s")
return speed
def getNICChipSet(iface):
nic = _("unknown")
try:
nic = os.popen('ethtool -i ' + iface + ' | grep driver: | awk \'{ print $2 }\'').read().strip()
except:
pass
nic = str(nic)
return nic
def getFriendlyNICChipSet(iface):
friendlynic = getNICChipSet(iface)
friendlynic = friendlynic.replace("bcmgenet", "Broadcom Generic Gigabit Ethernet")
return friendlynic
def normalize_ipv6(orig):
net = []
if '/' in orig:
net = orig.split('/')
if net[1] == "128":
del net[1]
else:
net.append(orig)
addr = net[0]
addr = inet_ntop(AF_INET6, inet_pton(AF_INET6, addr))
if len(net) == 2:
addr += "/" + net[1]
return (addr)
def getAdapterIPv6(ifname):
addr = _("IPv4-only kernel")
firstpublic = None
if fileExists('/proc/net/if_inet6'):
addr = _("IPv4-only Python/Twisted")
if has_ipv6 and version.major >= 12:
proc = '/proc/net/if_inet6'
tempaddrs = []
for line in file(proc).readlines():
if line.startswith('fe80'):
continue
tmpaddr = ""
tmp = line.split()
if ifname == tmp[5]:
tmpaddr = ":".join([ tmp[0][i:i+4] for i in range(0,len(tmp[0]),4) ])
if firstpublic is None and (tmpaddr.startswith('2') or tmpaddr.startswith('3')):
firstpublic = normalize_ipv6(tmpaddr)
if tmp[2].lower() != "ff":
tmpaddr = "%s/%s" % (tmpaddr, int(tmp[2].lower(), 16))
tmpaddr = normalize_ipv6(tmpaddr)
tempaddrs.append(tmpaddr)
if len(tempaddrs) > 1:
tempaddrs.sort()
addr = ', '.join(tempaddrs)
elif len(tempaddrs) == 1:
addr = tempaddrs[0]
elif len(tempaddrs) == 0:
addr = _("none/IPv4-only network")
return {'addr':addr, 'firstpublic':firstpublic }
def formatIp(ip):
if ip is None or len(ip) != 4:
return "0.0.0.0"
return "%d.%d.%d.%d" % (ip[0], ip[1], ip[2], ip[3])
def getBasePath():
path = os.path.dirname(sys.modules[__name__].__file__)
chunks = path.split("/")
chunks.pop()
chunks.pop()
return "/".join(chunks)
def getPublicPath(file = ""):
return getBasePath() + "/public/" + file
def getViewsPath(file = ""):
return getBasePath() + "/controllers/views/" + file
def getPiconPath():
if pathExists("/media/usb/picon/"):
return "/media/usb/picon/"
elif pathExists("/media/cf/picon/"):
return "/media/cf/picon/"
elif pathExists("/media/hdd/picon/"):
return "/media/hdd/picon/"
elif pathExists("/usr/share/enigma2/picon/"):
return "/usr/share/enigma2/picon/"
elif pathExists("/picon/"):
return "/picon/"
else:
return ""
def getInfo(session = None):
# TODO: get webif versione somewhere!
info = {}
info['brand'] = getMachineBrand()
info['model'] = getMachineName()
info['boxtype'] = getBoxType()
info['machinebuild'] = getMachineBuild()
chipset = "unknown"
if fileExists("/etc/.box"):
f = open("/etc/.box",'r')
model = f.readline().strip().lower()
f.close()
if model.startswith("ufs") or model.startswith("ufc"):
if model in ("ufs910", "ufs922", "ufc960"):
chipset = "SH4 @266MHz"
else:
chipset = "SH4 @450MHz"
elif model in ("topf", "tf7700hdpvr"):
chipset = "SH4 @266MHz"
elif model.startswith("azbox"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "me":
chipset = "SIGMA 8655"
elif model == "minime":
chipset = "SIGMA 8653"
else:
chipset = "SIGMA 8634"
elif model.startswith("spark"):
if model == "spark7162":
chipset = "SH4 @540MHz"
else:
chipset = "SH4 @450MHz"
elif fileExists("/proc/stb/info/azmodel"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "me":
chipset = "SIGMA 8655"
elif model == "minime":
chipset = "SIGMA 8653"
else:
chipset = "SIGMA 8634"
elif fileExists("/proc/stb/info/model"):
f = open("/proc/stb/info/model",'r')
model = f.readline().strip().lower()
f.close()
if model == "tf7700hdpvr":
chipset = "SH4 @266MHz"
elif model == "nbox":
chipset = "STi7100 @266MHz"
elif model == "arivalink200" | :
chipset = "STi7109 @266MHz"
elif model in ("adb2850", "adb2849", "dsi87"):
chipset = "STi7111 @450MHz"
elif model in ("sagemcom88", "esi88"):
chipset = "STi7105 @450MHz"
elif model.startswith("spark"):
| if model == "spark7162":
chipset = "STi7162 @540MHz"
else:
chipset = "STi7111 @450MHz"
if fileExists("/proc/stb/info/chipset"):
f = open("/proc/stb/info/chipset",'r')
chipset = f.readline().strip()
f.close()
info['chipset'] = chipset
memFree = 0
for line in open("/proc/meminfo",'r'):
parts = line.split(':')
key = parts[0].strip()
if key == "MemTotal":
info['mem1'] = parts[1].strip().replace("kB", _(" |
StefanoFenu/colorizer | app.py | Python | gpl-3.0 | 2,268 | 0.007937 | from flask import Flask, request, render_template, redirect, url_for, jsonify, send_from_directory
from celery import Celery
from colorize import process_image
app = Flask(__name__)
# Celery configuration
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'
app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'
# Initialize Celery
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
@celery.task(bind=True)
def run_colorizer(self, image):
self.update_state(state='PROGRESS', meta={'current':0, 'total':100,
'status':'Coloring'})
output = process_image(image)
return {'current': 100, 'total': 100, 'state':'DONE', 'status': 'DONE', 'result':output}
@app.route('/index', methods=['GET', 'POST'])
@app.rout | e('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
return redirect(url_for('index'))
@app.route('/color', methods=['POST'])
def color():
image = request.json
task = run_colorizer.s(image).apply_async()
return jsonify({}), 202, {'Location': url_for('taskstatus', task_id=task.id)}
@app.route('/js/<path:path>')
def js(path):
return send_from_directory('js', path)
@app.route('/style/ | <path:path>')
def style(path):
return send_from_directory('style', path)
@app.route('/status/<task_id>')
def taskstatus(task_id):
task = run_colorizer.AsyncResult(task_id)
if task.state == 'PENDING':
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending.'
}
elif task.state != 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
response = {
'state': task.state,
'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return jsonify(response)
if __name__ == '__main__':
app.run(debug=True)
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractAbysslibraryWordpressCom.py | Python | bsd-3-clause | 739 | 0.028417 | def extractAbysslibraryWordpressCom(item):
'''
Parser for 'abysslibrary.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Doomsday Carnival', 'Doomsday Carnival', 'translated'),
('Fierce Silk Flower', 'Fierce Silk Flower', 'translated'),
('PRC | ', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, | chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
kinow-io/kinow-python-sdk | test/test_blog_category_response.py | Python | apache-2.0 | 813 | 0.00246 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
| """
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.blog_category_response import BlogCategoryResponse
class TestBlogCategoryResponse(unittest.TestCase):
""" BlogCategoryResponse unit test stub | s """
def setUp(self):
pass
def tearDown(self):
pass
def testBlogCategoryResponse(self):
"""
Test BlogCategoryResponse
"""
model = kinow_client.models.blog_category_response.BlogCategoryResponse()
if __name__ == '__main__':
unittest.main()
|
dvl/cookiecutter-django-clean-template | {{ cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}/urls.py | Python | mit | 413 | 0 | from django.conf import settings
from django.contrib import admin
f | rom django.views.generic import TemplateView
from django.urls import include, path
urlpatterns = [
path('', TemplateV | iew.as_view(template_name='base.html'), name='index'),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
path('__debug__/', include(debug_toolbar.urls)),
]
|
datapythonista/pandas | pandas/tests/window/test_dtypes.py | Python | bsd-3-clause | 5,077 | 0.001379 | import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
from pandas.core.base import DataError
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions works for different dtypes
#
# further note that we are only checking rolling for fully dtype
# compliance (though both expanding and ewm inherit)
def get_dtype(dtype, coerce_int=None):
if coerce_int is False and "int" in dtype:
return None
if dtype != "category":
return np.dtype(dtype)
return dtype
@pytest.mark.parametrize(
"method, data, expected_data, coerce_int, min_periods",
[
("count", np.arange(5), [1, 2, 2, 2, 2], True, 0),
("count", np.arange(10, 0, -2), [1, 2, 2, 2, 2], True, 0),
("count", [0, 1, 2, np.nan, 4], [1, 2, 2, 1, 1], False, 0),
("max", np.arange(5), [np.nan, 1, 2, 3, 4], True, None),
("max", np.arange(10, 0, -2), [np.nan, 10, 8, 6, 4], True, None),
("max", [0, 1, 2, np.nan, 4], [np.nan, 1, 2, np.nan, np.nan], False, None),
("min", np.arange(5), [np.nan, 0, 1, 2, 3], True, None),
("min", np.arange(10, 0, -2), [np.nan, 8, 6, 4, 2], True, None),
("min", [0, 1, 2, np.nan, 4], [np.nan, 0, 1, np.nan, np.nan], False, None),
("sum", np.arange(5), [np.nan, 1, 3, 5, 7], True, None),
("sum", np.arange(10, 0, -2), [np.nan, 18, 14, 10, 6], True, None),
("sum", [0, 1, 2, np.nan, 4], [np.nan, 1, 3, np.nan, np.nan], False, None),
("mean", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),
("mean", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),
("mean", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False, None),
("std", np.arange(5), [np.nan] + [np.sqrt(0.5)] * 4, True, None),
("std", np.arange(10, 0, -2), [np.nan] + [np.sqrt(2)] * 4, True, None),
(
"std",
[0, 1, 2, np.nan, 4],
[np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2,
False, |
None,
),
("var", np.arange(5), [np.nan, 0.5, 0.5, 0.5, 0.5], True, None),
("var", np.arange(10, 0, -2), [np.nan, 2, 2, 2, 2], True, None),
("var", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 0.5, np.nan, np.nan], False, None),
("median", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),
("median", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),
(
"media | n",
[0, 1, 2, np.nan, 4],
[np.nan, 0.5, 1.5, np.nan, np.nan],
False,
None,
),
],
)
def test_series_dtypes(method, data, expected_data, coerce_int, dtypes, min_periods):
s = Series(data, dtype=get_dtype(dtypes, coerce_int=coerce_int))
if dtypes in ("m8[ns]", "M8[ns]") and method != "count":
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
getattr(s.rolling(2, min_periods=min_periods), method)()
else:
result = getattr(s.rolling(2, min_periods=min_periods), method)()
expected = Series(expected_data, dtype="float64")
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"method, expected_data, min_periods",
[
("count", {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}, 0),
(
"max",
{0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])},
None,
),
(
"min",
{0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])},
None,
),
(
"sum",
{0: Series([np.nan, 2, 6, 10, 14]), 1: Series([np.nan, 4, 8, 12, 16])},
None,
),
(
"mean",
{0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
None,
),
(
"std",
{
0: Series([np.nan] + [np.sqrt(2)] * 4),
1: Series([np.nan] + [np.sqrt(2)] * 4),
},
None,
),
(
"var",
{0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])},
None,
),
(
"median",
{0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
None,
),
],
)
def test_dataframe_dtypes(method, expected_data, dtypes, min_periods):
if dtypes == "category":
pytest.skip("Category dataframe testing not implemented.")
df = DataFrame(np.arange(10).reshape((5, 2)), dtype=get_dtype(dtypes))
if dtypes in ("m8[ns]", "M8[ns]") and method != "count":
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
getattr(df.rolling(2, min_periods=min_periods), method)()
else:
result = getattr(df.rolling(2, min_periods=min_periods), method)()
expected = DataFrame(expected_data, dtype="float64")
tm.assert_frame_equal(result, expected)
|
graphql-python/graphene | examples/complex_example.py | Python | mit | 1,461 | 0 | import graphene
class GeoInput(graphene.InputObjectType):
lat = graphene.Float(required=True)
lng = graphene.Float(required=True)
@property
def latlng(self):
return f"({self.lat},{self.lng})"
class Address(gra | phene.ObjectType):
latlng = graphene.String()
class Query(graphene.ObjectType):
address = graphene.Field(Address, geo=GeoInput(required=True))
def resolve_address(root, info, geo):
return Address(latlng=geo.latlng)
class CreateAddress(graphene.Mutation):
class Arguments:
geo = GeoInput(required=True)
Output = Address
def mutate(root, info, geo):
return Address(latlng=geo.latlng) |
class Mutation(graphene.ObjectType):
create_address = CreateAddress.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
query = """
query something{
address(geo: {lat:32.2, lng:12}) {
latlng
}
}
"""
mutation = """
mutation addAddress{
createAddress(geo: {lat:32.2, lng:12}) {
latlng
}
}
"""
def test_query():
result = schema.execute(query)
assert not result.errors
assert result.data == {"address": {"latlng": "(32.2,12.0)"}}
def test_mutation():
result = schema.execute(mutation)
assert not result.errors
assert result.data == {"createAddress": {"latlng": "(32.2,12.0)"}}
if __name__ == "__main__":
result = schema.execute(query)
print(result.data["address"]["latlng"])
|
Scalr/scalr-ctl | scalrctl/commands/farm_role_gv.py | Python | apache-2.0 | 314 | 0.006369 | __author__ = 'Dmitriy Korsakov'
__doc__ = 'Manage global variables for farm roles'
from scalrctl import commands
class UpdateFarmRoleGlobalVariable(commands.Action):
prompt_for = ["roleId", "globalVariableName"]
class | DeleteFarmRoleGlobalVariable(commands | .Action):
delete_target = 'globalVariableName' |
Southpaw-TACTIC/TACTIC | src/tactic/ui/examples/__init__.py | Python | epl-1.0 | 823 | 0.001215 | ###########################################################
#
# Copyright (c) 2005-2009, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology | , and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from ui_playground_panel_wdg import *
from font_palettes_example_wdg import *
from panning_scroll_example_wdg import *
from menu_examples_wdg import *
from event_examples_wdg import *
from misc_examples_wdg import *
from fx_anim_examples_wdg import *
from keyboard_handler_examples_wdg import *
from search_class_tag_examples_wdg import *
from | efficient_table_example_wdg import *
from dev_sandbox_01_wdg import *
from dev_sandbox_02_wdg import *
from dev_sandbox_03_wdg import *
|
benschmaus/catapult | telemetry/telemetry/internal/util/ts_proxy_server.py | Python | bsd-3-clause | 4,604 | 0.008036 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Start and stop tsproxy."""
import logging
import os
import re
import subprocess
import sys
from telemetry.core import util
from telemetry.internal.util import atexit_with_log
import py_utils
_TSPROXY_PATH = os.path.join(
util.GetTelemetryThirdPartyDir(), 'tsproxy', 'tsproxy.py')
def ParseTsProxyPortFromOutput(output_line):
port_re = re.compile(
r'Started Socks5 proxy server on '
r'(?P<host>[^:]*):'
r'(?P<port>\d+)')
m = port_re.match(output_line.strip())
if m:
return int(m.group('port'))
class TsProxyServer(object):
"""Start and Stop Tsproxy.
TsProxy provides basic latency, download and upload traffic shaping. This
class provides a programming API to the tsproxy script in
telemetry/third_party/tsproxy/tsproxy.py
"""
def __init__(self, host_ip=None, http_port=None, https_port=None):
"""Initialize TsProxyServer.
"""
self._proc = None
self._port = None
self._is_running = False
self._host_ip = host_ip
assert bool(http_port) == bool(https_port)
self._http_port = http_port
self._https_port = https_port
@property
def port(self):
return self._port
def StartServer(self, timeout=10):
"""Start TsProxy server and verify that it started.
"""
cmd_line = [sys.executable, _TSPROXY_PATH]
cmd_line.extend([
'--port=0']) # Use port 0 so tsproxy picks a random available port.
if self._host_ip:
cmd_line.append('--desthost=%s' % self._host_ip)
if self._http_port:
cmd_line.append(
'--mapports=443:%s,*:%s' % (self._https_port, self._http_port))
logging.info('Tsproxy commandline: %r' % cmd_line)
self._proc = subprocess.Popen(
cmd_line, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=1)
atexit_with_log.Register(self.StopServer)
try:
py_utils.WaitFor(self._IsStarted, timeout)
logging.info('TsProxy port: %s', self._port)
self._is_running = True
except py_utils.TimeoutException:
err = self.StopServer()
raise RuntimeError(
'Error starting tsproxy: %s' % err)
def _IsStarted(self):
assert not self._is_running
assert self._proc
if self._proc.poll() is not None:
return False
self._proc.stdout.flush()
self._port = ParseTsProxyPortFromOutput(
output_line=self._proc.stdout.readline())
return self._port != None
def _IssueCommand(self, command_string, timeout):
logging.info('Issuing command to ts_proxy_server: %s', command_string)
command_output = []
self._proc.stdin.write('%s\n' % command_string)
self._proc.stdin.flush()
self._proc.stdout.flush()
def CommandStatusIsRead():
command_output.append(self._proc.stdout.readline().strip())
return (
command_output[-1] == 'OK' or command_output[-1] == 'ERROR')
py_utils.WaitFor(CommandStatusIsRead, timeout)
if not 'OK' in command_output:
raise RuntimeError('Failed to execute command %s:\n%s' %
(repr(command_string), '\n'.join(command_output)))
def UpdateOutboundPorts(self, http_port, https_port, timeout=5):
assert http_port and https_port
assert http_port != https_port
assert isinstance(http_port, int) and isinstance(https_port, int)
assert 1 <= http_port <= 65535
assert 1 <= https_port <= 65535
self._IssueCommand('set mapports 443:%i,*:%i' % (https_port, http_port),
timeout)
def UpdateTrafficSettings(self, round_trip_latency_ms=0,
download_bandwidth_kbps=0, upload_bandwidth_kbps=0, timeout=5):
self._IssueCommand('set rtt %s' % round_trip_latency_ms, timeout)
self._IssueCommand('set inkbps %s' % download_bandwidth_kbps, timeout)
self._IssueCommand('set outkbps %s' % upload_b | andwidth_kbps, timeout)
def StopServer(self):
"""Stop TsProxy Server."""
if not self._is_running:
logging.debug('Attempting to stop TsProxy server that is not running.')
return
if self._proc:
self._proc.terminate()
self._proc.wait()
err = self._proc.stderr.read()
self._p | roc = None
self._port = None
self._is_running = False
return err
def __enter__(self):
"""Add support for with-statement."""
self.StartServer()
return self
def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
"""Add support for with-statement."""
self.StopServer()
|
buildinspace/peru | peru.py | Python | mit | 411 | 0 | #! /usr/bin/env python3
# This script is for running peru directly from the repo, mainly fo | r
# development. This isn't what gets installed when you install peru. That would
# be a script generated by setup.py, which calls peru.main.main().
import os
import sys
repo_root = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, repo_roo | t)
import peru.main # noqa: E402
sys.exit(peru.main.main())
|
andremilke/utility | portscan.py | Python | gpl-3.0 | 344 | 0 | import socket
import sys
if len(sys.argv) < 2:
    # No target host given: print usage and exit.
    print("How to use portscan")
    print(sys.argv[0], "10.1.1.1")
else:
    # Valid TCP ports are 1..65535 inclusive; range() excludes its stop
    # value, so the original range(1, 65535) silently skipped port 65535.
    for port in range(1, 65536):
        # The context manager guarantees each socket is closed, instead of
        # leaking up to 65535 descriptors over a full scan.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1.0)  # avoid hanging on filtered ports
            # connect_ex() returns 0 on success instead of raising.
            if sock.connect_ex((sys.argv[1], port)) == 0:
                print("Port ", port, "[OPENED]")
|
anthonyalmarza/ngen | ngen/tests/test_models.py | Python | mit | 18,264 | 0.000493 | from __future__ import unicode_literals, print_function, absolute_import
import unittest
try:
import mock
except ImportError:
from unittest import mock
from ngen.exceptions import ValidationError
from ngen.models import (
BaseOptions,
BooleanField,
CharField,
Field,
FieldError,
FieldOptions,
ImproperlyConfigured,
IntegerField,
ListField,
Model,
ModelField,
ModelMeta,
ModelOptions,
ModelType,
NOT_FOUND
)
class TestBaseOptions(unittest.TestCase):
    """Tests for BaseOptions: abstract-ness, mapping protocol, str/repr."""

    class TempOptions(BaseOptions):
        # Minimal concrete subclass; '_'-prefixed keys are expected to be
        # hidden from iteration (see test_iter / test_len_equals_iter_len).
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                setattr(self, key, value)

    options = TempOptions(_missing=True, here=True)

    class IncompleteOptions(BaseOptions):
        # Deliberately leaves the abstract interface unimplemented.
        pass

    def test_IncompleteOptions_raises_when_instantiated(self):
        self.assertRaises(TypeError, self.IncompleteOptions)

    def test_len_equals_iter_len(self):
        # Only public attributes count: '_missing' is excluded.
        self.assertEqual(len(self.options), len(dict(self.options)))
        self.assertEqual(len(self.options), 1)

    def test_mapping(self):
        # Options can be splatted as keyword arguments.
        def func(**kwargs):
            return kwargs
        self.assertEqual(func(**self.options), {'here': True})

    def test_str(self):
        self.assertEqual(str(self.options), '(here=True)')

    def test_repr(self):
        self.assertEqual(repr(self.options), '<TempOptions: (here=True)>')

    def test_iter(self):
        self.assertEqual(dict(self.options), {'here': True})
class TestModelOptions(unittest.TestCase):
    """Tests for ModelOptions construction from a model's Meta class."""

    def test_is_subclass_of_BaseOptions(self):
        self.assertTrue(issubclass(ModelOptions, BaseOptions))

    def test_init_with_no_meta_cls(self):
        # Without a Meta class, options fall back to defaults.
        options = ModelOptions()
        self.assertFalse(options.abstract)

    def test_init_with_meta_cls(self):
        class Meta:
            abstract = True
        options = ModelOptions(Meta)
        self.assertTrue(options.abstract)

    def test_init_with_meta_cls_unknown_options(self):
        # Unrecognised Meta attributes must be rejected loudly.
        class Meta:
            unknown = 'value'
        self.assertRaises(ImproperlyConfigured, ModelOptions, Meta)
class TestModelMeta(unittest.TestCase):
    """Tests for ModelMeta, the per-model field registry."""

    def test_init_defaults(self):
        meta = ModelMeta()
        self.assertIsNone(meta.model)
        self.assertEqual(meta.fields, [])
        self.assertEqual(meta.field_names, [])

    def test_init_kwargs_mapping(self):
        # Arbitrary kwargs become attributes.
        meta = ModelMeta(test=True)
        self.assertTrue(meta.test)

    def test_str(self):
        # str() joins the registered field names in insertion order.
        meta = ModelMeta()
        field1 = mock.MagicMock()
        field1.name = 'foo'
        meta.add_field(field1)
        field2 = mock.MagicMock()
        field2.name = 'bar'
        meta.add_field(field2)
        self.assertEqual(str(meta), 'foo, bar')

    def test_repr(self):
        meta = ModelMeta()
        meta.model = 'test'
        self.assertEqual(repr(meta), '<Meta: test>')

    def test_add_field(self):
        meta = ModelMeta()
        field = mock.MagicMock()
        field.name = 'test'
        meta.add_field(field)
        # rel_idx records the field's position within the model.
        self.assertEqual(field.rel_idx, 0)
        self.assertTrue(field in meta.fields)
        self.assertTrue('test' in meta.field_names)

    def test_add_field_with_field_replacement(self):
        # Re-adding a field with an existing name replaces it in place,
        # preserving the original position.
        meta = ModelMeta()
        field1 = mock.MagicMock()
        field1.name = 'foo'
        meta.add_field(field1)
        field2 = mock.MagicMock()
        field2.name = 'bar'
        meta.add_field(field2)
        # Bug fix: these checks used assertTrue(a, b), whose second argument
        # is only the failure *message* -- the comparisons were never made.
        self.assertEqual(meta.get_field('foo'), field1)
        self.assertEqual(meta.fields[0], field1)
        self.assertEqual(meta.fields[1], field2)
        field3 = mock.MagicMock()
        field3.name = 'foo'
        meta.add_field(field3)
        self.assertEqual(meta.get_field('foo'), field3)
        self.assertEqual(meta.fields[0], field3)
        self.assertEqual(meta.fields[1], field2)
class TestModelType(unittest.TestCase):
    """Tests for the ModelType metaclass that assembles Model classes."""

    def test_meta_class_attr(self):
        self.assertEqual(ModelType.meta_class, ModelMeta)

    def test_options_class(self):
        self.assertEqual(ModelType.options_class, ModelOptions)

    def test_Meta_is_not_an_attribute(self):
        # The declared Meta class is consumed by the metaclass and must not
        # remain on the built model class.
        class Meta:
            abstract = False
        model = ModelType.__new__(
            ModelType,
            str('SomeModel'),
            (Model, ),
            {'Meta': Meta, '__module__': 'foo'}
        )
        self.assertFalse(hasattr(model, 'Meta'))

    def test_add_meta(self):
        # Meta is turned into options and attached to the class as .meta.
        with mock.patch.object(ModelType, 'meta_class') as meta_class:
            with mock.patch.object(ModelType, 'options_class') as options_class:
                options_class.return_value = {}
                meta = mock.MagicMock()
                meta_class.return_value = meta
                _meta = mock.MagicMock()
                model = ModelType.__new__(
                    ModelType,
                    str('SomeModel'),
                    (Model, ),
                    {'Meta': _meta, '__module__': 'foo'}
                )
                options_class.assert_called_once_with(_meta)
                meta_class.assert_called_once_with(model)
                self.assertEqual(model.meta, meta)

    def test_add_parent_fields(self):
        # Fields declared on parent classes are re-contributed to subclasses.
        with mock.patch.object(Model, 'meta') as meta:
            field = mock.MagicMock()
            field.add_to_class = mock.MagicMock()
            meta.fields = [field]
            model = ModelType.__new__(
                ModelType,
                str('SomeModel'),
                (Model, ),
                {'__module__': 'foo'}
            )
            self.assertTrue(field.add_to_class.called)
            field.add_to_class.assert_called_once_with(
                model, field.name
            )

    def test_add_fields(self):
        # Field attributes are contributed via add_to_class() and removed
        # from the plain class namespace.
        field = CharField()
        with mock.patch.object(field, 'add_to_class') as add_to_class:
            model = ModelType.__new__(
                ModelType,
                str('SomeModel'),
                (Model, ),
                {'__module__': 'foo', 'foo': field}
            )
            self.assertFalse(hasattr(model, 'foo'))
            self.assertTrue(add_to_class.called)
            add_to_class.assert_called_once_with(model, 'foo')

    def test_attribute_instances_with_add_to_class_method(self):
        # Any object exposing add_to_class participates in the same hook.
        thing = mock.MagicMock()
        thing.add_to_class = mock.MagicMock()
        model = ModelType.__new__(
            ModelType,
            str('SomeModel'),
            (Model, ),
            {'__module__': 'foo', 'foo': thing}
        )
        self.assertTrue(thing.add_to_class.called)
        thing.add_to_class.assert_called_once_with(model, 'foo')

    def test_regular_attribute(self):
        # Plain values stay as ordinary class attributes.
        model = ModelType.__new__(
            ModelType,
            str('SomeModel'),
            (Model, ),
            {'__module__': 'foo', 'foo': 'bar'}
        )
        self.assertTrue(hasattr(model, 'foo'))
        self.assertEqual(model.foo, 'bar')
class TestModels(unittest.TestCase):
class Friend(Model):
name = CharField()
age = IntegerField()
surname = CharField(source='last_name')
class Entity(Model):
id = IntegerField(required=True, allow_null=False)
first_name = CharField(source='name')
tags = ListField(source='foo.tags')
def setUp(self):
self.data = {
'id': 1,
'name': 'larry',
'url': 'google.com',
'foo': {
'bar': 2,
'tags': ['cool', 'easy'],
},
'friends': [
{'name': 'bob', 'age': 24},
{'name': 'alice', 'age': None, 'last_name': 'secret'}
]
}
def test_meta_fields_order(self):
self.assertEqual(self.Friend.meta.field_names, ['name', 'age', 'surname'])
self.assertEqual(self.Entity.meta.field_names, ['id', 'first_name', 'tags'])
def test_meta_fields_order_inheritance(self):
class Person(self.Entity):
friends = ListField(child=ModelField(self.Friend))
self.assertEqual(
Person.meta.field_names,
['id', 'first_name', 'tags', 'friends']
)
def test_init_with_data(self):
with mock.pat |
Deepak345/al-go-rithms | math/LCM/Python/LCM.py | Python | mit | 469 | 0.025586 | #Program to find the LCM of tw | o numbers
#Function to find GCD
def gcd(num1, num2):
    """Return the greatest common divisor of num1 and num2.

    Uses the iterative Euclidean algorithm.  The original recursive
    subtraction version recursed once per subtraction step, so skewed
    inputs such as gcd(10**6, 1) blew the recursion limit, and any zero
    argument recursed forever; modulo-based iteration handles both.
    """
    while num2:
        num1, num2 = num2, num1 % num2
    return num1
#Function to find LCM
def lcm(num1, num2):
    """Return the least common multiple of num1 and num2."""
    divisor = gcd(num1, num2)
    return (num1 * num2) // divisor
#Driver function for testing above
def test():
    """Smoke-test lcm() with a fixed example pair."""
    first, second = 12, 4
    print('LCM of {} and {} is {}'.format(first, second, lcm(first, second)))
|
dimagi/commcare-hq | corehq/apps/data_dictionary/migrations/0006_caseproperty_group.py | Python | bsd-3-clause | 363 | 0 | from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the free-text 'group' column to CaseProperty, defaulting to ''.

    dependencies = [
        ('data_dictionary', '0005_casetype_fully_generated'),
    ]

    operations = [
        migrations.AddField(
            model_name='caseproperty',
            name='group',
            # blank=True keeps the field optional in forms/validation.
            field=models.TextField(default='', blank=True),
        ),
    ]
|
d1m0/browser_bench | compare.py | Python | mit | 1,909 | 0.014667 | #! /usr/bin/env python
from SimpleHTTPServer | import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from threading i | mport Thread
import os
import json
import subprocess
import sys
import argparse
from benchmark import runOneBenchmark, availableBenchmarks, runBenchmarkSet
from pickle import dump
from pick_safedispatch import pickSDBrowser
from common import error, debug
def compareBrowsers(browsers, benchmark, nruns, port):
    """Run |benchmark| |nruns| times in each browser and collect results.

    Returns a list whose first element is the tuple of browser paths and
    whose remaining elements are per-run tuples of benchmark results, in
    the same order as |browsers|.
    """
    results = [tuple(browsers)]
    # Bug fix: the loop previously read the module-global ``args.nruns``
    # instead of the ``nruns`` parameter, so the argument was ignored (and
    # the function crashed when called without the CLI args parsed).
    for _ in xrange(0, nruns):
        one_run = [runOneBenchmark(browser, benchmark, port)
                   for browser in browsers]
        results.append(tuple(one_run))
    return results
if (__name__ == '__main__'):
    # Command-line driver: run each requested benchmark against each browser
    # binary and pickle one result set per (benchmark, browser) pair.
    parser = argparse.ArgumentParser(description='Run a single benchmark and print the results')
    parser.add_argument('--benchmark', type=str, nargs='+', help='name of benchmark to run. One of: ' + ' '.join(availableBenchmarks()))
    parser.add_argument('--nruns', type=int, help='number of times to run both')
    parser.add_argument('--browsers', type=str, nargs='+', help='path to browser executables to compare')
    parser.add_argument('--labels', type=str, nargs='+', help='human readable labels for each browser')
    args = parser.parse_args()
    if (len(args.browsers) != len(args.labels)):
        # error() exits; labels and browsers are zipped pairwise below.
        error("You must enter the same number of labels and browser")
    browserLabels = dict(zip(args.browsers, args.labels))
    for benchmark in args.benchmark:
        for browser in args.browsers:
            # 'safedispatch' is a pseudo-browser resolved per benchmark.
            if browser == "safedispatch":
                binary = pickSDBrowser(benchmark)
            else:
                binary = browser
            # Output filename encodes benchmark, run count and label.
            name = benchmark + "_" + str(args.nruns) + "_" + browserLabels[binary] +\
                ".pickl"
            print "Running benchmark {0} for browser {1}".format(benchmark, browser)
            res = runBenchmarkSet(binary, benchmark, 5005, args.nruns)
            dump(res, open(name, 'w'))
sergey-raevskiy/dcs | src/smart/reed.py | Python | bsd-2-clause | 450 | 0.006667 | import dev, random, time
class reed(dev.dev):
    """Simulated reed (magnetic contact) sensor device.

    Periodically reports an open/closed state to another device.
    """

    def __init__(self, sendto, name):
        self.sendto = sendto  # destination device id for state reports
        self.name = name

    def _loop(self):
        """One polling step: report the contact state, then sleep 2s."""
        # ~5% chance per poll of reporting the contact as open.  Renamed the
        # local from ``open`` to avoid shadowing the builtin.
        is_open = 1 if random.uniform(0, 100) > 95 else 0
        self.send(self.sendto, is_open)
        time.sleep(2)
def main(argv):
    """Entry point: argv = [src, uplevel, sendto, name]."""
    src, uplevel, sendto = [int(value) for value in argv[:3]]
    name = argv[3]
    reed(sendto, name).run(src, uplevel)
| |
thiagopena/djangoSIGE | djangosige/apps/estoque/views/movimento.py | Python | mit | 12,100 | 0.001903 | # -*- coding: utf-8 -*-
from django.urls import reverse_lazy
from django.shortcuts import redirect
from itertools import chain
from datetime import datetime
from decimal import Decimal
from djangosige.apps.base.custom_views import CustomDetailView, CustomCreateView, CustomListView
from djangosige.apps.estoque.forms import EntradaEstoqueForm, SaidaEstoqueForm, TransferenciaEstoqueForm, ItensMovimentoFormSet
from djangosige.apps.estoque.models import MovimentoEstoque, EntradaEstoque, SaidaEstoque, TransferenciaEstoque, ProdutoEstocado
class MovimentoEstoqueMixin(object):
    """Shared logic to apply one stock-movement item to product stock levels."""

    def adicionar_novo_movimento_estoque(self, itens_mvmt_obj, pform,
                                         lista_produtos, lista_produtos_estocados):
        """Apply one movement item, updating stock totals in memory only.

        Modified Produto / ProdutoEstocado instances are appended to
        ``lista_produtos`` / ``lista_produtos_estocados`` for the caller to
        save later; validation problems are attached to ``pform``.
        """
        prod = itens_mvmt_obj.produto
        lista_produtos.append(prod)
        # Update the products' current stock totals per movement type.
        if prod.estoque_atual is not None and isinstance(self.object, EntradaEstoque):
            # Stock entry: add the quantity at the destination location.
            prod_estocado = ProdutoEstocado.objects.get_or_create(
                local=self.object.local_dest, produto=itens_mvmt_obj.produto)[0]
            prod_estocado.quantidade = prod_estocado.quantidade + itens_mvmt_obj.quantidade
            lista_produtos_estocados.append(prod_estocado)
            prod.estoque_atual = prod.estoque_atual + itens_mvmt_obj.quantidade
        elif prod.estoque_atual is not None and isinstance(self.object, SaidaEstoque):
            # Stock exit: clamp the item quantity to what the origin
            # location actually holds.
            prod_estocado = ProdutoEstocado.objects.get_or_create(
                local=self.object.local_orig, produto=itens_mvmt_obj.produto)[0]
            if itens_mvmt_obj.quantidade > prod_estocado.quantidade:
                itens_mvmt_obj.quantidade = prod_estocado.quantidade
                prod_estocado.quantidade = Decimal('0.00')
            else:
                prod_estocado.quantidade = prod_estocado.quantidade - itens_mvmt_obj.quantidade
            lista_produtos_estocados.append(prod_estocado)
            if prod.estoque_atual < itens_mvmt_obj.quantidade:
                # Not enough total stock: surface a form error instead of
                # letting the total go negative.
                pform.add_error('quantidade', 'Quantidade retirada do estoque maior que o estoque atual (' +
                                str(prod.estoque_atual).replace('.', ',') + ') do produto.')
            else:
                prod.estoque_atual = prod.estoque_atual - itens_mvmt_obj.quantidade
        elif isinstance(self.object, TransferenciaEstoque):
            # Transfer: move the quantity from origin to destination,
            # clamped to what the origin holds.
            prod_estocado_orig = ProdutoEstocado.objects.get_or_create(
                local=self.object.local_estoque_orig, produto=itens_mvmt_obj.produto)[0]
            prod_estocado_dest = ProdutoEstocado.objects.get_or_create(
                local=self.object.local_estoque_dest, produto=itens_mvmt_obj.produto)[0]
            if itens_mvmt_obj.quantidade > prod_estocado_orig.quantidade:
                itens_mvmt_obj.quantidade = prod_estocado_orig.quantidade
                prod_estocado_orig.quantidade = Decimal('0.00')
            else:
                prod_estocado_orig.quantidade = prod_estocado_orig.quantidade - \
                    itens_mvmt_obj.quantidade
            prod_estocado_dest.quantidade = prod_estocado_dest.quantidade + \
                itens_mvmt_obj.quantidade
            lista_produtos_estocados.append(prod_estocado_orig)
            lista_produtos_estocados.append(prod_estocado_dest)
class AdicionarMovimentoEstoqueBaseView(CustomCreateView, MovimentoEstoqueMixin):
    """Base create-view shared by the entry/exit/transfer movement views."""
    permission_codename = 'add_movimentoestoque'

    def get_success_message(self, cleaned_data):
        return self.success_message % dict(cleaned_data, pk=self.object.pk)

    def get_context_data(self, **kwargs):
        context = super(AdicionarMovimentoEstoqueBaseView,
                        self).get_context_data(**kwargs)
        # Subclasses customise title/return-url via view_context().
        return self.view_context(context)

    def get(self, request, *args, **kwargs):
        """Render an empty movement form plus the item formset."""
        self.object = None
        form_class = self.get_form_class()
        form = form_class()
        form.initial['data_movimento'] = datetime.today().strftime('%d/%m/%Y')
        itens_form = ItensMovimentoFormSet(prefix='itens_form')
        return self.render_to_response(self.get_context_data(form=form, itens_form=itens_form,))

    def post(self, request, *args, **kwargs):
        """Validate form + formset and persist the movement and stock changes."""
        self.object = None
        # Strip thousands separators ('.') from decimal fields before
        # validation.
        req_post = request.POST.copy()
        for key in req_post:
            if ('quantidade' in key or
                    'valor' in key or
                    'total' in key):
                req_post[key] = req_post[key].replace('.', '')
        request.POST = req_post
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        itens_form = ItensMovimentoFormSet(request.POST, prefix='itens_form')
        if (form.is_valid() and itens_form.is_valid()):
            self.object = form.save(commit=False)
            lista_produtos = []
            lista_produtos_estocados = []
            itens_form.instance = self.object
            for pform in itens_form:
                if pform.cleaned_data != {}:
                    itens_mvmt_obj = pform.save(commit=False)
                    itens_mvmt_obj.movimento_id = self.object
                    self.adicionar_novo_movimento_estoque(
                        itens_mvmt_obj, pform, lista_produtos, lista_produtos_estocados)
                    # Abort if the item produced validation errors (e.g.
                    # withdrawing more than the available stock).
                    if len(pform.errors):
                        return self.form_invalid(form=form, itens_form=itens_form)
            else:
                # for-else: no break occurs above, so this runs whenever the
                # loop completes without an early return -- all items valid.
                self.object.save()
                itens_form.save()
                for prod in lista_produtos:
                    prod.save()
                for prod_estocado in lista_produtos_estocados:
                    prod_estocado.save()
                return self.form_valid(form)
        return self.form_invalid(form=form, itens_form=itens_form)
class AdicionarEntradaEstoqueView(AdicionarMovimentoEstoqueBaseView):
    """Create view for stock *entry* movements."""
    form_class = EntradaEstoqueForm
    template_name = "estoque/movimento/movimento_estoque_add.html"
    success_url = reverse_lazy('estoque:listaentradasestoqueview')
    success_message = "<b>Movimento de estoque de entrada nº%(pk)s</b> adicionado com sucesso."

    def view_context(self, context):
        context['title_complete'] = 'ADICIONAR ENTRADA EM ESTOQUE'
        context['return_url'] = reverse_lazy(
            'estoque:listaentradasestoqueview')
        return context
class AdicionarSaidaEstoqueView(AdicionarMovimentoEstoqueBaseView):
    """Create view for stock *exit* movements."""
    form_class = SaidaEstoqueForm
    template_name = "estoque/movimento/movimento_estoque_add.html"
    success_url = reverse_lazy('estoque:listasaidasestoqueview')
    success_message = "<b>Movimento de estoque de saída nº%(pk)s</b> adicionado com sucesso."

    def view_context(self, context):
        context['title_complete'] = 'ADICIONAR SAÍDA EM ESTOQUE'
        context['return_url'] = reverse_lazy('estoque:listasaidasestoqueview')
        return context
class AdicionarTransferenciaEstoqueView(AdicionarMovimentoEstoqueBaseView):
    """Create view for stock *transfer* movements."""
    form_class = TransferenciaEstoqueForm
    template_name = "estoque/movimento/movimento_estoque_add.html"
    success_url = reverse_lazy('estoque:listatransferenciasestoqueview')
    success_message = "<b>Movimento de estoque de transferência nº%(pk)s</b> adicionado com sucesso."

    def view_context(self, context):
        context['title_complete'] = 'ADICIONAR TRANSFERÊNCIA EM ESTOQUE'
        context['return_url'] = reverse_lazy(
            'estoque:listatransferenciasestoqueview')
        return context
class MovimentoEstoqueBaseListView(CustomListView):
    """Base list view for stock movements; subclasses shape the context."""
    permission_codename = 'view_movimentoestoque'

    def get_context_data(self, **kwargs):
        context = super(MovimentoEstoqueBaseListView,
                        self).get_context_data(**kwargs)
        return self.view_context(context)
class MovimentoEstoqueListView(MovimentoEstoqueBaseListView):
template_name = 'estoque/movimento/movimento_estoque_list.html'
context_object_name = 'all_movimentos'
success_url = reverse_lazy('estoque:listamovimentoestoqueview')
def view_context(self, context):
context['title_complete'] = 'TODAS AS MOVIMENTAÇÕES DE ESTOQUE'
return context
def get_quer |
chrisortman/CIS-121 | k0765042/Midterm/Main.py | Python | mit | 5,343 | 0.003556 | """
Bryant Conquest
This is a Game
"""
from death import Finish
from death import lives
from scene import Scene
import lock
import random
import time
#Weapons
fist = 0
metal_pipe = 0
class Engine(object):
    """Drives the game: runs scenes from the map until the final scene."""

    def __init__(self, scene_map):
        self.scene_map = scene_map

    def play(self):
        """Run scene.enter() in a loop, following the returned scene names."""
        current_scene = self.scene_map.opening_scene()
        last_scene = self.scene_map.next_scene('death')
        while current_scene != last_scene:
            next_scene_name = current_scene.enter()
            current_scene = self.scene_map.next_scene(next_scene_name)
        # be sure to print out the last scene
        current_scene.enter()
class GettingToKnowEveryone(Scene):
    """Opening scene: a guard asks the player's name."""

    def enter(self):
        lives = 2  # NOTE(review): local only, never read; the module 'lives' is untouched
        print "Gaurd: What is your name Sir"
        name = raw_input(">")
        time.sleep(1)
        if name == "Dave" or name == "dave":
            # Claiming to be Dave jumps straight to the ending scene.
            print("Gaurd: Oh Did not know that was you Dave sorry for the inconveince.")
            return 'death'
        else:
            print "Gaurd: Get moving %s" %name
            print "Gaurd: You nice and comfy in there?"
            print "Gaurd: Cause you better be you were the last one to be round up."
            print "Gaurd: Caused Quite a lot of trouble"
            print "Gaurd: And remember always tell the truth."
            return 'the_beginning'
class TheBeginning(Scene):
    """Prison-cell scene: learn about lives and optionally gain the pipe.

    Mutates the module-level ``lives`` and ``metal_pipe`` globals.
    """

    def enter(self):
        global lives
        global metal_pipe
        print(chr(27) + "[2J")  # ANSI escape sequence: clear the terminal
        print "Prisoner 1: They Finally found you huh."
        print "Prisoner 1: You were are last chance. "
        print "Prisoner 1: Did you ever Figure out your powers? "
        escape_choice = raw_input(">")
        time.sleep(1)
        if escape_choice == "Y":
            # Lying costs a life.
            print "Prisoner 1: You sit on a throne of lies."
            print "The Prisoner pulls out a rusty pipe and hits you"
            print "The Prisoner washes away as if it was just a memory"
            print "-1 life"
            lives = lives - 1
        elif escape_choice == "N":
            # Honesty grants a life and the chance to pick up the pipe.
            print "Prisoner 1: One of the things you can do is you have lives that regenerate over time. "
            print "Prisoner 1: They also regenerate if you do something that is worthy."
            print "+1 Lives"
            lives = lives + 1
            print "Prisoner 1: I have a gift for you as well."
            print "The Prisoner washes away as if it was just a memory and drops a pipe"
            print "Do you pick up the pipe?"
            weapon_choice = raw_input(">")
            if weapon_choice == 'y' or weapon_choice == 'Y':
                print "Gained Metal Pipe"
                metal_pipe = 1
            elif weapon_choice == 'n' or weapon_choice == 'N':
                print "Have you not your tetnaus shots?"
            else:
                print "Invalid Answer"
            print "Behind him 2 tunnels appear one on the right and one on the left."
        else:
            # Bad input: re-enter this scene.
            print "Not a valid choice Y or N for an answer"
            return "the_beginning"
        print ("Which Tunnel do you pick the left or the right")
        choice = raw_input(">")
        if choice == "left":
            return 'left_tunnel'
        if choice == "right":
            return 'right_tunnel'
class LeftTunnel(Scene):
def enter(self):
global lives
print "You find a chest and wonder what's inside"
print "The Lock looks to be some mastermind type game"
right = lock.passcode()
print right
if right == 1:
print "You open the chest to find a golden sword"
print "You trun around and see a silverish, dragon"
print "What do you do? hit"
raw_input(">")
print "The Sword shatters in your hands as you hit the dragon."
if metal_pipe == "1":
print "You pull out your last defense a metal pipe."
print "You stab the dragon with your pipe and it turns into stone"
lives = 10
return 'death'
else:
print "The Dragon eats you and you go towards the light."
return 'death'
elif right == 0:
print "The chest starts making a ticking sound."
print "You try to run away but the chest explodes."
lives = 0
return 'death'
class RightTunnel(Scene):
    """Right tunnel: always fatal - sets lives to 0 and ends the game."""

    def enter(self):
        global lives
        print "You are walking through the tunnel and see a light"
        print "You notice that the cave is now made out of a solid vine like structutrue that is on fire."
        print "You try and run away but die and fall into the abyss below"
        lives = 0
        return 'death'
class Map(object):
    """Registry of scene instances keyed by scene name."""

    scenes = {
        'getting_to_know_everyone': GettingToKnowEveryone(),
        'the_beginning': TheBeginning(),
        'left_tunnel': LeftTunnel(),
        'right_tunnel': RightTunnel(),
        'death': Finish(),
    }

    def __init__(self, start_scene):
        self.start_scene = start_scene

    def next_scene(self, scene_name):
        # Returns None for unknown scene names.
        val = Map.scenes.get(scene_name)
        return val

    def opening_scene(self):
        return self.next_scene(self.start_scene)
# Build the scene map, wire it to the engine, and start playing.
a_map = Map('getting_to_know_everyone')
a_game = Engine(a_map)
a_game.play()
hellohaptik/chatbot_ner | external_api/api.py | Python | gpl-3.0 | 12,022 | 0.003244 | from __future__ import absolute_import
import json
import random
from django.http import HttpResponse
from datastore.datastore import DataStore
from datastore.exceptions import (DataStoreSettingsImproperlyConfiguredException, EngineNotImplementedException,
EngineConnectionException, IndexForTransferException,
AliasForTransferException, NonESEngineTransferException)
from datastore.exceptions import IndexNotFoundException, InvalidESURLException, \
SourceDestinationSimilarException, \
InternalBackupException, AliasNotFoundException, PointIndexToAliasException, \
FetchIndexForAliasException, DeleteIndexFromAliasException
from chatbot_ner.config import ner_logger
from external_api.constants import ENTITY_DATA, ENTITY_NAME, LANGUAGE_SCRIPT, ENTITY_LIST, \
EXTERNAL_API_DATA, SENTENCES, LANGUAGES
from django.views.decorators.csrf import csrf_exempt
from external_api.lib import dictionary_utils
from external_api.response_utils import external_api_response_wrapper
from external_api.exceptions import APIHandlerException
def get_entity_word_variants(request):
    """Return the value/variant dictionary for an entity.

    Args:
        request (HttpRequest): GET request carrying ENTITY_NAME.

    Returns:
        HttpResponse: JSON whose 'result' is a list of
        {'value': ..., 'variants': [...]} dicts sorted by value.
    """
    response = {"success": False, "error": "", "result": []}
    try:
        entity_name = request.GET.get(ENTITY_NAME)
        datastore_obj = DataStore()
        result = datastore_obj.get_entity_dictionary(entity_name=entity_name)
        # sorted() over the mapping replaces the manual
        # list(result.keys()) + .sort() + append loop and works the same on
        # Python 2 and 3.
        response['result'] = [
            {'value': value, 'variants': result[value]}
            for value in sorted(result)
        ]
        response['success'] = True
    except (DataStoreSettingsImproperlyConfiguredException,
            EngineNotImplementedException,
            EngineConnectionException, FetchIndexForAliasException) as error_message:
        # Known datastore failures -> HTTP 500 with the error message.
        response['error'] = str(error_message)
        ner_logger.exception('Error: %s' % error_message)
        return HttpResponse(json.dumps(response), content_type='application/json', status=500)
    except Exception as e:
        response['error'] = str(e)
        ner_logger.exception('Error: %s' % e)
        return HttpResponse(json.dumps(response), content_type='application/json', status=500)
    return HttpResponse(json.dumps(response), content_type='application/json', status=200)
@csrf_exempt
def update_dictionary(request):
    """
    This function is used to update the dictionary entities.
    Args:
        request (HttpRequest): POST request whose EXTERNAL_API_DATA payload
            carries ENTITY_NAME, ENTITY_DATA and LANGUAGE_SCRIPT.
    Returns:
        HttpResponse : HttpResponse with appropriate status and error message.
    """
    response = {"success": False, "error": "", "result": []}
    try:
        external_api_data = json.loads(request.POST.get(EXTERNAL_API_DATA))
        entity_name = external_api_data.get(ENTITY_NAME)
        entity_data = external_api_data.get(ENTITY_DATA)
        language_script = external_api_data.get(LANGUAGE_SCRIPT)
        datastore_obj = DataStore()
        datastore_obj.update_entity_data(entity_name=entity_name,
                                         entity_data=entity_data,
                                         language_script=language_script)
        response['success'] = True
    except (DataStoreSettingsImproperlyConfiguredException,
            EngineNotImplementedException,
            EngineConnectionException, FetchIndexForAliasException) as error_message:
        # Known datastore failures -> HTTP 500 with the error message.
        response['error'] = str(error_message)
        ner_logger.exception('Error: %s' % error_message)
        return HttpResponse(json.dumps(response), content_type='application/json', status=500)
    except Exception as e:
        response['error'] = str(e)
        ner_logger.exception('Error: %s' % e)
        return HttpResponse(json.dumps(response), content_type='application/json', status=500)
    return HttpResponse(json.dumps(response), content_type='application/json', status=200)
@csrf_exempt
def transfer_entities(request):
    """
    This method is used to transfer entities from the source to destination.
    Args:
        request (HttpRequest): POST request whose EXTERNAL_API_DATA payload
            carries ENTITY_LIST (entities to transfer between ES indices).
    Returns:
        HttpResponse : HttpResponse with appropriate status and error message.
    """
    response = {"success": False, "error": "", "result": []}
    try:
        external_api_data = json.loads(request.POST.get(EXTERNAL_API_DATA))
        entity_list = external_api_data.get(ENTITY_LIST)
        datastore_object = DataStore()
        datastore_object.transfer_entities_elastic_search(entity_list=entity_list)
        response['success'] = True
    except (IndexNotFoundException, InvalidESURLException,
            SourceDestinationSimilarException, InternalBackupException, AliasNotFoundException,
            PointIndexToAliasException, FetchIndexForAliasException, DeleteIndexFromAliasException,
            AliasForTransferException, IndexForTransferException, NonESEngineTransferException) as error_message:
        # Any known transfer failure -> HTTP 500 with the error message.
        response['error'] = str(error_message)
        ner_logger.exception('Error: %s' % error_message)
        return HttpResponse(json.dumps(response), content_type='application/json', status=500)
    except Exception as e:
        response['error'] = str(e)
        ner_logger.exception('Error: %s' % e)
        return HttpResponse(json.dumps(response), content_type='application/json', status=500)
    return HttpResponse(json.dumps(response), content_type='application/json', status=200)
def get_crf_training_data(request):
    """
    This function is used obtain the training data given the entity_name.
    Args:
        request (HttpRequest): GET request carrying ENTITY_NAME and an
            optional comma-separated LANGUAGES filter.
    Returns:
        HttpResponse : With data consisting of a dictionary consisting of sentence_list and entity_list
    Examples:
        get request params
        key: "entity_name"
        value: "city"
    """
    response = {"success": False, "error": "", "result": []}
    try:
        entity_name = request.GET.get(ENTITY_NAME)
        languages = request.GET.get(LANGUAGES, '')
        # Empty string would otherwise split into [''].
        languages = languages.split(',') if languages else []
        result = DataStore().get_crf_data_for_entity_name(entity_name=entity_name, languages=languages)
        response['result'] = result
        response['success'] = True
    except (DataStoreSettingsImproperlyConfiguredException,
            EngineNotImplementedException,
            EngineConnectionException, FetchIndexForAliasException) as error_message:
        # Known datastore failures -> HTTP 500 with the error message.
        response['error'] = str(error_message)
        ner_logger.exception('Error: %s' % error_message)
        return HttpResponse(json.dumps(response), content_type='application/json', status=500)
    except Exception as e:
        response['error'] = str(e)
        ner_logger.exception('Error: %s' % e)
        return HttpResponse(json.dumps(response), content_type='application/json', status=500)
    return HttpResponse(json.dumps(response), content_type='application/json', status=200)
@csrf_exempt
def update_crf_training_data(request):
"""
This function is used to update the training data
Args:
request (HttpRequest): HTTP response from url
Returns:
HttpResponse : HttpResponse with appropriate status and error message.
Example for data present in
Post request body
key: "external_api_data"
value: {"sentence_list":["hello pratik","hello hardik"], "entity_list":[["pratik"], ["hardik"]],
"entity_name":"training_try3", "language_script": "en"}
"""
response = {"success": False, "error": "", "result": []}
try:
external_api_data = json.loads(request.POST.get(EXTERNAL_API_DATA))
sentences = external_api_data.get(SENTENCES)
entity_name = external_api_data.get(ENTITY_NAME)
DataStore().update_entity_crf_data(entity_name=entity_name, sentences=sen |
ducu/twitter-most-followed | main.py | Python | mit | 3,521 | 0.027833 | """
Twitter Most Followed
Finding out top most followed accounts by a particular
group of Twitter users such as the Hacker News community.
For this exercise we consider @newsyc20 as our *source*,
and @newsyc20 followers as the HNers, our *target group*.
You can easily run the exercise for a different target
group by specifying the corresponding target group source.
Or you can modify the script so it considers several
sources to start from, such as @newsyc20, @brainpickings,
and @ThisIsSethsBlog. This should be more interesting.
"""
import twitter as t
from twitter import TweepError
import redis
r = redis.StrictRedis(db=0)
import storage
s = storage.RedisStorage(r)
from datetime import datetime
def select_user_data(u):
    """Pick the profile fields we persist for a Twitter user object.

    PEP 8: a ``def`` replaces the lambda bound to a name, and a dict
    comprehension replaces dict() over a list of pairs.
    """
    keys = ['screen_name', 'name', 'description',
            'friends_count', 'followers_count']
    return {k: getattr(u, k) for k in keys}
def load_user_data(user_id=None, screen_name=None):
    """
    Retrieve and set user's data.
    Or get it from the store if already there.

    Exactly one of user_id / screen_name must be given; returns a
    (user_id, user_data) tuple either way.
    """
    assert bool(user_id) != bool(screen_name)
    if user_id:
        user_data = s.get_user_data(user_id)
        if user_data:
            return user_id, user_data  # cache hit: skip the Twitter API call
        user = t.get_user(user_id=user_id)
    else:  # screen_name
        user = t.get_user(screen_name=screen_name)
        user_id = user.id
    user_data = select_user_data(user)
    s.set_user_data(user_id, user_data)
    return user_id, user_data
def load_followers(user_id):
    """
    Retrieve and set user's followers.

    Returns the follower ids as a sorted list.
    """
    # sorted() already returns a new list, so the original
    # sorted(list(...)) made a redundant intermediate copy.
    followers = sorted(t.followers_ids(user_id))
    s.set_followers(user_id, followers)
    # followers = s.get_followers(user_id)
    return followers
def load_friends(user_id):
    """
    Retrieve and set user's friends.

    Skips users already fetched or known to be protected.
    """
    if s.is_protected(user_id) or \
       s.has_friends(user_id):  # loaded before
        return
    try:
        friends = sorted(list(t.friends_ids(user_id)))
        s.set_friends(user_id, friends)
    except TweepError, e:
        if 'Not authorized' in str(e):
            # Protected account: remember it so we do not retry.
            s.mark_protected(user_id)
        # NOTE(review): other TweepErrors are silently swallowed here -
        # presumably deliberate best-effort crawling; confirm.
def aggregate_friends():
    "Aggregate friends into top most followed (delegates to the store)."
    s.set_most_followed()
def top_most_followed(n):
    """
    Display top n most followed, one markdown table row per account.
    """
    i = 1  # 1-based rank
    top = s.get_most_followed(n)  # withscores
    # NOTE(review): 'format' shadows the builtin of the same name here.
    format = "%d | %d | %s | %s | %s ([@%s](https://twitter.com/%s))"
    print "Rank | Popularity | Followers | Friends | Name (@twitter)"
    print "--- | --- | --- | --- | ---"
    for user_id, score in top:
        user_id, user_data = load_user_data(user_id=user_id)
        print format % (i, score,
            user_data['followers_count'], user_data['friends_count'],
            user_data['name'],
            user_data['screen_name'], user_data['screen_name'])
        i += 1
def main():
    """
    Starting from a source (e.g. @newsyc20),
    consider the target group as the source's followers, and
    find out top most followed accounts by the target group.
    """
    # Step 1: Identify the source
    print "\nStep 1: %s" % datetime.now()
    source_name = 'newsyc20'  # target group source
    source_id, source_data = load_user_data(screen_name=source_name)
    # Step 2: Load target group members
    print "\nStep 2: %s" % datetime.now()
    followers = load_followers(source_id)  # target group
    # Step 3: Load friends of target group members (slow: one API call each)
    print "\nStep 3: %s" % datetime.now()
    for follower_id in followers:
        load_friends(user_id=follower_id)
    # Step 4: Aggregate friends into top most followed
    print "\nStep 4: %s" % datetime.now()
    aggregate_friends()  # count friend occurrences
    print "\nDone: %s" % datetime.now()
    print "\nTop most followed by @%s's followers" % source_name
    top_most_followed(100)  # display results
# Script entry point.
if __name__ == '__main__':
    main()
|
mfwarren/FreeCoding | 2015/05/fc_2015_05_04.py | Python | mit | 412 | 0.004854 | #!/usr/bin/env python3
# imports go here
import sched
import time
#
# Free Coding session for 2015-05-04
# Written by Matt Warren
#
# Demo of the stdlib `sched` module: queue two delayed callbacks and
# run them on a blocking scheduler.
scheduler = sched.scheduler(time.time, time.sleep)


def print_time():
    """Scheduled callback: print the current UNIX timestamp."""
    print(time.time())
    return True


scheduler.enter(3, 1, print_time)  # fire after 3 seconds, priority 1
scheduler.enter(5, 1, print_time)  # fire after 5 seconds
print(scheduler.queue)
# BUGFIX: the call was garbled ("schedul | er.run()").
scheduler.run()  # blocking until all scheduled things finish
print("done")
openpli-arm/enigma2-arm | lib/python/Tools/Trashcan.py | Python | gpl-2.0 | 4,571 | 0.03916 | import time
import os
try:
import enigma
from Components.config import config
except:
print "Cannot import enigma"
from Directories import resolveFilename, SCOPE_HDD
def getTrashFolder():
	"""Return the absolute, symlink-free path of the trash folder on the HDD."""
	hdd = resolveFilename(SCOPE_HDD)
	return os.path.realpath(os.path.join(hdd, ".Trash"))
def createTrashFolder():
	"""Ensure the trash folder exists, creating it if needed, and return its path."""
	path = getTrashFolder()
	if not os.path.isdir(path):
		os.mkdir(path)
	return path
class Trashcan:
	"""Listens for recording events and cleans the trash folder when idle."""
	def __init__(self, session):
		# Register for record events and trigger an initial idle check.
		self.session = session
		session.nav.record_event.append(self.gotRecordEvent)
		self.gotRecordEvent(None, None)
	def gotRecordEvent(self, service, event):
		# Called on every record event; clean only when a recording ends.
		print "[Trashcan] gotRecordEvent", service, event
		self.recordings = len(self.session.nav.getRecordings())
		if (event == enigma.iRecordableService.evEnd):
			self.cleanIfIdle()
	def destroy(self):
		# Unhook from the session; safe to call more than once.
		if self.session is not None:
			self.session.nav.record_event.remove(self.gotRecordEvent)
		self.session = None
	def __del__(self):
		self.destroy()
	def cleanIfIdle(self):
		# RecordTimer calls this when preparing a recording. That is a
		# nice moment to clean up.
		if self.recordings:
			print "[Trashcan] Recording in progress", self.recordings
			return
		try:
			# Expiry limit and reserved free space come from user config.
			ctimeLimit = time.time() - (config.usage.movielist_trashcan_days.value * 3600 * 24)
			reserveBytes = 1024*1024*1024 * int(config.usage.movielist_trashcan_reserve.value)
			clean(ctimeLimit, reserveBytes)
		except Exception, e:
			print "[Trashcan] Weirdness:", e
def clean(ctimeLimit, reserveBytes):
# Remove expired items from trash, and attempt to have
# reserveBytes of free disk space.
trash = getTrashFolder()
if not os.path.isdir(trash):
print "[Trashcan] No trash.", trash
return 0
diskstat = os.statvfs(trash)
free = diskstat.f_bfree * diskstat.f_bsize
bytesToRemove = reserveBytes - free
candidates = []
print "[Trashcan] bytesToRemove", bytesToRemove
size = 0
for root, dirs, files in os.walk(trash, topdown=False):
for name in files:
try:
fn = os.path.join(root, name)
st = os.stat(fn)
if st.st_ctime < ctimeLimit:
print "[Trashcan] Too old:", name, st.st_ctime
enigma.eBackgroundFileEraser.getInstance().erase(fn)
bytesToRemove -= st.st_size
else:
candidates.append((st.st_ctime, fn, st.st_size))
size += st.st_size
except Exception, e:
print "[Trashcan] Failed | to stat %s:"% name, e
# Remove empty directories if possible
for name in dirs:
try:
os.rmdir(os.path.join(root, name))
| except:
pass
candidates.sort()
# Now we have a list of ctime, candidates, size. Sorted by ctime (=deletion time)
print "[Trashcan] Bytes to remove:", bytesToRemove
print "[Trashcan] Size now:", size
for st_ctime, fn, st_size in candidates:
if bytesToRemove < 0:
break
enigma.eBackgroundFileEraser.getInstance().erase(fn)
bytesToRemove -= st_size
size -= st_size
print "[Trashcan] Size now:", size
def cleanAll():
	"""Erase everything in the trash folder, pruning empty directories."""
	trash = getTrashFolder()
	if not os.path.isdir(trash):
		print "[Trashcan] No trash.", trash
		return 0
	for root, dirs, files in os.walk(trash, topdown=False):
		for name in files:
			fn = os.path.join(root, name)
			try:
				enigma.eBackgroundFileEraser.getInstance().erase(fn)
			except Exception, e:
				print "[Trashcan] Failed to erase %s:"% name, e
		# Remove empty directories if possible
		for name in dirs:
			try:
				os.rmdir(os.path.join(root, name))
			except:
				pass
def init(session):
	"""Create the module-level Trashcan instance bound to *session*."""
	global instance
	instance = Trashcan(session)
# Unit test
# (can be run outside enigma. Can be moved somewhere else later on)
if __name__ == '__main__':
	class Fake:
		"""Stand-in impersonating every enigma/config attribute the code touches."""
		def __init__(self):
			# Point every expected attribute back at this object.
			self.record_event = []
			self.nav = self
			self.RecordTimer = self
			self.usage = self
			self.movielist_trashcan_days = self
			self.movielist_trashcan_reserve = self
			self.value = 1
			self.eBackgroundFileEraser = self
			self.iRecordableService = self
			self.evEnd = None
		def getInstance(self):
			# eBackgroundFileEraser
			return self
		def erase(self, fn):
			# Fake eraser: just report what would be deleted.
			print "ERASE", fn
		def getNextRecordingTime(self):
			# RecordTimer
			return time.time() + 500
		def getRecordings(self):
			return []
		def destroy(self):
			if self.record_event:
				raise Exception, "record_event not empty" + str(self.record_event)
	s = Fake()
	createTrashFolder()
	config = s
	enigma = s
	init(s)
	diskstat = os.statvfs('/hdd/movie')
	free = diskstat.f_bfree * diskstat.f_bsize
	# Clean up one MB
	clean(1264606758, free + 1000000)
	cleanAll()
	instance.destroy()
	s.destroy()
|
dedelost/py-demoList | python基础教程/第六章抽象/抽象.py | Python | mit | 215 | 0.013953 | # -*- coding:utf-8 -*-
def fib(num):
'fibs function'
fibs = [0,1]
| for x in range(num):
fibs.append(fibs[-2]+fibs[-1])
print fibs
print callable(fib)
fib(15)
print fib.__doc__
print he | lp(fib) |
resmo/ansible | lib/ansible/modules/network/slxos/slxos_facts.py | Python | gpl-3.0 | 13,825 | 0.000796 | #!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: slxos_facts
version_added: "2.6"
author: "Lindsay Hill (@LindsayHill)"
short_description: Collect facts from devices running Extreme SLX-OS
description:
- Collects a base set of device facts from a remote device that
is running SLX-OS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
notes:
- Tested against SLX-OS 17s.1.02
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: ['!config']
"""
EXAMPLES = """
# Collect all facts from the device
- slxos_facts:
gather_subset: all
# Collect only the config and default facts
- slxos_facts:
gather_subset:
- config
# Do not collect hardware facts
- slxos_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All Primary IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.network.slxos.slxos import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
    """Base for fact collectors: runs COMMANDS on the device and keeps raw output."""

    COMMANDS = []

    def __init__(self, module):
        """Remember the Ansible module and start with an empty fact dict."""
        self.module = module
        self.facts = {}
        self.responses = None

    def populate(self):
        """Execute COMMANDS on the device and store the raw responses."""
        self.responses = run_commands(self.module, self.COMMANDS)

    def run(self, cmd):
        """Run a single command on the device and return its output."""
        return run_commands(self.module, cmd)
class Default(FactsBase):
    """Collects baseline facts: OS version, model, serial number, hostname."""
    COMMANDS = [
        'show version',
        'show inventory chassis',
        r'show running-config | include host\-name'
    ]

    def populate(self):
        # Responses arrive in COMMANDS order; each parser tolerates no match.
        super(Default, self).populate()
        data = self.responses[0]
        if data:
            self.facts['version'] = self.parse_version(data)

        data = self.responses[1]
        if data:
            self.facts['model'] = self.parse_model(data)
            self.facts['serialnum'] = self.parse_serialnum(data)

        data = self.responses[2]
        if data:
            self.facts['hostname'] = self.parse_hostname(data)

    def parse_version(self, data):
        """Return the SLX-OS version string, or None if not found."""
        match = re.search(r'SLX-OS Operating System Version: (\S+)', data)
        if match:
            return match.group(1)

    def parse_model(self, data):
        """Return the SID (model) field from the chassis inventory."""
        match = re.search(r'SID:(\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_hostname(self, data):
        """Return the configured host-name, or None if not set."""
        match = re.search(r'switch-attributes host-name (\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_serialnum(self, data):
        """Return the SN (serial number) field from the chassis inventory."""
        match = re.search(r'SN:(\S+)', data, re.M)
        if match:
            return match.group(1)
class Hardware(FactsBase):
    """Collects memory statistics from 'show process memory summary'."""
    COMMANDS = [
        'show process memory summary'
    ]

    def populate(self):
        """Parse total/free memory (device reports KiB) into MB facts."""
        super(Hardware, self).populate()
        data = self.responses[0]
        if data:
            # BUGFIX: a stray "|" artifact preceded the memtotal_mb assignment.
            self.facts['memtotal_mb'] = int(round(int(self.parse_memtotal(data)) / 1024, 0))
            self.facts['memfree_mb'] = int(round(int(self.parse_memfree(data)) / 1024, 0))

    def parse_memtotal(self, data):
        """Return total memory (KiB) as a string, or None if absent."""
        match = re.search(r'Total\s*Memory: (\d+)\s', data, re.M)
        if match:
            return match.group(1)

    def parse_memfree(self, data):
        """Return free memory (KiB) as a string, or None if absent."""
        match = re.search(r'Total Free: (\d+)\s', data, re.M)
        if match:
            return match.group(1)
class Config(FactsBase):
    """Captures the device's full running configuration as a fact."""
    COMMANDS = ['show running-config']

    def populate(self):
        super(Config, self).populate()
        output = self.responses[0]
        if output:
            self.facts['config'] = output
class Interfaces(FactsBase):
COMMANDS = [
'show interface',
'show ipv6 interface brief',
r'show lldp nei detail | inc ^Local\ Interface|^Remote\ Interface|^System\ Name'
]
def populate(self):
super(Interfaces, self).populate()
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.responses[0]
if data:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
self.populate_ipv4_interfaces(interfaces)
data = self.responses[1]
if data:
self.populate_ipv6_interfaces(data)
data = self.responses[2]
if data:
self.facts['neighbors'] = self.parse_neighbors(data)
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
intf['mtu'] = self.parse_mtu(value)
intf['bandwidth'] = self.parse_bandwidth(value)
intf['duplex'] = self.parse_duplex(value)
intf['lineprotocol'] = self.parse_lineprotocol(value)
intf['operstatus'] = self.parse_operstatus(value)
intf['type'] = self.parse_type(value)
facts[key] = intf
return facts
def populate_ipv4_interfaces(self, data):
for key, value in data.items():
self.facts['interfaces'][key]['ipv4'] = list()
primary_address = addre |
vmendez/DIRAC | DataManagementSystem/private/FTS3/FTS3Placement.py | Python | gpl-3.0 | 5,870 | 0.028109 | from DIRAC import S_ERROR, S_OK, gLogger
from DIRAC.DataManagementSystem.private.FTSAbstractPlacement import FTSAbstractPlacement, FTSRoute
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS3Servers
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
import random
class FTS3Placement( FTSAbstractPlacement ):
  """
  This class manages all the FTS strategies, routes and what not
  """

  # Class-level defaults; the server list and attempt budget are set
  # per instance in __init__.
  __serverPolicy = "Random"
  __nextServerID = 0
  __serverList = None
  __maxAttempts = 0

  def __init__( self, csPath = None, ftsHistoryViews = None ):
    """
    Call the init of the parent, and initialize the list of FTS3 servers
    """
    self.log = gLogger.getSubLogger( "FTS3Placement" )
    super( FTS3Placement, self ).__init__( csPath = csPath, ftsHistoryViews = ftsHistoryViews )
    srvList = getFTS3Servers()
    if not srvList['OK']:
      self.log.error( srvList['Message'] )
    self.__serverList = srvList.get( 'Value', [] )
    self.maxAttempts = len( self.__serverList )
    self.rssClient = ResourceStatus()

  def getReplicationTree( self, sourceSEs, targetSEs, size, strategy = None ):
    """ For multiple source to multiple destination, find the optimal replication
        strategy.

        :param sourceSEs : list of source SE
        :param targetSEs : list of destination SE
        :param size : size of the File
        :param strategy : which strategy to use

        :returns S_OK(dict) < route name : { dict with keys Ancestor, SourceSE, TargetSE, Strategy } >

        For the time being, we are waiting for FTS3 to provide advisory mechanisms. So we just use
        simple techniques
    """

    # We will use a single random source
    sourceSE = random.choice( sourceSEs )

    tree = {}

    for targetSE in targetSEs:
      tree["%s#%s" % ( sourceSE, targetSE )] = { "Ancestor" : False, "SourceSE" : sourceSE,
                                                 "TargetSE" : targetSE, "Strategy" : "FTS3Simple" }

    return S_OK( tree )

  def refresh( self, ftsHistoryViews ):
    """
    Refresh, whatever that means... recalculate all what you need,
    fetches the latest conf and what not.
    """
    return super( FTS3Placement, self ).refresh( ftsHistoryViews = ftsHistoryViews )

  def __failoverServerPolicy( self, attempt = 0 ):
    """
    Returns always the server at a given position (normally the first one)

    :param attempt: position of the server in the list
    """
    if attempt >= len( self.__serverList ):
      raise Exception( "FTS3Placement.__failoverServerPolicy: attempt to reach non existing server index" )
    return self.__serverList[attempt]

  def __sequenceServerPolicy( self ):
    """
    Every time this policy is called, return the next server on the list
    """

    fts3server = self.__serverList[self.__nextServerID]
    self.__nextServerID = ( self.__nextServerID + 1 ) % len( self.__serverList )
    return fts3server

  def __randomServerPolicy( self ):
    """
    return a random server from the list
    """
    return random.choice( self.__serverList )

  def __chooseFTS3Server( self ):
    """
    Choose the appropriate FTS3 server depending on the policy
    """

    fts3Server = None
    attempt = 0
    # FIXME : need to get real value from RSS
    ftsServerStatus = True

    while not fts3Server and attempt < self.maxAttempts:
      if self.__serverPolicy == 'Random':
        fts3Server = self.__randomServerPolicy()
      elif self.__serverPolicy == 'Sequence':
        fts3Server = self.__sequenceServerPolicy()
      elif self.__serverPolicy == 'Failover':
        # BUGFIX: this call was garbled ("self.__fa | iloverServerPolicy").
        fts3Server = self.__failoverServerPolicy( attempt = attempt )
      else:
        self.log.error( 'Unknown server policy %s. Using Random instead' % self.__serverPolicy )
        fts3Server = self.__randomServerPolicy()

      if not ftsServerStatus:
        self.log.warn( 'FTS server %s is not in good shape. Choose another one' % fts3Server )
        fts3Server = None
      attempt += 1
      # FIXME : I need to get the FTS server status from RSS
      # ftsStatusFromRss = rss.ftsStatusOrSomethingLikeThat

    if fts3Server:
      return S_OK( fts3Server )

    return S_ERROR ( "Could not find an FTS3 server (max attempt reached)" )

  def findRoute( self, sourceSE, targetSE ):
    """ Find the appropriate route from point A to B

        :param sourceSE : source SE
        :param targetSE : destination SE

        :returns S_OK(FTSRoute)
    """

    fts3server = self.__chooseFTS3Server()

    if not fts3server['OK']:
      return fts3server

    fts3server = fts3server['Value']

    route = FTSRoute( sourceSE, targetSE, fts3server )

    return S_OK( route )

  def isRouteValid( self, route ):
    """
    FIXME: until RSS is ready, I check manually the status
    In FTS3, all routes are valid a priori.
    If a route was not valid for some reason, then FTS would know it
    thanks to the blacklist sent by RSS, and would deal with it itself.

    :param route : FTSRoute

    :returns S_OK or S_ERROR(reason)
    """

    rAccess = self.rssClient.getStorageElementStatus( route.sourceSE, "ReadAccess" )
    self.log.debug( "se read %s %s" % ( route.sourceSE, rAccess ) )
    if not rAccess["OK"]:
      self.log.error( rAccess["Message"] )
      return rAccess

    if rAccess["Value"][route.sourceSE]["ReadAccess"] not in ( "Active", "Degraded" ):
      return S_ERROR( "Source SE is not readable" )

    wAccess = self.rssClient.getStorageElementStatus( route.targetSE, "WriteAccess" )
    self.log.debug( "se write %s %s" % ( route.targetSE, wAccess ) )
    if not wAccess["OK"]:
      self.log.error( wAccess["Message"] )
      return wAccess
    if wAccess["Value"][route.targetSE]["WriteAccess"] not in ( "Active", "Degraded" ):
      return S_ERROR( "Target SE is not writable" )

    return S_OK()
|
openSUSE/docmanager | src/docmanager/cli/cmd_analyze.py | Python | gpl-3.0 | 1,621 | 0.001234 | #
# Copyright (c) 2015 SUSE Linux GmbH
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
def analyze_subcmd(subparsers, queryformat, filters, sort, quiet, stop_on_error, default_output, filesargs):
    """Create the 'analyze' subcommand.

    :param subparsers: Subparser collection for all subcommands
    :param queryformat: add_argument() options for -qf/--queryformat
    :param filters: add_argument() options for -f/--filter
    :param sort: add_argument() options for -s/--sort
    :param quiet: add_argument() options for -q/--quiet
    :param stop_on_error: add_argument() options for --stop-on-error
    :param default_output: add_argument() options for -do/--default-output
    :param dict filesargs: add_argument() options for the FILE argument
    """
    parser = subparsers.add_parser('analyze',
                                   aliases=['a'],
                                   help='Analyzes the given XML files.'
                                   )
    # Register the options in the same order the original command used.
    for flags, options in (
            (('-qf', '--queryformat'), queryformat),
            (('-f', '--filter'), filters),
            (('-s', '--sort'), sort),
            (('--stop-on-error',), stop_on_error),
            (('-q', '--quiet'), quiet),
            (('-do', '--default-output'), default_output),
    ):
        parser.add_argument(*flags, **options)
    parser.add_argument("files", **filesargs)
|
autowitch/pypov | scenes/geomorphs/lib/geomorphs/edge_5x10_003.py | Python | mit | 1,577 | 0.013951 | from pypov.pov import Texture, Pigment, Intersection, Cylinder
from pypov.pov import Union, Difference, Object, Box, Sphere
from pypov.common import grey, white
fr | om pypov.colors import Colors
from lib.base import five_by_ten_edge
from lib.textures import cross_hatch, cross_hatch_2, wall_texture_1
from lib.metadata import Metadata
def edge_5x10_003_info():
    """Return the Metadata record describing this 5x10 edge geomorph tile."""
    # BUGFIX: a stray "|" artifact preceded the return statement.
    return Metadata("Non connected edge passages", "e3",
        description="Non connected edge passages",
        block_type="edge",
        bottom=0, top=20,
        size="5x10",
        repeatable=True,
        fully_connected=False,
        dead_ends=False,
        entrance=False,
        has_rooms=False,
        passage_type="hewn",
        wet=False,
        multi_level=False,
        keywords=['passages', 'boring', 'basic'])
def edge_5x10_003(rotate=(0, 0, 0), translate=(0, 0, 0), detail_level=1,
        cross_hatch_texture=cross_hatch_2):
    """Build the 5x10 edge geomorph as a POV-Ray CSG object.

    The tile is the base edge slab minus the hall volumes, then rotated
    and translated into place.
    """
    geomorph = Union(
        Difference(
            Union(
                Object(five_by_ten_edge(), cross_hatch_texture),
            ),
            Union(
                # Halls carved out of the slab (coordinates in tile units)
                Box(( -22.5, 10.0001, -10), ( -27.5, 21, -26)),
                Box(( 22.5, 10.0002, -10), ( 27.5, 21, -26)),
                Box(( -51, 10, -2.5), (51, 21, 2.5)),
                Box(( -27.5, 10, -10.0), (27.5, 21, -5.0)),
                wall_texture_1
            ),
        ),
        translate=translate,
        rotate=rotate
    )
    return geomorph
|
DjangoAdminHackers/django-link-report | link_report/management/commands/update_sentry_404s.py | Python | mit | 232 | 0 | from __fu | ture__ import unicode_literals
from django.core.management.base import BaseCommand
from ...utils import update_sentry_404s
class Command(BaseCommand):
    """Management command that refreshes the cached Sentry 404 report data."""

    def handle(self, *args, **kwargs):
        # BUGFIX: a stray "|" artifact preceded this call.
        update_sentry_404s()
|
LPM-HMS/COSMOS2 | setup.py | Python | gpl-3.0 | 2,740 | 0.00146 | import os
import re
import sys
from setuptools import setup, find_packages
# Read the package version from cosmos/VERSION (single source of truth).
with open(os.path.join(os.path.dirname(__file__), "cosmos/VERSION"), "r") as fh:
    __version__ = fh.read().strip()
def find_all(path, reg_expr, inverse=False, remove_prefix=False):
    """Yield file paths under *path* whose basenames match *reg_expr*.

    :param path: directory to walk recursively
    :param reg_expr: regular expression searched against each filename
    :param inverse: when True, yield files that do NOT match
    :param remove_prefix: when True, strip *path* from the yielded paths
    """
    prefix = path if path.endswith("/") else path + "/"
    for root, _dirs, filenames in os.walk(prefix):
        for fname in filenames:
            matched = re.search(reg_expr, fname) is not None
            if inverse:
                matched = not matched
            if not matched:
                continue
            full = os.path.join(root, fname)
            yield full.replace(prefix, "") if remove_prefix else full
install_requires = [
    "funcsigs",
    "boto3",
    "blinker",
    "sqlalchemy",
    "networkx>=2.0",
    "six",
    "drmaa",
    "more-itertools",
    "decorator",
    "python-dateutil",
    "flask",
]

# Everything under cosmos/ that is not a .py/.pyc file ships as package data.
package_data = {"cosmos": list(find_all("cosmos/", ".py|.pyc$", inverse=True, remove_prefix=True))}

setup(
    name="cosmos-wfm",
    version=__version__,
    scripts=["bin/cosmos", "bin/run_pyfunc"],
    description="Workflow Management System",
    long_description="Cosmos is a library for writing analysis pipelines, and is particularly suited for pipelines "
    "which analyze next generation sequencing genomic "
    "data. See https://github.com/Mizzou-CBMI/COSMOS2 for details.",
    url="https://mizzou-cbmi.github.io/",
    author="Erik Gafni",
    author_email="egafni@gmail.com",
    maintainer="Erik Gafni",
    maintainer_email="egafni@gmail.com",
    license="GPL v3",
    install_requires=install_requires,
    extras_require={
        # BUGFIX: "sphinx_rtd_theme" was garbled ("sphinx_rtd_the | me") and
        # "sphinx"/"sphinx_rtd_theme" were listed twice; deduplicated.
        "test": [
            "flask",
            "ipython",
            "sphinx",
            "sphinx_rtd_theme",
            "black",
            "pytest-timeout",
            "pytest-xdist",
            "ghp-import",
        ]
    },
    packages=find_packages(),
    include_package_data=True,
    package_data=package_data,
    # package_dir = {'cosmos': 'cosmos'},
    classifiers=[
        "Programming Language :: Python :: 3.6",
        # BUGFIX: this classifier was garbled ("Python :: | 3.7").
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: MacOS",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Topic :: Software Development",
        "Topic :: Utilities",
    ],
    # BUGFIX: "ipeline" typo fixed to "pipeline".
    keywords="workflow machine learning pipeline ngs manager management distributed sge "
    "slurm genomics sequencing grid computing scientific",
)
|
release-monitoring/anitya | anitya/wsgi.py | Python | gpl-2.0 | 835 | 0.001198 | # This file is part of the Anitya project.
# Copyright (C) 2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You shoul | d have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from .app import create

# WSGI entry point: module-level application object served by the WSGI server.
application = create()
|
jmaher/treeherder | tests/client/test_perfherder_client.py | Python | mpl-2.0 | 1,809 | 0.003317 | import unittest
import responses
from treeherder.client.thclient import PerfherderClient
class PerfherderClientTest(unittest.TestCase):
    """Unit tests for PerfherderClient against mocked HTTP responses."""

    @responses.activate
    def test_get_performance_signatures(self):
        """Signatures endpoint should expose hashes, property names and values."""
        # BUGFIX: a stray "|" artifact preceded this assignment.
        pc = PerfherderClient()

        url = pc._get_endpoint_url(pc.PERFORMANCE_SIGNATURES_ENDPOINT, project='mozilla-central')
        content = {
            'signature1': {'cheezburgers': 1},
            'signature2': {'hamburgers': 2},
            'signature3': {'cheezburgers': 2},
        }
        responses.add(responses.GET, url, json=content, match_querystring=True, status=200)

        # BUGFIX: the project-name string was garbled (' | mozilla-central').
        sigs = pc.get_performance_signatures('mozilla-central')
        self.assertEqual(len(sigs), 3)
        self.assertEqual(sigs.get_signature_hashes(), ['signature1', 'signature2', 'signature3'])
        self.assertEqual(sigs.get_property_names(), set(['cheezburgers', 'hamburgers']))
        self.assertEqual(sigs.get_property_values('cheezburgers'), set([1, 2]))

    @responses.activate
    def test_get_performance_data(self):
        """Data endpoint should return one series per requested signature."""
        pc = PerfherderClient()

        url = '{}?{}'.format(
            pc._get_endpoint_url(pc.PERFORMANCE_DATA_ENDPOINT, project='mozilla-central'),
            'signatures=signature1&signatures=signature2',
        )
        content = {
            'signature1': [{'value': 1}, {'value': 2}],
            'signature2': [{'value': 2}, {'value': 1}],
        }
        responses.add(responses.GET, url, json=content, match_querystring=True, status=200)

        series_list = pc.get_performance_data(
            'mozilla-central', signatures=['signature1', 'signature2']
        )
        self.assertEqual(len(series_list), 2)
        self.assertEqual(series_list['signature1']['value'], [1, 2])
        self.assertEqual(series_list['signature2']['value'], [2, 1])
|
cjohnson98/transistor-pi | radio3.py | Python | gpl-3.0 | 2,243 | 0.027196 | #!/usr/bin/python
# selects stream from tuning dial position
# monitors battery condition
from __future__ import division
import spidev
import time
import os
import gc
import sys
import math
global tune1, tune2, tunerout, volts2, volume1, volume2, volumeout, IStream
tune1 = False
tune2 = False
tunerout = False
volts2 = False
volume1 = False
volume2 = False
volumeout = False
IStream = False # start system on mp3s
# open spi port:
spi = spidev.SpiDev()
spi.open(0,0)
def ReadChannel(channel):  # read channel from mcd3008; channel must be integer 0 - 7
    # SPI transfer: start bit, single-ended mode + channel select, padding byte.
    adc = spi.xfer2([1,(8+channel)<<4,0])
    # Combine the two low bits of byte 1 with byte 2 into a 10-bit reading.
    data = ((adc[1]&3) << 8) + adc[2]
    return data # returns value between 0 and 1023
# check volume control here. If < 1,
# set voice control
os.popen("mpc repeat on") # playlist will loop
os.popen("mpc play -q ") # start playing whatever the last playlist was
while True: # main loop
    # Check battery:
    volts2 = ReadChannel(2)
    if ((volts2 < 210) and (volts2 > 10)): # battery is present, but weak
        print time.ctime()
        print volts2
        os.popen("mpc clear -q ")
        os.popen("espeak -a 150 'battery low' 2>/dev/null")
        os.popen("sudo shutdown -h now") # shutdown on low battery
        time.sleep(1)
        sys.exit()
    # read the tuning dial:
    tune2 = ReadChannel(0)
    if (tune2 == 0):
        IStream = False
    if (tune2 == 1023):
        IStream = True
    ditherfactor = int(tune2 / 50) + 3 # anti-dither
    if ((tune2 < tune1 - ditherfactor) or (tune2 > tune1 + ditherfactor)): # tuning change?
        tunerout = int(tune2 / 25) # returns a value of 0 to 40
        tunerout = tunerout + (100 * IStream) # adds 100 to base if playing streams
        tuneroutstring = "mpc load -q " + str(tunerout) # set up the mpc instruction
        os.popen("mpc clear -q") # stop play and clear the playlist
        os.popen(tuneroutstring) # load the new playlist
        os.popen("mpc play -q ") # start play
        tune1 = tune2
        time.sleep(.5)
    # read the volume control:
    volume2 = ReadChannel(1)
    ditherfactor = int(volume2 / 50) + 1
    if ((volume2 < volume1 - ditherfactor) or (volume2 > volume1 + ditherfactor)):
        volumeout = int(math.log((volume2 + 1),10) * 33) + 1 # VC smoothing (log taper)
        volumeoutstring = "mpc volume -q " + str(volumeout)
        os.popen(volumeoutstring)
        volume1 = volume2
        time.sleep(.5)
|
GNUDimarik/dimecoin | qa/rpc-tests/util.py | Python | mit | 5,261 | 0.008554 | # Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal
import json
import shutil
import subprocess
import time
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
START_P2P_PORT=11000
START_RPC_PORT=11100
def check_json_precision():
    """Make sure json library being used does not lose precision converting DIME values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """
    Wait until everybody has the same block count.

    Returns immediately for zero or one connection (the original code
    raised IndexError on an empty list via counts[0]).
    """
    while True:
        counts = [x.getblockcount() for x in rpc_connections]
        # All counts equal; also true for the empty/single-node case.
        if len(set(counts)) <= 1:
            return
        time.sleep(1)
def sync_mempools(rpc_connections):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        if all(set(conn.getrawmempool()) == reference
               for conn in rpc_connections[1:]):
            return
        time.sleep(1)
bitcoind_processes = []
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.
    """
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir = os.path.join("cache", "node"+str(i))
            os.makedirs(datadir)
            # BUGFIX: the config filename was garbled ("bitco | in.conf").
            with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
                f.write("regtest=1\n")
                f.write("rpcuser=rt\n")
                f.write("rpcpassword=rt\n")
                f.write("port="+str(START_P2P_PORT+i)+"\n")
                f.write("rpcport="+str(START_RPC_PORT+i)+"\n")
            args = [ "bitcoind", "-keypool=1", "-datadir="+datadir ]
            if i > 0:
                # Every node after the first connects to node 0.
                args.append("-connect=127.0.0.1:"+str(START_P2P_PORT))
            bitcoind_processes.append(subprocess.Popen(args))
            subprocess.check_call([ "bitcoin-cli", "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,)
                rpcs.append(AuthServiceProxy(url))
            except Exception:  # narrowed from a bare except
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)

        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        for i in range(4):
            rpcs[i].setgenerate(True, 25)
        sync_blocks(rpcs)
        for i in range(4):
            rpcs[i].setgenerate(True, 25)
        sync_blocks(rpcs)

        # Shut them down, and remove debug.logs:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(debug_log("cache", i))

    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
def start_nodes(num_nodes, dir):
    """Start num_nodes bitcoinds under dir and return JSON-RPC connections."""
    # Start bitcoinds, and wait for RPC interface to be up and running:
    devnull = open("/dev/null", "w+")
    for i in range(num_nodes):
        datadir = os.path.join(dir, "node"+str(i))
        args = [ "bitcoind", "-datadir="+datadir ]
        bitcoind_processes.append(subprocess.Popen(args))
        # -rpcwait blocks until the node's RPC server answers.
        subprocess.check_call([ "bitcoin-cli", "-datadir="+datadir,
                                "-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    # Create&return JSON-RPC connections
    rpc_connections = []
    for i in range(num_nodes):
        url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,)
        rpc_connections.append(AuthServiceProxy(url))
    return rpc_connections
def debug_log(dir, n_node):
    """Return the path of node *n_node*'s regtest debug.log under *dir*."""
    node_dir = "node" + str(n_node)
    return os.path.join(dir, node_dir, "regtest", "debug.log")
def stop_nodes(nodes):
    """Ask every node to stop, then empty the list."""
    for node in nodes:
        node.stop()
    del nodes[:]  # emptying the array closes connections as a side effect
def wait_bitcoinds():
    """Block until every spawned bitcoind has exited, then forget them."""
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes:
        bitcoind.wait()
    del bitcoind_processes[:]
def connect_nodes(from_connection, node_num):
    """One-shot connect *from_connection* to the node_num-th local node."""
    target = "127.0.0.1:" + str(START_P2P_PORT + node_num)
    from_connection.addnode(target, "onetry")
def assert_equal(thing1, thing2):
    """Raise AssertionError unless the two values compare equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
|
mesosphere/dcos-commons | frameworks/helloworld/tests/test_region_awareness.py | Python | apache-2.0 | 3,469 | 0.003171 | import logging
import os
import pytest
import sdk_cmd
import sdk_install
import sdk_marathon
import sdk_plan
impo | rt sdk_utils
from tests import config
log = logging.getLogger(__name__)
POD_NAMES = ["hello-0", "world-0", "world-1"]
REMOTE_REGION = os.environ.get("REMOTE_REGION") or None
def remote_region_enabled():
    """True when a remote region was supplied via the REMOTE_REGION env var."""
    if REMOTE_REGION is None:
        return False
    return True
@pytest.fixture
def local_service():
    """Install a 3-task hello-world in MULTI_REGION mode with region
    awareness allowed but no explicit region, and uninstall afterwards."""
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            3,
            additional_options={
                "service": {"scenario": "MULTI_REGION", "allow_region_awareness": True}
            },
        )
        yield
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.fixture
def remote_service():
    """Install a 3-task hello-world pinned to REMOTE_REGION via the
    'region' service option, and uninstall afterwards."""
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            3,
            additional_options={
                "service": {
                    "scenario": "MULTI_REGION",
                    "allow_region_awareness": True,
                    "region": REMOTE_REGION,
                }
            },
        )
        yield
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
# Unlike the following tests, this one does not require that a remote region be configured
@pytest.mark.dcos_min_version("1.11")
@pytest.mark.sanity
@sdk_utils.dcos_ee_only
def test_nodes_deploy_to_local_region_by_default(configure_universe, local_service):
    """With no region configured, every pod should land in the master's
    (i.e. local) region."""
    # Fetch master's region name: this is defined to be the local region
    local_region = sdk_cmd.cluster_request("GET", "/mesos/state").json()["domain"]["fault_domain"][
        "region"
    ]["name"]
    for pod_name in POD_NAMES:
        pod_region = get_pod_region(config.SERVICE_NAME, pod_name)
        assert pod_region == local_region
@pytest.mark.dcos_min_version("1.11")
@pytest.mark.sanity
@pytest.mark.skipif(
    not remote_region_enabled(),
    reason="REMOTE_REGION is not configured: remote nodes needed for test",
)
@sdk_utils.dcos_ee_only
def test_nodes_can_deploy_to_remote_region(configure_universe, remote_service):
    """When the service is pinned to REMOTE_REGION, every pod must be
    placed in that region."""
    for pod_name in POD_NAMES:
        pod_region = get_pod_region(config.SERVICE_NAME, pod_name)
        assert pod_region == REMOTE_REGION
@pytest.mark.dcos_min_version("1.11")
@pytest.mark.sanity
@pytest.mark.skipif(
    not remote_region_enabled(),
    reason="REMOTE_REGION is not configured: remote nodes needed for test",
)
@sdk_utils.dcos_ee_only
def test_region_config_update_does_not_succeed(configure_universe, local_service):
    """Changing the region of an already-deployed service must drive the
    deploy plan to ERROR; reverting the change must let it complete."""
    change_region_config(REMOTE_REGION)
    sdk_plan.wait_for_plan_status(config.SERVICE_NAME, "deploy", "ERROR", timeout_seconds=180)
    change_region_config(None)
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME, timeout_seconds=180)
def change_region_config(region_name):
    """Set (or, when None, clear) the SERVICE_REGION env var on the
    scheduler's marathon app, without waiting for redeployment."""
    app = sdk_marathon.get_config(config.SERVICE_NAME)
    env = app["env"]
    if region_name is None:
        del env["SERVICE_REGION"]
    else:
        env["SERVICE_REGION"] = region_name
    sdk_marathon.update_app(app, wait_for_completed_deployment=False)
def get_pod_region(service_name, pod_name):
    """Return the region the given pod was placed in.

    Reads the scheduler's /v1/pod/<name>/info endpoint and extracts the
    'offer_region' task label; raises IndexError if the label is absent.
    """
    info = sdk_cmd.service_request("GET", service_name, "/v1/pod/{}/info".format(pod_name)).json()[
        0
    ]["info"]
    return [l["value"] for l in info["labels"]["labels"] if l["key"] == "offer_region"][0]
|
seraphln/onedrop | onedrop/scripts/batch_add_crawler_seeds.py | Python | gpl-3.0 | 999 | 0.005417 | # coding=utf8
#
"""
批量将采集任务的种子信息放到对应的采集队列里
"""
# 添加django的环境变量
import os
import sys
from os.path import dirname, join
sys.path.appen | d(join(dirname(__file__), '../'))
sys.path.append(join(dirname(__file__), '../../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'onedrop.settings'
import django
django.setup()
from datetime import datetime
from datetime import timedelta
from onedrop.odtasks.models import CrawlerSeeds
from onedrop.utils.redis_op import rop
def backend():
    """Load every crawler seed into the redis task queues.

    For each seed: mark it as 'crawling', stamp modified/last-crawl with
    the current UTC time, persist it, then push its id onto the queues.
    """
    now = datetime.utcnow()
    seeds = CrawlerSeeds.objects.filter()
    for seed in seeds:
        seed.status = "crawling"
        seed.modified_on = now
        seed.last_crawl_on = now
        seed.save()
        print "Putting %s to redis" % seed.name
        # NOTE(review): the id is pushed onto two queues; confirm whether
        # the bare 'seed' queue is still consumed or is legacy duplication.
        rop.add_task_queue("onedrop.crawler.seed", str(seed.id))
        rop.add_task_queue("seed", str(seed.id))
# Script entry point: load all crawler seeds into the redis task queues.
if __name__ == "__main__":
    backend()
|
sreichholf/python-coherence | coherence/backends/tracker_storage.py | Python | mit | 28,384 | 0.011133 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
import os.path
from twisted.internet import reactor, defer
from twisted.python import failure, util
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.soap_service import errorCode
from coherence.upnp.core import utils
import dbus
import dbus.service
import coherence.extern.louie as louie
from coherence.backend import BackendItem, BackendStore
ROOT_CONTAINER_ID = 0
AUDIO_CONTAINER_ID = 100
AUDIO_ALL_CONTAINER_ID = 101
AUDIO_ARTIST_CONTAINER_ID = 102
AUDIO_ALBUM_CONTAINER_ID = 103
AUDIO_PLAYLIST_CONTAINER_ID = 104
AUDIO_GENRE_CONTAINER_ID = 105
VIDEO_CONTAINER_ID = 200
VIDEO_ALL_CONTAINER_ID = 201
IMAGE_CONTAINER_ID = 300
IMAGE_ALL_CONTAINER_ID = 301
BUS_NAME = 'org.freedesktop.Tracker'
OBJECT_PATH = '/org/freedesktop/tracker'
tracks_query = """
<rdfq:Condition>\
<rdfq:equals>\
<rdfq:Property name="Audio:Title" />\
<rdf:String>*</rdf:String>\
</rdfq:equals>\
</rdfq:Condition>\
"""
video_query = """
<rdfq:Condition>\
<rdfq:equals>\
<rdfq:Property name="File:Name" />\
<rdf:String>*</rdf:String>\
</rdfq:equals>\
</rdfq:Condition>\
"""
image_query = """
<rdfq:Condition>\
<rdfq:equals>\
<rdfq:Property name="File:Name" />\
<rdf:String>*</rdf:String>\
</rdfq:equals>\
</rdfq:Condition>\
"""
class Container(BackendItem):
    """Generic DIDL-Lite container (folder) node of the media hierarchy.

    When `children_callback` is given, children are produced lazily by
    calling it; otherwise they live in an ordered dict keyed by the
    numeric part of the child's id.
    """
    logCategory = 'tracker_store'
    def __init__(self, id, parent_id, name, store=None, children_callback=None, container_class=DIDLLite.Container):
        self.id = id
        self.parent_id = parent_id
        self.name = name
        self.mimetype = 'directory'
        self.item = container_class(id, parent_id,self.name)
        self.item.childCount = 0
        self.update_id = 0
        if children_callback != None:
            self.children = children_callback
        else:
            self.children = util.OrderedDict()
        # childCount is recomputed on demand; None means "unknown".
        self.item.childCount = None #self.get_child_count()
        if store!=None:
            self.get_url = lambda: store.urlbase + str(self.id)
    def add_child(self, child):
        # Ids such as 'album.12' are stored under their numeric suffix.
        id = child.id
        if isinstance(child.id, basestring):
            _,id = child.id.split('.')
        self.children[id] = child
        if self.item.childCount != None:
            self.item.childCount += 1
    def get_children(self,start=0,end=0):
        """Return children in [start, end); end == 0 means "to the end"."""
        self.info("container.get_children %r %r", start, end)
        if callable(self.children):
            # NOTE(review): callback is invoked as (start, count) — confirm
            # that children_callback implementations expect a count here.
            return self.children(start,end-start)
        else:
            children = self.children.values()
            if end == 0:
                return children[start:]
            else:
                return children[start:end]
    def get_child_count(self):
        if self.item.childCount != None:
            return self.item.childCount
        if callable(self.children):
            return len(self.children())
        else:
            return len(self.children)
    def get_item(self):
        return self.item
    def get_name(self):
        return self.name
    def get_id(self):
        return self.id
class Artist(BackendItem):
    """An artist node grouping Album children, keyed by numeric album id."""
    logCategory = 'tracker_store'
    def __init__(self, store, id, name):
        self.store = store
        self.id = 'artist.%d' % int(id)
        self.name = name
        self.children = {}
        self.sorted_children = None
    def add_child(self, child):
        # Store 'album.12' style children under their numeric suffix.
        _,id = child.id.split('.')
        self.children[id] = child
    def sort_children(self):
        """Return album keys sorted by album name (cached after first call)."""
        if self.sorted_children == None:
            def childs_sort(x,y):
                r = cmp(self.children[x].name,self.children[y].name)
                return r
            self.sorted_children = self.children.keys()
            self.sorted_children.sort(cmp=childs_sort)
        return self.sorted_children
    def get_artist_all_tracks(self,start=0,request_count=0):
        """Flatten all tracks of all albums, in album-name order."""
        children = []
        for album_id in self.sort_children():
            # Bug fix: sort_children() yields dict keys, not Album objects,
            # so the album must be looked up before asking for its tracks
            # (previously this raised AttributeError on the key string).
            children += self.children[album_id].get_children()
        if request_count == 0:
            return children[start:]
        else:
            return children[start:request_count]
    def get_children(self,start=0,end=0):
        """Return Album children in [start, end); end == 0 means "to the end"."""
        children = []
        for key in self.sort_children():
            children.append(self.children[key])
        if end == 0:
            return children[start:]
        else:
            return children[start:end]
    def get_child_count(self):
        return len(self.children)
    def get_item(self, parent_id = AUDIO_ARTIST_CONTAINER_ID):
        item = DIDLLite.MusicArtist(self.id, parent_id, self.name)
        return item
    def get_id(self):
        return self.id
    def get_name(self):
        return self.name
class Album(BackendItem):
    """An album node holding Track children, ordered by track number."""
    logCategory = 'tracker_store'
    def __init__(self, store, id, title, artist):
        self.store = store
        self.id = 'album.%d' % int(id)
        self.name = unicode(title)
        self.artist = unicode(artist)
        self.cover = None
        self.children = {}
        self.sorted_children = None
    def add_child(self, child):
        # Store 'song.12' style children under their numeric suffix.
        _,id = child.id.split('.')
        self.children[id] = child
    def get_children(self,start=0,end=0):
        """Return tracks sorted by track number; the ordering is cached
        after the first call. end == 0 means "to the end"."""
        children = []
        if self.sorted_children != None:
            for key in self.sorted_children:
                children.append(self.children[key])
        else:
            def childs_sort(x,y):
                r = cmp(self.children[x].track_nr,self.children[y].track_nr)
                return r
            self.sorted_children = self.children.keys()
            self.sorted_children.sort(cmp=childs_sort)
            for key in self.sorted_children:
                children.append(self.children[key])
        if end == 0:
            return children[start:]
        else:
            return children[start:end]
    def get_child_count(self):
        return len(self.children)
    def get_item(self, parent_id = AUDIO_ALBUM_CONTAINER_ID):
        item = DIDLLite.MusicAlbum(self.id, parent_id, self.name)
        item.childCount = self.get_child_count()
        item.artist = self.artist
        item.albumArtURI = self.cover
        return item
    def get_id(self):
        return self.id
    def get_name(self):
        return self.name
    def get_cover(self):
        return self.cover
class Track(BackendItem):
logCategory = 'tracker_store'
def __init__(self,store,
id,parent_id,
file,title,
artist,album,genre,\
duration,\
track_number,\
size,mimetype):
self.store = store
self.id = 'song.%d' % int(id)
| self.parent_id = parent_id
self.path = unicode(file)
duration = str(duration).strip()
duration = duration.split('.')[0]
if len(duration) == 0:
duration = 0
seconds = int(duration)
hours = seconds / 3600
seconds = seconds - hours * 3600
minutes = seconds / 60
seconds = seconds - minutes * | 60
self.duration = ("%d:%02d:%02d") % (hours, minutes, seconds)
self.bitrate = 0
self.title = unicode(title)
self.artist = unicode(artist)
self.album = unicode(album)
self.genre = unicode(genre)
track_number = str(track_number).strip()
if len(track_number) == 0:
track_number = 1
self.track_nr = int(track_number)
self.cover = None
self.mimetype = str(mimetype)
self.size = int(size)
self.url = self.store.urlbase + str(self.id)
def get_children(self, start=0, end=0):
return []
def get_child_count(self):
return 0
def get_item(self, parent_id=None):
self.debug("Track get_item %r @ %r" %(self.id,self.parent_id))
# create item
item = DIDLLite.MusicTrack(self.id,self.parent_id)
item.album = self.album
item.artist = self.artist
#item.date =
item.genre = self.genre
item.originalTrackNumber = self.track_nr
item.title = self.title
item.albumArtURI = self.cover
# add http resource
res = DIDLLite.Resource(self.url, ' |
voer-platform/vp.repo | vpr/rest_framework/urls.py | Python | agpl-3.0 | 724 | 0.002762 | """
Login and logout views for the browseable API.
Add these to your root URLconf if you're using the browseable API and
your API requires authentication.
The urls must be namespaced a | s 'rest_framework', and you should make sure
your authentication settings include `SessionAuthentication`.
urlpatterns = patterns('',
...
url(r'^auth', include('rest_framework.urls', namespace='rest_framework'))
)
"""
from django.conf.urls.defaults import patterns, url
# Both auth views reuse the browsable API's login template.
template_name = {'template_name': 'rest_framework/login.html'}
# Delegates to Django's stock auth views; include this module under the
# 'rest_framework' namespace (see module docstring).
urlpatterns = patterns('django.contrib.auth.views',
    url(r'^login/$', 'login', template_name, name='login'),
    url(r'^logout/$', 'logout', template_name, name='logout'),
)
|
pbmanis/acq4 | acq4/devices/Stage/calibration.py | Python | mit | 4,016 | 0.003237 | from __future__ import print_function
import numpy as np
import scipy.stats, scipy.optimize
import acq4.pyqtgraph as pg
class StageCalibration(object):
    """Calibrates a motorized stage against camera imagery.

    calibrate() steps the stage along X while capturing frames. Each
    frame's apparent shift is measured by FFT image registration, the
    step-to-distance scale is fit by linear regression, and the residual
    periodic error is modelled as a sum of sine harmonics.
    """
    def __init__(self, stage):
        self.stage = stage
        # None while no settling deadline is pending; otherwise the time
        # after which a captured frame is trusted to show a stopped stage.
        self.framedelay = None
    def calibrate(self, camera):
        """Start a calibration run: set up n move targets along X and
        subscribe to the camera's new-frame signal."""
        import imreg_dft # FFT image registration by Chris Gohlke; available via pip
        n = 300
        dx = 10e-6  # step size per move (metres)
        self.move = None
        self.camera = camera
        self.offsets = np.empty((n, 2))
        self.frames = []
        self.index = 0
        # current stage position
        pos = self.stage.getPosition()
        # where to move on each update
        self.positions = np.zeros((n, 2))
        # step only along x; y stays fixed at the starting position
        self.positions[:,0] = pos[0] + np.arange(n) * dx
        self.positions[:,1] = pos[1]
        camera.sigNewFrame.connect(self.newFrame)
    def newFrame(self, frame):
        """Frame handler: drop frames captured while moving/settling,
        otherwise hand the frame to processFrame. Disconnects on error."""
        try:
            if self.move is not None and not self.move.isDone():
                # stage is still moving; discard frame
                return
            if self.framedelay is None:
                # stage has stopped; discard 2 more frames to be sure
                # we get the right image.
                self.framedelay = pg.ptime.time() + 1./frame.info()['fps']
            elif self.framedelay < frame.info()['time']:
                # now we are ready to keep this frame.
                self.framedelay = None
                self.processFrame(frame)
        except Exception:
            pg.disconnect(self.camera.sigNewFrame, self.newFrame)
            raise
    def processFrame(self, frame):
        """Record one accepted frame: start the next move, accumulate the
        registered image offset, and run analyze() after the last frame."""
        self.frames.append(frame)
        index = self.index
        # update index for next iteration
        self.index += 1
        # decide whether to move the stage
        finished = self.index >= self.positions.shape[0]
        if not finished:
            self.move = self.stage.moveTo(self.positions[self.index], 'slow')
        # calculate offset (while stage moves no next location)
        if index == 0:
            offset = (0, 0)
        else:
            # register against a frame up to 10 steps back and chain the
            # offsets so drift accumulates from the start position
            compareIndex = max(0, index-10)
            offset, _ = imreg_dft.translation(frame.getImage(), self.frames[compareIndex].getImage())
            px = self.camera.getPixelSize()
            offset = self.offsets[compareIndex] + offset.astype(float) * [px.x(), px.y()]
        self.offsets[index] = offset
        # finish up if there are no more positions
        if finished:
            pg.disconnect(self.camera.sigNewFrame, self.newFrame)
            self.analyze()
    def analyze(self):
        """Fit step-vs-offset line, plot the residual error, and fit the
        residual with a 4-harmonic sine series."""
        # frames = []
        # for frame in self.frames:
        #     frames.append(frame.getImage()[np.newaxis, ...])
        # self.frameArray = np.concatenate(frames, axis=0)
        # self.imageView = pg.image(self.frameArray)
        # linear regression to determine scale between stage steps and camera microns
        x = ((self.positions - self.positions[0])**2).sum(axis=1)**0.5
        y = (self.offsets**2).sum(axis=1)**0.5
        slope, yint, r, p, stdev = scipy.stats.linregress(x, y)
        # subtract linear approximation to get residual error
        y1 = x * slope + yint
        self.xvals = x
        self.error = y - y1
        self.errorPlot = pg.plot(x, self.error, title='X axis error (slope = %0.2f um/step)' % (slope*1e6), labels={'left': ('Error', 'm'), 'bottom': ('position', 'steps')})
        # fit residual to combination of sine waves
        def fn(p, x):
            return (p[2] * np.sin((x + p[0]) * 1 * p[1]) +
                    p[3] * np.sin((x + p[0]) * 2 * p[1]) +
                    p[4] * np.sin((x + p[0]) * 3 * p[1]) +
                    p[5] * np.sin((x + p[0]) * 4 * p[1]))
        def erf(p, x, y):
            return fn(p, x) - y
        f0 = 6 * np.pi / x.max() # guess there are 3 cycles in the data
        amp = self.error.max()
        self.fit = scipy.optimize.leastsq(erf, [0, f0, amp, amp, amp, amp], (x, self.error))[0]
        self.errorPlot.plot(x, fn(self.fit, x), pen='g')
|
inkenbrandt/EPAEN | prism/prism.py | Python | gpl-2.0 | 7,910 | 0.003287 | __author__ = 'jbellino'
import os
import csv
import gdal
import gdalconst
import zipfile as zf
import numpy as np
import pandas as pd
from unitconversion import *
prismGrid_shp = r'G:\archive\datasets\PRISM\shp\prismGrid_p.shp'
prismGrid_pts = r'G:\archive\datasets\PRISM\shp\prismGrid_p.txt'
prismProj = r'G:\archive\datasets\PRISM\shp\PRISM_ppt_bil.prj'
ncol = 1405
nrow = 621
max_grid_id = ncol * nrow
def getMonthlyPrecipData(year, month, mask=None, conversion=None):
    """Return the PRISM monthly precipitation grid (mm) for year/month.

    Reads the BIL band straight out of the zipped monthly archive via
    GDAL's /vsizip virtual filesystem. `mask` is a boolean array of cells
    to hide; `conversion` is an optional scalar unit factor (e.g. mm->in).
    """
    # print 'Getting data for', year, month
    bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}{1:0>2d}_bil.bil'.format(year, month)
    b = BilFile(bil, mask=mask)
    data = b.data
    if conversion is not None:
        data *= conversion
    # b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
    return data
def getAnnualPrecipData(year, mask=None, conversion=None):
    """Return the PRISM annual precipitation grid (mm) for `year`.

    Same mechanics as getMonthlyPrecipData, but reads the year-total BIL
    from the zipped archive. `conversion` is an optional scalar unit factor.
    """
    # print 'Getting data for year', year
    bil = r'/vsizip/G:\archive\datasets\PRISM\monthly\ppt\{0}\PRISM_ppt_stable_4kmM2_{0}_all_bil.zip\PRISM_ppt_stable_4kmM2_{0}_bil.bil'.format(year)
    b = BilFile(bil, mask=mask)
    data = b.data
    if conversion is not None:
        data *= conversion
    # b.save_to_esri_grid('ppt_{}'.format(year), conversion_factor=mm_to_in)
    return data
def getGridIdFromRowCol(row, col):
    """
    Determines the PRISM grid id based on a row, col input.

    Rows and cols are 1-based; ids run left-to-right, top-to-bottom,
    so id = (row - 1) * ncol + col.
    """
    assert 1 <= row <= nrow, 'Valid row numbers are bewteen 1 and {}.'.format(nrow)
    assert 1 <= col <= ncol, 'Valid col numbers are bewteen 1 and {}.'.format(ncol)
    grid_id = ((row-1)*ncol)+col
    return grid_id
def getRowColFromGridId(grid_id):
    """
    Determines the (row, col) based on a PRISM grid id.

    Inverse of getGridIdFromRowCol, which defines
    id = (row - 1) * ncol + col with 1-based rows and cols.
    """
    assert 1 <= grid_id <= max_grid_id, 'Valid Grid IDs are bewteen 1 and {}, inclusively.'.format(max_grid_id)
    # Shift to 0-based before divmod so ids that fall exactly at the end
    # of a row (grid_id == k * ncol) resolve to (k, ncol) instead of the
    # out-of-range (k + 1, 0) that the unshifted divmod produced.
    q, r = divmod(grid_id - 1, ncol)
    return q + 1, r + 1
def writeGridPointsToTxt(prismGrid_shp=prismGrid_shp, out_file=prismGrid_pts):
    """
    Writes the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
    """
    import arcpy
    data = []
    # Grid ids that terminate each grid row (i.e. where col == ncol).
    rowends = range(ncol, max_grid_id+1, ncol)
    with arcpy.da.SearchCursor(prismGrid_shp, ['grid_code', 'row', 'col']) as cur:
        rowdata = []
        for rec in cur:
            # Collect ids until the row-ending col is seen, then flush.
            rowdata.append(rec[0])
            if rec[2] in rowends:
                data.append(rowdata)
                rowdata = []
    a = np.array(data, dtype=np.int)
    np.savetxt(out_file, a)
def getGridPointsFromTxt(prismGrid_pts=prismGrid_pts):
    """
    Returns an array of the PRISM grid id, row, and col for each feature in the PRISM grid shapefile.
    """
    # NOTE(review): this reads back the matrix written by
    # writeGridPointsToTxt (one row of grid ids per PRISM grid row).
    a = np.genfromtxt(prismGrid_pts, dtype=np.int, usemask=True)
    return a
def makeGridMask(grid_pnts, grid_codes=None):
    """
    Makes a mask with the same shape as the PRISM grid.
    'grid_codes' is a list containing the grid id's of those cells to INCLUDE in your analysis.
    """
    mask = np.ones((nrow, ncol), dtype=bool)
    for row in range(mask.shape[0]):
        # invert=True: cells whose id IS in grid_codes become False
        # (unmasked/included); everything else stays masked.
        mask[row] = np.in1d(grid_pnts[row], grid_codes, invert=True)
    return mask
def downloadPrismFtpData(parm, output_dir=os.getcwd(), timestep='monthly', years=None, server='prism.oregonstate.edu'):
"""
Downloads ESRI BIL (.hdr) files from the PRISM FTP site.
'parm' is the parameter of interest: 'ppt', precipitation; 'tmax', temperature, max' 'tmin', temperature, min /
'tmean', temperature, mean
'timestep' is either 'monthly' or 'daily'. This string is used to direct the function to the right set of remote folders.
'years' is a list of the years for which data is desired.
"""
from ftplib import FTP
def handleDownload(block):
file.write(block)
# print ".\n"
# Play some defense
assert parm in ['ppt', 'tmax', 'tmean', 'tmin'], "'parm' must be one of: ['ppt', 'tmax', 'tmean', 'tmin']"
assert timestep in ['daily', 'monthly'], "'timestep' must be one of: ['daily', 'monthly']"
assert years is not None, 'Please enter a year for which data will be fetched.'
if isinstance(years, int):
years = list(years)
ftp = FTP(server)
print 'Logging into', server
ftp.login()
# Wrap everything in a try clause so we close the FTP connection gracefully
try:
for year in years:
dir = 'monthly'
if timestep == 'daily':
dir = timestep
dir_string = '{}/{}/{}'.format(dir, parm, year)
remote_files = []
ftp.dir(dir_string, remote_files.append)
for f_string in remote_files:
f = f_string.rsplit(' ')[-1]
if not '_all_bil' in f:
continue
print 'Downloading', f
if not os.path.isdir(os.path.join(output_dir, str(year))):
os.makedirs(os.path.join(output_dir, str(year)))
local_f = os.path.join(output_dir, str(year), f)
with open(local_f, 'wb') as file:
f_path = '{}/{}'.format(dir_string, f)
ftp.retrbinary('RETR ' + f_path, handleDownload)
except Exception as e:
print e
finally:
print('Closing the connection.')
ftp.close()
return
class BilFile(object):
    """
    This class returns a BilFile object using GDAL to read the array data. Data units are in millimeters.
    """
    def __init__(self, bil_file, mask=None):
        self.bil_file = bil_file
        # The ESRI header sits next to the .bil with the same basename.
        self.hdr_file = bil_file[:-3]+'hdr'
        gdal.GetDriverByName('EHdr').Register()
        self.get_array(mask=mask)
        self.originX = self.geotransform[0]
        self.originY = self.geotransform[3]
        self.pixelWidth = self.geotransform[1]
        self.pixelHeight = self.geotransform[5]
    def get_array(self, mask=None):
        """Read band 1 into self.data as a masked array; nodata cells and
        any True cells of `mask` are hidden."""
        self.data = None
        img = gdal.Open(self.bil_file, gdalconst.GA_ReadOnly)
        band = img.GetRasterBand(1)
        self.nodatavalue = band.GetNoDataValue()
        self.data = band.ReadAsArray()
        self.data = np.ma.masked_where(self.data==self.nodatavalue, self.data)
        if mask is not None:
            self.data = np.ma.masked_where(mask==True, self.data)
        self.ncol = img.RasterXSize
        self.nrow = img.RasterYSize
        self.geotransform = img.GetGeoTransform()
    def save_to_esri_grid(self, out_grid, conversion_factor=None, proj=None):
        """Write the raster (optionally unit-converted) to an ESRI grid via arcpy."""
        import arcpy
        arcpy.env.overwriteOutput = True
        arcpy.env.workspace = os.getcwd()
        arcpy.CheckOutExtension('Spatial')
        arcpy.env.outputCoordinateSystem = prismProj
        if proj is not None:
            arcpy.env.outputCoordinateSystem = proj
        df = np.ma.filled(self.data, self.nodatavalue)
        # Lower-left corner; pixelHeight is negative, hence the -1 factor.
        llx = self.originX
        lly = self.originY - (self.nrow * -1 * self.pixelHeight)
        point = arcpy.Point(llx, lly)
        r = arcpy.NumPyArrayToRaster(df, lower_left_corner=point, x_cell_size=self.pixelWidth,
                                     y_cell_size=-1*self.pixelHeight, value_to_nodata=self.nodatavalue)
        if conversion_factor is not None:
            r *= conversion_factor
        r.save(out_grid)
    def __extract_bil_from_zip(self, parent_zip):
        # NOTE(review): self.pth is never initialized anywhere in this
        # class; this helper (and __clean_up) would raise AttributeError
        # if called. Appears to be dead/legacy code — confirm before use.
        with zf.ZipFile(parent_zip, 'r') as myzip:
            if self.bil_file in myzip.namelist():
                myzip.extract(self.bil_file, self.pth)
                myzip.extract(self.hdr_file, self.pth)
        return
    def __clean_up(self):
        # Best-effort removal of the extracted .bil/.hdr pair.
        try:
            os.remove(os.path.join(self.pth, self.bil_file))
            os.remove(os.path.join(self.pth, self.hdr_file))
        except:
            pass
# Quick self-check: a (row, col) pair should survive a round trip
# through the two grid-id helpers.
if __name__ == '__main__':
    grid_id = getGridIdFromRowCol(405, 972)
    print grid_id
    row, col = getRowColFromGridId(grid_id)
    print row, col
|
indhub/mxnet | python/mxnet/gluon/data/sampler.py | Python | apache-2.0 | 4,279 | 0.001636 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Dataset sampler."""
__all__ = ['Sampler', 'SequentialSampler', 'RandomSampler', 'BatchSampler']
import numpy as np
class Sampler(object):
    """Abstract base class for index samplers.

    Concrete samplers yield sample indices via `__iter__` and report how
    many indices they produce via `__len__`.
    """
    def __iter__(self):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError


class SequentialSampler(Sampler):
    """Yields the indices 0, 1, ..., length-1 in order.

    Parameters
    ----------
    length : int
        Length of the sequence.
    """
    def __init__(self, length):
        self._length = length

    def __iter__(self):
        for index in range(self._length):
            yield index

    def __len__(self):
        return self._length


class RandomSampler(Sampler):
    """Yields each index in [0, length) exactly once, in random order
    (sampling without replacement, via numpy's global RNG).

    Parameters
    ----------
    length : int
        Length of the sequence.
    """
    def __init__(self, length):
        self._length = length

    def __iter__(self):
        order = np.arange(self._length)
        np.random.shuffle(order)
        return iter(order)

    def __len__(self):
        return self._length


class BatchSampler(Sampler):
    """Groups the indices of a wrapped `Sampler` into mini-batches.

    Parameters
    ----------
    sampler : Sampler
        The source Sampler.
    batch_size : int
        Size of mini-batch.
    last_batch : {'keep', 'discard', 'rollover'}
        What to do with a final incomplete batch: return it as-is
        ('keep'), drop it ('discard'), or carry its indices over to the
        start of the next iteration ('rollover').

    Examples
    --------
    >>> sampler = gluon.data.SequentialSampler(10)
    >>> batch_sampler = gluon.data.BatchSampler(sampler, 3, 'keep')
    >>> list(batch_sampler)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    """
    def __init__(self, sampler, batch_size, last_batch='keep'):
        self._sampler = sampler
        self._batch_size = batch_size
        self._last_batch = last_batch
        self._prev = []

    def __iter__(self):
        # Seed the first batch with any indices rolled over last epoch.
        batch, self._prev = self._prev, []
        for index in self._sampler:
            batch.append(index)
            if len(batch) == self._batch_size:
                yield batch
                batch = []
        if not batch:
            return
        if self._last_batch == 'keep':
            yield batch
        elif self._last_batch == 'discard':
            return
        elif self._last_batch == 'rollover':
            self._prev = batch
        else:
            raise ValueError(
                "last_batch must be one of 'keep', 'discard', or 'rollover', " \
                "but got %s"%self._last_batch)

    def __len__(self):
        n = len(self._sampler)
        if self._last_batch == 'keep':
            # Ceiling division: the partial batch counts.
            return (n + self._batch_size - 1) // self._batch_size
        if self._last_batch == 'discard':
            return n // self._batch_size
        if self._last_batch == 'rollover':
            return (len(self._prev) + n) // self._batch_size
        raise ValueError(
            "last_batch must be one of 'keep', 'discard', or 'rollover', " \
            "but got %s"%self._last_batch)
|
bioasp/caspo | caspo/console/__init__.py | Python | gpl-3.0 | 714 | 0 | # Copyright (c) 2014-2016, Santiago Videla
#
# This file is part of caspo.
#
# caspo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Pub | lic License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# caspo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with caspo. If not, see | <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
|
blaiseli/p4-phylogenetics | share/Examples/L_mcmc/G_posteriorSamples/Protein_2parts/sPostSamps.py | Python | gpl-2.0 | 1,242 | 0.004831 | read("d.nex")
# Build a two-partition protein model and check MCMC posterior samples by
# posterior predictive simulation (simpleBigXSquared), for both a p4 run
# and a MrBayes run of the same data.
read('sets.nex')
a = var.alignments[0]
a.setCharPartition('p1')
d = Data()
# Random starting tree over the data's taxa.
t = func.randomTree(taxNames=d.taxNames)
t.data = d
# Partition 0: WAG comp/rates, 4-category gamma ASRV, pInvar fixed at 0.
pNum=0
t.newComp(partNum=pNum, free=1, spec='wag')
t.newRMatrix(partNum=pNum, free=0, spec='wag')
t.setNGammaCat(partNum=pNum, nGammaCat=4)
t.newGdasrv(partNum=pNum, free=1, val=0.5)
t.setPInvar(partNum=pNum, free=0, val=0.0)
t.setRelRate(partNum=pNum, val=0.5)
# Partition 1: JTT comp/rates, free pInvar, its own relative rate.
pNum = 1
t.newComp(partNum=pNum, free=1, spec='jtt')
t.newRMatrix(partNum=pNum, free=0, spec='jtt')
t.setNGammaCat(partNum=pNum, nGammaCat=4)
t.newGdasrv(partNum=pNum, free=1, val=0.5)
t.setPInvar(partNum=pNum, free=1, val=0.1)
t.setRelRate(partNum=pNum, val=2.0)
t.model.relRatesAreFree = True
t.calcLogLike()
# Re-seed p4's C-level RNG per process so concurrent runs differ.
func.reseedCRandomizer(os.getpid())
t.calcLogLike()
# Posterior predictive check over the first 10 samples of the p4 run.
ps = PosteriorSamples(t, runNum=0, program='p4', verbose=3)
for sampNum in range(0,10):
    t2 = ps.getSample(sampNum)
    t2.data = d
    t2.simulate()
    ret = t2.data.simpleBigXSquared()
    print ret[0], ret[1]
# Same check against the MrBayes run's samples (mbout32.*).
ps = PosteriorSamples(t, runNum=1, program='mrbayes', mbBaseName='mbout32', verbose=3)
for sampNum in range(0,10):
    t2 = ps.getSample(sampNum)
    t2.data = d
    t2.simulate()
    ret = t2.data.simpleBigXSquared()
    print ret[0], ret[1]
|
CingHu/neutron-ustack | neutron/db/firewall/firewall_db.py | Python | apache-2.0 | 23,675 | 0.000169 | # Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.db import common_db_mixin as base_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import firewall
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as const
LOG = logging.getLogger(__name__)
class FirewallRule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall rule."""
    __tablename__ = 'firewall_rules'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    # Nullable: a rule may exist without being attached to any policy.
    firewall_policy_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('firewall_policies.id'),
                                   nullable=True)
    shared = sa.Column(sa.Boolean)
    protocol = sa.Column(sa.String(40))
    ip_version = sa.Column(sa.Integer, nullable=False)
    # 46 chars: long enough for an IPv6 address or CIDR string.
    source_ip_address = sa.Column(sa.String(46))
    destination_ip_address = sa.Column(sa.String(46))
    # Port ranges stored as min/max pairs; folded back into "min:max"
    # strings by _make_firewall_rule_dict.
    source_port_range_min = sa.Column(sa.Integer)
    source_port_range_max = sa.Column(sa.Integer)
    destination_port_range_min = sa.Column(sa.Integer)
    destination_port_range_max = sa.Column(sa.Integer)
    action = sa.Column(sa.Enum('allow', 'deny', name='firewallrules_action'))
    enabled = sa.Column(sa.Boolean)
    # 1-based position of the rule within its policy's ordered list.
    position = sa.Column(sa.Integer)
class Firewall(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall resource."""
    __tablename__ = 'firewalls'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    shared = sa.Column(sa.Boolean)
    admin_state_up = sa.Column(sa.Boolean)
    # Provisioning status of the firewall (short status string).
    status = sa.Column(sa.String(16))
    firewall_policy_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('firewall_policies.id'),
                                   nullable=True)
    # At most one firewall per router (unique); the row is removed
    # automatically when its router is deleted (ondelete CASCADE).
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id',
                                        ondelete='CASCADE'),
                          nullable=True,
                          unique=True)
class FirewallPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall Policy resource."""
    __tablename__ = 'firewall_policies'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    shared = sa.Column(sa.Boolean)
    # Ordered rule list; ordering_list keeps FirewallRule.position in
    # sync automatically, counting from 1.
    firewall_rules = orm.relationship(
        FirewallRule,
        backref=orm.backref('firewall_policies', cascade='all, delete'),
        order_by='FirewallRule.position',
        collection_class=ordering_list('position', count_from=1))
    audited = sa.Column(sa.Boolean)
    firewalls = orm.relationship(Firewall, backref='firewall_policies')
class Firewall_db_mixin(firewall.FirewallPluginBase, base_db.CommonDbMixin):
"""Mixin class for Firewall DB implementation."""
    @property
    def _core_plugin(self):
        """The active core plugin, resolved lazily via NeutronManager."""
        return manager.NeutronManager.get_plugin()
    def _get_firewall(self, context, id):
        """Fetch a Firewall DB object by id; translate a missing row into
        the API-level FirewallNotFound exception."""
        try:
            return self._get_by_id(context, Firewall, id)
        except exc.NoResultFound:
            raise firewall.FirewallNotFound(firewall_id=id)
    def _get_firewall_policy(self, context, id):
        """Fetch a FirewallPolicy DB object by id; translate a missing row
        into the API-level FirewallPolicyNotFound exception."""
        try:
            return self._get_by_id(context, FirewallPolicy, id)
        except exc.NoResultFound:
            raise firewall.FirewallPolicyNotFound(firewall_policy_id=id)
    def _get_firewall_rule(self, context, id):
        """Fetch a FirewallRule DB object by id; translate a missing row
        into the API-level FirewallRuleNotFound exception."""
        try:
            return self._get_by_id(context, FirewallRule, id)
        except exc.NoResultFound:
            raise firewall.FirewallRuleNotFound(firewall_rule_id=id)
def _make_firewall_dict(self, fw, fields=None):
res = {'id': fw['id'],
'tenant_id': fw['tenant_id'],
'name': fw['name'],
'description': fw['description'],
'shared': fw['shared'],
'admin_state_up': fw['admin_state_up'],
'status': fw['status'],
'firewall_policy_id': fw['firewall_policy_id']}
return self._fields(res, fields)
def _make_firewall_policy_dict(self, firewall_policy, fields=None):
fw_rules = [rule['id'] for rule in firewall_policy['firewall_rules']]
firewalls = [fw['id'] for fw in firewall_policy['firewalls']]
res = {'id': firewall_policy['id'],
'tenant_id': firewall_policy['tenant_id'],
'name': firewall_policy['name'],
'description': firewall_policy['description'],
'shared': firewall_policy['shared'],
'audited': firewall_policy['audited'],
'firewall_rules': fw_rules,
'firewall_list': firewalls}
return self._fields(res, fields)
def _make_firewall_rule_dict(self, firewall_rule, fields=None):
position = None
# We return the position only if the firewall_rule is bound to a
# firewall_policy.
if firewall_rule['firewall_policy_id']:
position = firewall_rule['position']
src_port_range = self._get_port_range_from_min_max_ports(
firewall_rule['source_port_range_min'],
firewall_rule['source_port_range_max'])
dst_port_range = self._get_port_range_from_min_max_ports(
firewall_rule['destination_port_range_min'],
firewall_rule['destination_port_range_max'])
res = {'id': firewall_rule['id'],
'tenant_id': firewall_rule['tenant_id'],
'name': firewall_rule['name'],
'description': firewall_rule['description'],
'firewall_policy_id': firewall_rule['firewall_policy_id'],
'shared': firewall_rule['shared'],
'protocol': firewall_rule['prot | ocol'],
'ip_version': firewall_rule['ip_version'],
'source_ip_address': firewall_rule['source_ip_address'],
| 'destination_ip_address':
firewall_rule['destination_ip_address'],
'source_port': src_port_range,
'destination_port': dst_port_range,
'action': firewall_rule['action'],
'position': position,
'enabled': firewall_rule['enabled']}
return self._fields(res, fields)
def _set_rules_for_policy(self, context, firewall_policy_db, rule_id_list):
fwp_db = firewall_policy_db
with context.session.begin(subtransactions=True):
if not rule_id_list:
fwp_db.firewall_rules = []
fwp_db.audited = False
return
# We will first check if the new list of rules is valid
filters = {'id': [r_id for r_id in rule_id_list]}
rules_in_db = self._get_collection_query(context, FirewallRule,
filters=filters)
rules_dict = dict((fwr_db['id'], fwr_db) for fwr_db in rules_in_db)
for fwrule_id in rule_id_list:
if fwrule_id not in rules_dict:
# If we find an invalid rule in the list we
# do not perform the update since this breaks
# the integrity of this list.
raise firewall.Firewal |
paulmadore/Eric-IDE | 6-6.0.9/eric/Plugins/PluginWizardQMessageBox.py | Python | gpl-3.0 | 4,158 | 0.002165 | # -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the QMessageBox wizard plugin.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QDialog
from E5Gui.E5Application import e5App
from E5Gui.E5Action import E5Action
from E5Gui import E5MessageBox
# Start-Of-Header
name = "QMessageBox Wizard Plugin"
author = "Detlev Offenbach <detlev@die-offenbachs.de>"
autoactivate = True
deactivateable = True
version = "6.0.0"
className = "MessageBoxWizard"
packageName = "__core__"
shortDescription = "Show the QMessageBox wizard."
longDescription = """This plugin shows the QMessageBox wizard."""
pyqtApi = 2
python2Compatible = True
# End-Of-Header
error = ""
class MessageBoxWizard(QObject):
    """
    Class implementing the QMessageBox wizard plugin.
    """
    def __init__(self, ui):
        """
        Constructor
        @param ui reference to the user interface object (UI.UserInterface)
        """
        super(MessageBoxWizard, self).__init__(ui)
        self.__ui = ui
    def activate(self):
        """
        Public method to activate this plugin.
        @return tuple of None and activation status (boolean)
        """
        self.__initAction()
        self.__initMenu()
        return None, True
    def deactivate(self):
        """
        Public method to deactivate this plugin.
        """
        # Remove the action from the wizards menu (if present) and from the
        # collected E5 actions so the plugin leaves no UI traces behind.
        menu = self.__ui.getMenu("wizards")
        if menu:
            menu.removeAction(self.action)
        self.__ui.removeE5Actions([self.action], 'wizards')
    def __initAction(self):
        """
        Private method to initialize the action.
        """
        self.action = E5Action(
            self.tr('QMessageBox Wizard'),
            self.tr('Q&MessageBox Wizard...'), 0, 0, self,
            'wizards_qmessagebox')
        self.action.setStatusTip(self.tr('QMessageBox Wizard'))
        self.action.setWhatsThis(self.tr(
            """<b>QMessageBox Wizard</b>"""
            """<p>This wizard opens a dialog for entering all the parameters"""
            """ needed to create a QMessageBox. The generated code is"""
            """ inserted at the current cursor position.</p>"""
        ))
        self.action.triggered.connect(self.__handle)
        self.__ui.addE5Actions([self.action], 'wizards')
    def __initMenu(self):
        """
        Private method to add the actions to the right menu.
        """
        menu = self.__ui.getMenu("wizards")
        if menu:
            menu.addAction(self.action)
    def __callForm(self, editor):
        """
        Private method to display a dialog and get the code.
        @param editor reference to the current editor
        @return tuple of the generated code (string or None) and a flag
            indicating acceptance of the dialog (boolean)
        """
        # Imported lazily so the dialog module is only loaded when the
        # wizard is actually invoked.
        from WizardPlugins.MessageBoxWizard.MessageBoxWizardDialog import \
            MessageBoxWizardDialog
        dlg = MessageBoxWizardDialog(None)
        if dlg.exec_() == QDialog.Accepted:
            line, index = editor.getCursorPosition()
            # Reproduce the editor's indentation style (tabs vs. spaces) so
            # the generated snippet blends into the surrounding code.
            indLevel = editor.indentation(line) // editor.indentationWidth()
            if editor.indentationsUseTabs():
                indString = '\t'
            else:
                indString = editor.indentationWidth() * ' '
            return (dlg.getCode(indLevel, indString), True)
        else:
            return (None, False)
    def __handle(self):
        """
        Private method to handle the wizards action.
        """
        editor = e5App().getObject("ViewManager").activeWindow()
        if editor is None:
            E5MessageBox.critical(
                self.__ui,
                self.tr('No current editor'),
                self.tr('Please open or create a file first.'))
        else:
            code, ok = self.__callForm(editor)
            if ok:
                line, index = editor.getCursorPosition()
                # It should be done on this way to allow undo
                editor.beginUndoAction()
                editor.insertAt(code, line, index)
                editor.endUndoAction()
|
cirocosta/avisenchente | tests/test_fetcher.py | Python | mit | 1,113 | 0.006289 | import unittest
import samples
from src.utils import iotsdk
from src.utils import fetcher
class TestFetcher(unittest.TestCase):
    """ Tests src.utils.fetcher """
    def setUp(self):
        # A placeholder token suffices here: only construction-time state
        # is exercised by these tests, no network calls are made.
        self.fetcher = fetcher.Fetcher("dummy_token")
        pass
    def test_initialization(self):
        # The constructor must store the token and build an Iot client.
        self.assertEqual(self.fetcher.token, "dummy_token")
        self.assertIsInstance(self.fetcher.iot, iotsdk.Iot)
    def test_toDatabaseMeasure(self):
        # Empty or malformed measures are rejected; well-formed ones accepted.
        self.assertFalse(self.fetcher._toDatabaseMeasure(dict()))
        self.assertFalse(self.fetcher._toDatabaseMeasure(\
            samples.SAMPLE_FALSE_MEASURE))
        self.assertTrue(self.fetcher._toDatabaseMeasure(\
            samples.SAMPLE_TRUE_MEASURE))
    def test_toDatabaseMeasureCollection(self):
        self.assertFalse(self.fetcher._toDatabaseMeasureCollection(\
            samples.SAMPLE_FALSE_MEASURE_COLLECTION))
        self.assertTrue(self.fetcher._toDatabaseMeasureCollection(\
            samples.SAMPLE_TRUE_MEASURE_COLLECTION))
    def test_fetch_data(self):
        # TODO: not yet implemented.
        pass
if __name__ == "__main__":
unittest.main()
|
jldaniel/Gaia | Models/zdt2.py | Python | mit | 1,190 | 0.00084 | __author__ = 'jdaniel'
from GaiaSolve.model import Model
class ZDT2(Model):
    """ZDT2 bi-objective benchmark problem.

    Thirty decision variables in [0, 1], two objectives, no constraints.
    """

    def __init__(self):
        super(ZDT2, self).__init__()

    def evaluate(self):
        """Compute both objectives from self.x and store them on self.obj."""
        # g(x) = 1 + 9 * (sum of tail variables) / (n - 1)
        tail_sum = sum(self.x[1:])
        g = 1.0 + 9.0 * tail_sum / (len(self.x) - 1)
        first = self.x[0]
        # Non-convex Pareto front: f2 = g * (1 - (f1/g)^2)
        second = g * (1.0 - (first / g) ** 2)
        self.obj = [first, second]
        self.eqcon = []
        self.neqcon = []

    def number_of_design_variables(self):
        return 30

    def lower_bound(self):
        return [0.0 for _ in range(30)]

    def upper_bound(self):
        return [1.0 for _ in range(30)]

    def number_of_objectives(self):
        return 2

    def has_equality_constraints(self):
        return False

    def number_of_equality_constraints(self):
        return 0

    def has_inequality_constraints(self):
        return False

    def number_of_inequality_constraints(self):
        return 0

    def decision_variable_names(self):
        return ['x' + str(i) for i in range(30)]

    def objective_variable_names(self):
        return ['f1', 'f2']

    def equality_constraint_variable_names(self):
        return []

    def inequality_constraint_variable_names(self):
        return []
crowning-/dash | contrib/testgen/base58.py | Python | mit | 2,973 | 0.007064 | # Copyright (c) 2012 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    """Minimal shim exposing a hashlib-compatible ``new`` constructor.

    Kept for compatibility with code written against the old ``Crypto``
    style API: ``SHA256.new(data)`` returns a hashlib sha256 object.
    """
    new = hashlib.sha256
# On Python 3 (where str and bytes differ) shadow the builtins so the
# byte-oriented base58 code below works unchanged: iterating a py3 bytes
# object yields ints, so ord() becomes the identity and chr() builds a
# one-byte bytes object.  On Python 2 the builtins are left alone.
if str != bytes:
    # Python 3.x
    def ord(c):
        return c
    def chr(n):
        return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars

def b58encode(v):
    """ encode v, which is a string of bytes, to base58.
    """
    # Interpret v as a big-endian base-256 integer.  ord is the
    # module-level shim, so this works for py2 str and py3 bytes alike.
    long_value = 0
    for c in v:
        long_value = long_value * 256 + ord(c)

    result = ''
    while long_value >= __b58base:
        long_value, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
    result = __b58chars[long_value] + result

    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s.
    # BUGFIX: compare via ord() instead of c == '\0' -- on py3 bytes input
    # iteration yields ints, so the old string comparison never matched and
    # leading zero bytes were silently dropped from the encoding.
    nPad = 0
    for c in v:
        if ord(c) != 0:
            break
        nPad += 1
    return (__b58chars[0] * nPad) + result
def b58decode(v, length = None):
    """ decode v into a string of len bytes
    """
    # Fold the base58 digits into a single big-endian integer.  Note that
    # find() yields -1 for characters outside the alphabet, matching the
    # original (lenient) behaviour for invalid input.
    long_value = 0
    for c in v:
        long_value = long_value * __b58base + __b58chars.find(c)

    # Emit the integer as base-256 bytes.  chr is the module-level shim,
    # so the result is a py3 bytes / py2 str as appropriate.
    result = bytes()
    while long_value >= 256:
        long_value, mod = divmod(long_value, 256)
        result = chr(mod) + result
    result = chr(long_value) + result

    # Leading '1' characters re-expand to leading zero bytes.
    nPad = 0
    for c in v:
        if c != __b58chars[0]:
            break
        nPad += 1
    result = chr(0) * nPad + result

    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return 32-bit checksum based on SHA256"""
    # Double-SHA256, truncated to the first four bytes (Base58Check style).
    inner = SHA256.new(v).digest()
    return SHA256.new(inner).digest()[0:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    # Append checksum(v) (first 4 bytes of double-SHA256) before encoding.
    return b58encode(v + checksum(v))
def b58decode_chk(v):
    """decode a base58 string, check and remove checksum"""
    decoded = b58decode(v)
    if decoded is None:
        return None
    # Last four bytes are the Base58Check checksum over the payload.
    payload, check = decoded[:-4], decoded[-4:]
    if check != checksum(payload):
        return None
    return payload
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    addr = b58decode_chk(strAddress)
    # A valid address decodes to exactly 21 bytes: 1 version + 20 hash bytes.
    if addr is None or len(addr) != 21:
        return None
    # ord is the module shim: identity on py3 byte values, builtin on py2.
    return ord(addr[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # BUGFIX: compare with ==, not 'is' -- identity comparison against an
    # int literal relies on CPython small-int caching and raises a
    # SyntaxWarning on Python 3.8+.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
|
cnodell/mrps | mrps.py | Python | mit | 1,477 | 0.003385 | #!/usr/bin/env python
import sys
import configparser
import os
import shutil
from PyQt5 import QtWidgets
from PyQt5 import QtWebKitWidgets
from PyQt5 import QtCore
# Read config file
# NOTE(review): no existence check -- a missing config file yields an empty
# parser and a KeyError further down; confirm whether that is acceptable.
home_dir = os.path.expanduser("~")
conf_path = os.path.join(home_dir, ".config/mrps/mrps.conf")
config = configparser.ConfigParser(delimiters=('='))
config.read(conf_path)
def clean_up():
    # Remove the generated HTML and the copied reveal.js tree on exit.
    # Relies on the module-level names bound below (only if a file was chosen).
    os.remove(html_file_full)
    shutil.rmtree(os.path.join(o_file_dir, "reveal.js"))
app = QtWidgets.QApplication(sys.argv)
app.aboutToQuit.connect(clean_up)
# Source markdown file: either from argv or an interactive file dialog.
if len(sys.argv) == 2:
    o_file_full = os.path.abspath(sys.argv[1])
else:
    o_file_full = QtWidgets.QFileDialog.getOpenFileName()[0]
if o_file_full:
    o_file_dir = os.path.dirname(o_file_full)
    o_file_name = os.path.basename(os.path.normpath(o_file_full))
    o_file_name_bare = os.path.splitext(o_file_name)[0]
    html_file_full = os.path.join(o_file_dir, o_file_name_bare + ".html")
    # Copy reveal.js next to the source so the generated HTML can load it
    # with relative paths; clean_up() deletes this copy on quit.
    shutil.copytree(os.path.normpath(config['DEFAULT']['revealjs_path']), os.path.join(o_file_dir, "reveal.js"))
    md_file = open(o_file_full, 'r')
    md_content = md_file.read()
    md_file.close()
    # Wrap the raw markdown between the configured HTML header/footer;
    # reveal.js renders the markdown client-side.
    f = open(html_file_full, 'w')
    f.write(config['DEFAULT']['html_top'] + '\n\n' +
            md_content + '\n\n' +
            config['DEFAULT']['html_bottom'])
    f.close()
    web = QtWebKitWidgets.QWebView()
    web.load(QtCore.QUrl('file://' + html_file_full))
    web.show()
    sys.exit(app.exec_())
else:
    exit()
|
indexofire/feincms-markup | feincms_markup/__init__.py | Python | mit | 446 | 0.004484 | # -*- coding: utf-8 -*-
VERSION = (0, 0, 1, 'alpha', 0)
__version__ = '.'.join(map(str, VERSION))

def get_version():
    """Return a human-readable version string derived from ``VERSION``.

    ``(0, 0, 1, 'alpha', 0)`` maps to ``'0.0.1 pre-alpha'``; a non-final
    stage with a serial maps to e.g. ``'0.0.1 beta 2'``.
    """
    parts = [str(VERSION[0]), str(VERSION[1])]
    if VERSION[2]:
        parts.append(str(VERSION[2]))
    version = '.'.join(parts)
    if VERSION[3:] == ('alpha', 0):
        return '%s pre-alpha' % version
    if VERSION[3] != 'final':
        return '%s %s %s' % (version, VERSION[3], VERSION[4])
    return version
|
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/distributions/python/ops/bijectors/__init__.py | Python | bsd-2-clause | 2,469 | 0.002835 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may ob | tain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| # See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector Ops.
@@Affine
@@AffineLinearOperator
@@Bijector
@@Chain
@@CholeskyOuterProduct
@@ConditionalBijector
@@Exp
@@Identity
@@Inline
@@Invert
@@PowerTransform
@@Sigmoid
@@SigmoidCentered
@@SoftmaxCentered
@@Softplus
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.contrib.distributions.python.ops.bijectors.affine import *
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import *
from tensorflow.contrib.distributions.python.ops.bijectors.chain import *
from tensorflow.contrib.distributions.python.ops.bijectors.cholesky_outer_product import *
from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import *
from tensorflow.contrib.distributions.python.ops.bijectors.exp import *
from tensorflow.contrib.distributions.python.ops.bijectors.inline import *
from tensorflow.contrib.distributions.python.ops.bijectors.invert import *
from tensorflow.contrib.distributions.python.ops.bijectors.power_transform import *
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import *
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid_centered import *
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import *
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import *
from tensorflow.python.ops.distributions.bijector import *
from tensorflow.python.ops.distributions.identity_bijector import Identity
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
Jumpscale/web | pythonlib/eve/tests/test_version.py | Python | apache-2.0 | 70 | 0 | # -*- coding: utf-8 | -*-
API_VERSION = 'v1'
| DOMAIN = {'contacts': {}}
|
GLolol/PyLink | test/test_protocol_p10.py | Python | mpl-2.0 | 2,145 | 0.000932 | """
Tests for protocols/p10
"""
import unittest
from pylinkirc.protocols import | p10
class P10UIDGeneratorTest(unittest.TestCase):
    """Tests for the P10 UID generator (base-64 suffix over A-Z a-z 0-9 [ ])."""
    def setUp(self):
        # 'HI' is the two-character server numeric prefixed to every UID.
        self.uidgen = p10.P10UIDGenerator('HI')
    def test_initial_UID(self):
        # Counter 0 starts at suffix 'AAA' and increments the last digit.
        expected = [
            "HIAAA",
            "HIAAB",
            "HIAAC",
            "HIAAD",
            "HIAAE",
            "HIAAF"
        ]
        self.uidgen.counter = 0
        actual = [self.uidgen.next_uid() for i in range(6)]
        self.assertEqual(expected, actual)
    def test_rollover_first_lowercase(self):
        # Digit order is A-Z then a-z: 'Z' rolls into 'a'.
        expected = [
            "HIAAY",
            "HIAAZ",
            "HIAAa",
            "HIAAb",
            "HIAAc",
            "HIAAd",
        ]
        self.uidgen.counter = 24
        actual = [self.uidgen.next_uid() for i in range(6)]
        self.assertEqual(expected, actual)
    def test_rollover_first_num(self):
        # After the 52 letters come the digits: 'z' rolls into '0'.
        expected = [
            "HIAAz",
            "HIAA0",
            "HIAA1",
            "HIAA2",
            "HIAA3",
            "HIAA4",
        ]
        self.uidgen.counter = 26*2-1
        actual = [self.uidgen.next_uid() for i in range(6)]
        self.assertEqual(expected, actual)
    def test_rollover_second(self):
        # Final digits ']' exhaust the 64-symbol alphabet; the middle digit
        # then advances ('AA]' -> 'ABA').
        expected = [
            "HIAA8",
            "HIAA9",
            "HIAA[",
            "HIAA]",
            "HIABA",
            "HIABB",
            "HIABC",
            "HIABD",
        ]
        self.uidgen.counter = 26*2+10-2
        actual = [self.uidgen.next_uid() for i in range(8)]
        self.assertEqual(expected, actual)
    def test_rollover_third(self):
        # Carry propagates across all three digits ('E]]' -> 'FAA').
        expected = [
            "HIE]9",
            "HIE][",
            "HIE]]",
            "HIFAA",
            "HIFAB",
            "HIFAC",
        ]
        self.uidgen.counter = 5*64**2 - 3
        actual = [self.uidgen.next_uid() for i in range(6)]
        self.assertEqual(expected, actual)
    def test_overflow(self):
        # The UID space holds 64**3 values; the generator must refuse to wrap.
        self.uidgen.counter = 64**3-1
        self.assertTrue(self.uidgen.next_uid())
        self.assertRaises(RuntimeError, self.uidgen.next_uid)
self.assertRaises(RuntimeError, self.uidgen.next_uid)
if __name__ == '__main__':
unittest.main()
|
godiard/typing-turtle-activity | balloongame.py | Python | gpl-3.0 | 13,291 | 0.004439 | # Copyright 2008 by Kate Scheppke and Wade Brainerd.
# This file is part of Typing Turtle.
#
# Typing Turtle is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Typing Turtle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Typing Turtle. If not, see <http://www.gnu.org/licenses/>.
import math
import random, datetime
from gettext import gettext as _
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('PangoCairo', '1.0')
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import Pango
from gi.repository import PangoCairo
import medalscreen
BALLOON_COLORS = [
    (65535, 0, 0),
    (0, 0, 65535),
    (65535, 32768, 0),
    (0, 32768, 65535),
]


class Balloon:
    """A single word-carrying balloon: position, velocity, label and look."""

    def __init__(self, x, y, vx, vy, word):
        self.x = x
        self.y = y
        self.vx = vx
        self.vy = vy
        self.word = word
        # Longer words need a bigger balloon, but never smaller than 100 px.
        self.size = max(100, 50 + 20 * len(word))
        self.color = random.choice(BALLOON_COLORS)
class BalloonGame(Gtk.VBox):
def __init__(self, lesson, activity):
GObject.GObject.__init__(self)
self.lesson = lesson
self.activity = activity
# Build title bar.
title = Gtk.Label()
title.set_markup("<span size='20000'><b>" + lesson['name'] + "</b></span>")
title.set_alignment(1.0, 0.0)
stoplabel = Gtk.Label(label=_('Go Back'))
stopbtn = Gtk.Button()
stopbtn.add(stoplabel)
stopbtn.connect('clicked', self.stop_cb)
hbox = Gtk.HBox()
hbox.pack_start(stopbtn, False, False, 10)
hbox.pack_end(title, False, False, 10)
# Build the game drawing area.
self.area = Gtk.DrawingArea()
self.draw_cb_id = self.area.connect("draw", self.draw_cb)
# Connect keyboard grabbing and releasing callbacks.
self.area.connect('realize', self.realize_cb)
self.area.connect('unrealize', self.unrealize_cb)
self.pack_start(hbox, False, False, 10)
self.pack_start(self.area, True, True, 0)
self.show_all()
# Initialize the game data.
self.balloons = []
self.score = 0
self.spawn_delay = 10
self.count = 0
self.count_left = self.lesson.get('length', 60)
self.medal = None
self.finished = False
# Start the animation loop running.
self.update_timer = GObject.timeout_add(20, self.tick, priority=GObject.PRIORITY_HIGH_IDLE+30)
def realize_cb(self, widget):
self.activity.add_events(Gdk.EventMask.KEY_PRESS_MASK)
self.key_press_cb_id = self.activity.connect('key-press-event', self.key_cb)
# Clear the mouse cursor.
#pixmap = Gdk.Pixmap(widget.window, 10, 10)
#color = Gdk.Color()
#cursor = Gdk.Cursor.new(pixmap, pixmap, color, color, 5, 5)
#widget.window.set_cursor(cursor)
def unrealize_cb(self, widget):
self.activity.disconnect(self.key_press_cb_id)
def stop_cb(self, widget):
# Stop the animation loop.
if self.update_timer:
try:
GObject.source_remove(self.update_timer)
except:
pass # Try remove instance, if not found, just pass
self.activity.pop_screen()
def key_cb(self, widget, event):
# Ignore hotkeys.
if event.get_state() & (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK):
return False
# Extract information about the key pressed.
key = Gdk.keyval_to_unicode(event.keyval)
if key != 0: key = chr(key)
if self.finished:
key_name = Gdk.keyval_name(event.keyval)
if key_name == 'Return':
self.activity.pop_screen()
# Show the new medal if there was one.
if self.medal:
self.activity.push_screen(medalscreen.MedalScreen(self.medal, self.activity))
else:
for b in self.balloons:
if b.word[0] == key:
b.word = b.word[1:]
self.add_score(1)
# Pop the balloon if it's been typed.
if len(b.word) == 0:
self.balloons.remove(b)
self.add_score(100)
self.queue_draw_balloon(b)
break
return False
def update_balloon(self, b):
b.x += b.vx
b.y += b.vy
if b.x < 100 or b.x >= self.bounds.width - 100:
b.vx = -b.vx
if b.y < -100:
self.balloons.remove(b)
self.queue_draw_balloon(b)
def tick(self):
if self.finished:
return False
self.bounds = self.area.get_allocation()
for b in self.balloons:
self.update_balloon(b)
self.spawn_delay -= 1
if self.count_left >= 0 and self.spawn_delay <= 0:
self.count += 1
self.count_left -= 1
word = random.choice(self.lesson['words'])
x = random.randint(100, self.bounds.width - 100)
y = self.bounds.height + 100
vx = random.uniform(-2, 2)
vy = -2 #random.uniform(-5, -3)
b = Balloon(x, y, vx, vy, word)
self.balloons.append(b)
if self.count < 10:
delay = 200
elif self.count < 20:
delay = 150
else:
delay = 100
self.spawn_delay = random.randint(delay-20, delay+20)
if self.count_left <= 0 and len(self.balloons) == 0:
self.finish_game()
return True
def draw_results(self, cr):
# Draw background.
w = self.bounds.width - 400
h = self.bounds.height - 200
x = self.bounds.width/2 - w/2
y = self.bounds.height/2 - h/2
cr.set_source_rgb(0.762, 0.762, 0.762)
cr.rectangle(x, y, w, h)
cr.fill()
cr.set_source_rgb(0, 0, 0)
cr.rectangle(x, y, w, h)
cr.stroke()
# Draw text
title = _('You finished!') + '\n'
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
fd = Pango.FontDescription('Serif Bold')
fd.set_size(16 * Pango.SCALE)
pango_layout.set_font_description(fd)
pango_layout.set_text(title,
len(title))
size = pango_layout.get_size()
tx = x + (w / 2) - (size[0] / Pango.SCALE) / 2
ty = y + 100
cr.move_to(tx, ty)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout)
report = ''
report += _('Your score was %(score)d.') % { 'score': self.score } + '\n'
if self.medal:
report += _('You ea | rned a %(type)s medal!') % self.medal + '\n'
| report += '\n'
report += _('Press the ENTER key to continue.')
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
fd = Pango.FontDescription('Times')
fd.set_size(12 * Pango.SCALE)
pango_layout.set_font_description(fd)
pango_layout.set_text(report, len(report))
size = pango_layout.get_size()
sx = x + w / 2 - (size[0] / Pango.SCALE) / 2
sy = y + 200
cr.move_to(sx, sy)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout)
def finish_game(self):
self.finished = True
# Add to the lesson history.
report = {
'lesson': self.l |
johnnoone/salt-targeting | src/salt/utils/__init__.py | Python | mit | 537 | 0.001862 | '''
salt.utils
~~~~~~~~~~
'''
class lazy_property(object):
    '''
    Descriptor for lazy, compute-once attributes.

    On first access the wrapped getter runs and its result is stored on the
    instance under the same name, shadowing this descriptor, so every later
    access is a plain attribute lookup.  The value should therefore be
    treated as immutable.
    http://stackoverflow.com/a/6849299/564003
    '''
    def __init__(self, fget):
        self.fget = fget
        self.func_name = fget.__name__

    def __get__(self, instance, owner):
        # Class-level access: nothing to compute (mirrors the original,
        # which returns None rather than the descriptor itself).
        if instance is None:
            return None
        result = self.fget(instance)
        # Cache on the instance so the descriptor is bypassed from now on.
        setattr(instance, self.func_name, result)
        return result
|
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/theano/compile/nanguardmode.py | Python | mit | 11,118 | 0.00018 | from __future__ import print_function
import collections
import logging
from six.moves import StringIO
import numpy as np
import theano
from theano.configparser import config
import theano.tensor as T
import theano.sandbox.cuda as cuda
from theano.compile import Mode
logger = logging.getLogger("theano.compile.nanguardmode")
def flatten(l):
    """
    Turns a nested graph of lists/tuples/other objects into a list of objects.

    Parameters
    ----------
    l : list/tuple/other objects
        Might be nested.

    Returns
    -------
    object
        A flattened list of objects.
    """
    # BUGFIX: ``collections.ValuesView`` lives in ``collections.abc`` since
    # Python 3.3 and the old alias was removed in 3.10; resolve it in a way
    # that also works on Python 2.
    try:
        from collections.abc import ValuesView
    except ImportError:  # Python 2
        from collections import ValuesView
    if not isinstance(l, (list, tuple, ValuesView)):
        return [l]
    rval = []
    for elem in l:
        # Recurse into nested containers.  ValuesView is now included here
        # too: the original only recursed into list/tuple, so a dict-values
        # view nested inside a list was appended unflattened.
        if isinstance(elem, (list, tuple, ValuesView)):
            rval.extend(flatten(elem))
        else:
            rval.append(elem)
    return rval
def contains_nan(arr, node=None):
    """
    Test whether a numpy.ndarray contains any `np.nan` values.
    Parameters
    ----------
    arr : np.ndarray or output of any Theano op
    node : None or an Apply instance.
        If arr is the output of a Theano op, the node associated to it.
    Returns
    -------
    contains_nan : bool
        `True` if the array contains any `np.nan` values, `False` otherwise.
    Notes
    -----
    Tests for the presence of `np.nan`'s using `np.isnan(np.min(ndarray))`.
    This approach is faster and more memory efficient than the obvious
    alternative, calling `np.any(np.isnan(ndarray))`, which requires the
    construction of a boolean array with the same shape as the input array.
    """
    # Opaque C data and RNG state objects cannot hold NaN by definition.
    if isinstance(arr, theano.gof.type.CDataType._cdata_type):
        return False
    elif isinstance(arr, np.random.mtrand.RandomState):
        return False
    elif arr.size == 0:
        return False
    elif cuda.cuda_available and isinstance(arr, cuda.CudaNdarray):
        # NOTE(review): node.op is dereferenced here, so for CudaNdarray
        # inputs a non-None node appears to be required -- confirm callers.
        if (hasattr(theano.sandbox, 'rng_mrg') and
            isinstance(
                node.op,
                # It store ints in float container
                theano.sandbox.rng_mrg.GPU_mrg_uniform)):
            return False
        else:
            # Check on the GPU to avoid a device-to-host transfer.
            compile_gpu_func(True, False, False)
            return np.isnan(f_gpumin(arr.reshape(arr.size)))
    return np.isnan(np.min(arr))
def contains_inf(arr, node=None):
    """
    Test whether a numpy.ndarray contains any `np.inf` values.
    Parameters
    ----------
    arr : np.ndarray or output of any Theano op
    node : None or an Apply instance.
        If the output of a Theano op, the node associated to it.
    Returns
    -------
    contains_inf : bool
        `True` if the array contains any `np.inf` values, `False` otherwise.
    Notes
    -----
    Tests for the presence of `np.inf`'s by determining whether the
    values returned by `np.nanmin(arr)` and `np.nanmax(arr)` are finite.
    This approach is more memory efficient than the obvious alternative,
    calling `np.any(np.isinf(ndarray))`, which requires the construction of a
    boolean array with the same shape as the input array.
    """
    # Opaque C data and RNG state objects cannot hold inf by definition.
    if isinstance(arr, theano.gof.type.CDataType._cdata_type):
        return False
    elif isinstance(arr, np.random.mtrand.RandomState):
        return False
    elif arr.size == 0:
        return False
    elif cuda.cuda_available and isinstance(arr, cuda.CudaNdarray):
        # NOTE(review): node.op is dereferenced here, mirroring contains_nan;
        # a non-None node appears to be required for CudaNdarray inputs.
        if (hasattr(theano.sandbox, 'rng_mrg') and
                isinstance(
                    node.op,
                    # It store ints in float container
                    theano.sandbox.rng_mrg.GPU_mrg_uniform)):
            return False
        else:
            # min/max computed on the GPU to avoid a device-to-host copy.
            compile_gpu_func(False, True, False)
            return (np.isinf(f_gpumin(arr.reshape(arr.size))) or
                    np.isinf(f_gpumax(arr.reshape(arr.size))))
    return np.isinf(np.nanmax(arr)) or np.isinf(np.nanmin(arr))
# Lazily-compiled GPU reduction functions, cached at module level so each
# is built at most once per process.
f_gpumin = None
f_gpumax = None
f_gpuabsmax = None
def compile_gpu_func(nan_is_error, inf_is_error, big_is_error):
    """ compile utility function used by contains_nan and contains_inf

    Builds (once) the GPU min/max/abs-max reductions needed by the
    requested checks; a no-op when CUDA is unavailable.
    """
    global f_gpumin, f_gpumax, f_gpuabsmax
    if not cuda.cuda_available:
        return
    guard_input = cuda.fvector('nan_guard')
    cuda_compile_failed = False
    # NaN and inf checks both need the min reduction.
    if (nan_is_error or inf_is_error) and f_gpumin is None:
        try:
            f_gpumin = theano.function(
                [guard_input], T.min(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if inf_is_error and not cuda_compile_failed and f_gpumax is None:
        try:
            f_gpumax = theano.function(
                [guard_input], T.max(guard_input),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if big_is_error and not cuda_compile_failed and f_gpuabsmax is None:
        try:
            f_gpuabsmax = theano.function(
                [guard_input], T.max(T.abs_(guard_input)),
                mode='FAST_RUN'
            )
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
class NanGuardMode(Mode):
"""
A Theano compilation Mode that makes the compiled function automatically
detect NaNs and Infs and detect an error if they occur.
Parameters
----------
nan_is_error : bool
If True, raise an error anytime a NaN is encountered.
inf_is_error : bool
If True, raise an error anytime an Inf is encountered. Note that some
pylearn2 modules currently use np.inf as a default value (e.g.
mlp.max_pool) and these will cause an error if inf_is_error is True.
big_is_error : bool
If True, raise an error when a value greater than 1e10 is encountered.
Note
----
We ignore the linker parameter
"""
# We currently loose the 3 first params frequently, when calling
# mode.including() and variant.
def __init__(self, nan_is_error=None, inf_is_error=None, big_is_error=None,
optimizer='default', linker=None):
self.provided_optimizer = optimizer
if nan_is_error is None:
nan_is_error = config.NanGuardMode.nan_is_error
if inf_is_error is None:
inf_is_error = config.NanGuardMode.inf_is_error
if big_is_error is None:
big_is_error = config.NanGuardMode.big_is_error
assert nan_is_error or inf_is_error or big_is_error
compile_gpu_func(nan_is_error, inf_is_error, big_is_error)
def do_check_on(var, nd, f, is_input):
"""
Checks `var` for NaNs / Infs. If detected, raises an exception
and / or prints information about `nd`, `f`, and `is_input` to
help the user determine the cause of the invalid values.
Parameters
----------
var : numpy.ndarray
The value to be checked.
nd : theano.gof.Apply
The Apply node being executed.
f : callable
The thunk for the apply node.
is_input : bool
If True, `var` is an input to `nd`.
If False, it is an output.
"""
error = False
sio = StringIO()
if nan_is_error:
if contains_nan(var, nd):
print('NaN detected', file=sio)
error = True
if inf_is_error:
if contains_inf(var, nd):
print('Inf detected', file=sio)
error = True
if big_is_error:
err = False
if isinstance(var, theano.gof.type.CDataType._cdata_type):
err = False
el |
almarklein/bokeh | sphinx/source/tutorial/exercises/scatter.py | Python | bsd-3-clause | 2,389 | 0.001674 | from __future__ import division
import numpy as np
from bokeh.plotting import figure, HBox, output_file, show, VBox
from bokeh.models import Range1d
# NOTE(review): this is a tutorial EXERCISE scaffold. The names xr, yr, p2,
# p3 and p4 are intentionally left undefined for the student to fill in, so
# the file raises NameError if run unmodified -- do not "fix" them here.
# create some data using python lists
x1 = [1, 2, 5, 7, -8, 5, 2, 7, 1, -3, -5, 1.7, 5.4, -5]
y1 = [5, 6, -3, 1.5, 2, 1, 1, 9, 2.4, -3, 6, 8, 2, 4]
# create some data using numpy arrays
x2 = np.random.random(size=100) * 20 - 10
y2 = np.random.random(size=100) * 20 - 10
# EXERCISE: create some data for x3 and y3 however you like
# EXERCISE: output static HTML file
TOOLS="pan,wheel_zoom,box_zoom,reset,save"
# EXERCISE: create two Range1d objects to reuse in the plots. Use the [-10, 10]
# for the bounds. Note: Range1d's are initialized like: Range1d(start=0, end=1)
# EXERCISE: Plot all the sets of points on different plots p1, p2, p3. Use the
# ranges above for `x_range` and `y_range` for each figure. Set different colors
# as well. Try setting line_color and fill_color instead of just color. You can
# also set alpha, line_alpha, and fill_alpha if you like. Set tools to TOOLS on
# the figures. Change the value of the 'marker' parameter, "circle", "square",
# "triangle", etc. One example is given
p1 = figure(x_range=xr, y_range=yr, tools=TOOLS, plot_width=300, plot_height=300)
p1.scatter(x1, y1, size=12, color="red", alpha=0.5)
# EXERCISE: Try panning and zooming one of the plots with another one visible!
# Set the plot_width and plot_height to smaller if necessary
# EXERCISE: create a new figure p4
# Lets plot 4000 circles, you can play around with this if you like
N = 4000
# Create a bunch of random points, radii and colors for plotting
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = [
    "#%02x%02x%02x" % (r, g, 150) for r, g in zip(np.floor(50+2*x), np.floor(30+2*y))
]
# EXERCISE: use the `circle` renderer to scatter all the circles. Set the
# 'fill_color' to the colors above, the 'line_color' to None, and the 'radius'
# to the radii. Also try setting the fill_alpha to something less than one.
# Use TOOLS from above to set a tools parameter.
# NOTE: since we are passing 'radius' as a parameter, the size of the circles
# is computed in **data** space, not in pixels. If you'd like to specify
# radii in pixels, use: radius_units="screen"
# show the plots arrayed in a layout
show(VBox(HBox(p1, p2, p3), p4))
|
laginha/yard | src/yard/version.py | Python | mit | 1,888 | 0.009534 | #!/usr/bin/env python
# encoding: utf-8
from yard.exceptions import NoDefaultVersion
from yard.consts import RESOURCE_VERSION_RE
import re
class Metaclass(type):
    """Metaclass that forwards unknown class-attribute lookups.

    Classes built with this metaclass are expected to carry a ``versions``
    mapping and a ``default`` key; any attribute not found on the class
    itself is looked up on the default version's resource class.
    """
    def __getattr__(cls, name):
        default_resource = cls.versions[cls.default]
        return getattr(default_resource, name)
class VersionController(object):
    """Dispatches each request to the resource of the requested API version.

    Subclasses define a ``versions`` mapping (version name -> resource class)
    and a ``default`` version name; unknown attribute access is delegated to
    the default version through ``Metaclass``.
    """
    __metaclass__ = Metaclass
    # Pre-compiled pattern that extracts the requested version from the
    # HTTP Accept header.
    re_version = re.compile(RESOURCE_VERSION_RE)

    @classmethod
    def preprocess(cls, api):
        """Bind the controller to *api* and prepare every declared version."""
        cls.api = api
        if not hasattr(cls, 'default'):
            raise NoDefaultVersion()
        cls.default_resource = cls.versions[cls.default]
        cls.description = cls.default_resource.description
        for version_name, resource in cls.versions.items():
            resource.preprocess(cls.api, version_name=version_name)

    def __init__(self, routes):
        self.routes = routes

    def get_documentation(self):
        """Return the documentation produced by the default version."""
        default_instance = self.versions[self.default](self.routes)
        return default_instance.get_documentation()

    def get_version(self, request):
        """Determine the requested version.

        The Accept header wins; otherwise the ``version`` query parameter
        is consulted, falling back to the default version.
        """
        accept_header = request.META.get('HTTP_ACCEPT', '')
        matched = self.re_version.match(accept_header)
        if matched and matched.group(1):
            return matched.group(1)
        return request.GET.get('version', self.default)

    def handle_request(self, request, **kwargs):
        """Instantiate the resource for the requested version and dispatch.

        Falls back to class-level aliases (attributes naming a real version)
        and, ultimately, to the default resource.
        """
        def dispatch(resource_class):
            return resource_class(self.routes).handle_request(request, **kwargs)

        wanted = self.get_version(request)
        if wanted in self.versions:
            return dispatch(self.versions[wanted])
        if hasattr(self, wanted):
            alias = getattr(self, wanted)
            if alias in self.versions:
                return dispatch(self.versions[alias])
        return dispatch(self.default_resource)
|
DXCanas/content-curation | performance/run_perftests.py | Python | mit | 975 | 0.002051 | import sys
import gevent
from locust.env import Environment
from locust.event import EventHook
from locust.log import setup_logging
from locust.stats import stats_printer
from locustfile import StudioDesktopBrowserUser
setup_logging("DEBUG", None)
def error_output(*args, **kwargs):
    """Print a request failure reported through the locust event hook."""
    message = "Error: {}, {}".format(args, kwargs)
    print(message)
# Wire up a hook so that failed requests are reported via error_output.
request_failure_hook = EventHook()
request_failure_hook.add_listener(error_output)

# Build the locust Environment with our single user class, attach the
# failure hook, and create a runner that executes users in this process.
environment = Environment(user_classes=[StudioDesktopBrowserUser])
environment.events.request_failure = request_failure_hook
environment.create_local_runner()

# Serve the locust web UI locally so the run can be observed in a browser.
environment.create_web_ui("127.0.0.1", 8089)

# Periodically print the aggregated statistics in a background greenlet.
gevent.spawn(stats_printer(environment.stats))

# Launch 10 users at a hatch rate of 10 per second.
environment.runner.start(10, hatch_rate=10)

# Schedule the runner to stop after 60 seconds.
gevent.spawn_later(60, lambda: environment.runner.quit())

# Block until the runner's greenlet finishes, then shut down the web UI.
environment.runner.greenlet.join()
environment.web_ui.stop()
|
yichoi/jerryscript | tools/run-tests.py | Python | apache-2.0 | 16,290 | 0.002394 | #!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import collections
import hashlib
import os
import subprocess
import sys
import settings
# All test binaries are built under the project's build/tests directory.
OUTPUT_DIR = os.path.join(settings.PROJECT_DIR, 'build', 'tests')
# A named test configuration: display name, extra build flags, extra test flags.
Options = collections.namedtuple('Options', ['name', 'build_args', 'test_args'])
# Make build_args and test_args optional (default to empty).
# NOTE(review): these default lists are shared between all Options instances;
# safe only as long as they are never mutated in place.
Options.__new__.__defaults__ = ([], [])
# Profile selection flags passed to the build.
OPTIONS_PROFILE_MIN = ['--profile=minimal']
OPTIONS_PROFILE_ES51 = [] # NOTE: same as ['--profile=es5.1']
OPTIONS_PROFILE_ES2015 = ['--profile=es2015-subset']
OPTIONS_DEBUG = ['--debug']
OPTIONS_SNAPSHOT = ['--snapshot-save=on', '--snapshot-exec=on', '--jerry-cmdline-snapshot=on']
# Build flags for the unittest and doctest executables.
OPTIONS_UNITTESTS = ['--unittests=on', '--jerry-cmdline=off', '--error-messages=on',
                     '--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on',
                     '--line-info=on', '--mem-stats=on']
OPTIONS_DOCTESTS = ['--doctests=on', '--jerry-cmdline=off', '--error-messages=on',
                    '--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on']
# Test options for unittests
JERRY_UNITTESTS_OPTIONS = [
Options('unittests-es2015_subset',
OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES2015),
Options('unittests-es2015_subset-debug',
OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
Options('doctests-es2015_subset',
OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES2015),
Options('doctests-es2015_subset-debug',
OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
Options('unittests-es5.1',
OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51),
Options('unittests-es5.1-debug',
OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
Options('doctests-es5.1',
OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51),
Options('doctests-es5.1-debug',
OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG)
]
# Test options for jerry-tests
JERRY_TESTS_OPTIONS = [
Options('jerry_tests-es5.1',
OPTIONS_PROFILE_ES51),
Options('jerry_tests-es5.1-snapshot',
OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT,
['--snapshot']),
Options('jerry_tests-es5.1-debug',
OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
Options('jerry_tests-es5.1-debug-snapshot',
OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
['--snapshot']),
Options('jerry_tests-es5.1-debug-cpointer_32bit',
OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + ['--cpointer-32bit=on', '--mem-heap=1024']),
Options('jerry_tests-es5.1-debug-external_context',
OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + ['--jerry-libc=off', '--external-context=on']),
Options('jerry_tests-es2015_subset-debug',
OPTIONS_PROFILE_ES2015 + OPTIONS_DEBUG),
]
# Test options for jerry-test-suite
JERRY_TEST_SUITE_OPTIONS = JERRY_TESTS_OPTIONS[:]
JERRY_TEST_SUITE_OPTIONS.extend([
Options('jerry_test_suite-minimal',
OPTIONS_PROFILE_MIN),
Options('jerry_test_suite-minimal-snapshot',
OPTIONS_PROFILE_MIN + OPTIONS_SNAPSHOT,
['--snapshot']),
Options('jerry_test_suite-minimal-debug',
OPTIONS_PROFILE_MIN + OPTIONS_DEBUG),
Options('jerry_test_suite-minimal-debug-snapshot',
OPTIONS_PROFILE_MIN + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
['--snapshot']),
Options('jerry_test_suite-es2015_subset',
OPTIONS_PROFILE_ES2015),
Options('jerry_test_suite-es2015_subset-snapshot',
OPTIONS_PROFILE_ES2015 + OPTIONS_SNAPSHOT,
['--snapshot']),
Options('jerry_test_suite-es2015_subset-debug-snapshot',
OPTIONS_PROFILE_ES2015 + OPTIONS_SNAPSHOT + OPTIONS_DEBUG,
['--snapshot']),
])
# Test options for test262
TEST262_TEST_SUITE_OPTIONS = [
Options('test262_tests')
]
# Test options for jerry-debugger
DEBUGGER_TEST_OPTIONS = [
Options('jerry_debugger_tests',
['--debug', '--jerry-debugger=on', '--jerry-libc=off'])
]
# Test options for buildoption-test
JERRY_BUILDOPTIONS = [
Options('buildoption_test-lto',
['--lto=on']),
Options('buildoption_test-error_messages',
['--error-messages=on']),
Options('buildoption_test-all_in_one',
['--all-in-one=on']),
Options('buildoption_test-valgrind',
['--valgrind=on']),
Options('buildoption_test-mem_stats',
['--mem-stats=on']),
Options('buildoption_test-show_opcodes',
['--show-opcodes=on']),
Options('buildoption_test-show_regexp_opcodes',
['--show-regexp-opcodes=on']),
Options('buildoption_test-compiler_default_libc',
['--jerry-libc=off']),
Options('buildoption_test-cpointer_32bit',
['--jerry-libc=off', '--compile-flag=-m32', '--cpointer-32bit=on', '--system-allocator=on']),
Options('buildoption_test-external_context',
['--jerry-libc=off', '--external-context=on']),
Options('buildoption_test-shared_libs',
['--jerry-libc=off', '--shared-libs=on']),
Options('buildoption_test-cmdline_test',
['--jerry-cmdline-test=on']),
Options('buildoption_test-cmdline_snapshot',
['--jerry-cmdline-snapshot=on']),
]
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--toolchain', metavar='FILE',
help='Add toolchain file')
parser.add_argument('-q', '--quiet', action='store_true',
help='Only print out failing tests')
parser.add_argument('--buildoptions', metavar='LIST',
help='Add a comma separated list of extra build options to each test')
parser.add_argument('--skip-list', metavar='LIST',
help='Add a comma separated list of patterns of the excluded JS-tests')
parser.add_argument('--outdir', metavar='DIR', default=OUTPUT_DIR,
help='Specify output directory (default: %(default)s)')
parser.add_argument('--check-signed-off', metavar='TYPE', nargs='?',
choices=['strict', 'tolerant', 'travis'], const='strict',
help='Run signed-off check (%(choices)s; default type if not given: %(const)s)')
parser.add_argument('--check-cppcheck', action='store_true',
help='Run cppcheck')
parser.add_argument('--check-dox | ygen', action='store_true',
help='Run doxygen')
parser.add_argument('--check-pylint', action='store_true',
help='Run pylint')
parser.add_argument('--check-vera', action='store_true',
| help='Run vera check')
parser.add_argument('--check-license', action='store_true',
help='Run license check')
parser.add_argument('--check-magic-strings', action='store_true',
help='Run "magic string source code generator should be executed" check')
parser.add_argument('--jerry-debugger', action='store_true',
help='Run jerry-debugger tests')
parser.add_argument('--jerry-tests', action='store_true',
help='Run jerry-tests')
parser.add_argument('--jerry-test-suite', action='store_true',
help='Run jerry-test-suite')
parser.add_argument('--test262', action='store_true',
help='Run test262')
parser.add_argument('--unittests', action='store_true',
help='Run unittests (including doctests)')
parser. |
rohitranjan1991/home-assistant | homeassistant/components/mqtt/debug_info.py | Python | mit | 8,644 | 0.000925 | """Helper to handle a set of topics to subscribe to."""
from __future__ import annotations
from collections import deque
from collections.abc import Callable
import datetime as dt
from functools import wraps
from typing import Any
import attr
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt as dt_util
from | .const import ATTR_DISCOVERY_PAYLOAD, ATTR_DISCOVERY_TOPIC
from .models import MessageCallbackType, PublishPayloadType
# Key under hass.data where all MQTT debug information is stored.
DATA_MQTT_DEBUG_INFO = "mqtt_debug_info"
# Maximum number of messages kept per subscription/topic ring buffer.
STORED_MESSAGES = 10
def initialize(hass: HomeAssistant):
    """Create the empty MQTT debug-info store on ``hass.data``."""
    # Two top-level buckets: one for entity debug data, one for triggers.
    debug_store = {"entities": {}, "triggers": {}}
    hass.data[DATA_MQTT_DEBUG_INFO] = debug_store
def log_messages(
    hass: HomeAssistant, entity_id: str
) -> Callable[[MessageCallbackType], MessageCallbackType]:
    """Wrap an MQTT message callback to support message logging.

    Returns a decorator that records every received message in the entity's
    per-subscription debug ring buffer before invoking the original callback.
    """
    def _log_message(msg):
        """Append *msg* to the entity's buffer for its subscribed topic."""
        debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
        messages = debug_info["entities"][entity_id]["subscriptions"][
            msg.subscribed_topic
        ]["messages"]
        # Avoid duplicate entries when the same message object is delivered
        # more than once for this subscription.
        if msg not in messages:
            messages.append(msg)
    def _decorator(msg_callback: MessageCallbackType) -> MessageCallbackType:
        @wraps(msg_callback)
        def wrapper(msg: Any) -> None:
            """Log *msg*, then delegate to the wrapped callback."""
            _log_message(msg)
            msg_callback(msg)
        # Tag the wrapper so the debug-info bookkeeping (add_subscription /
        # remove_subscription) can map the callback back to its entity.
        setattr(wrapper, "__entity_id", entity_id)
        return wrapper
    return _decorator
@attr.s(slots=True, frozen=True)
class TimestampedPublishMessage:
    """An outgoing MQTT publish message plus the time it was sent.

    Frozen so instances stored in the debug ring buffers cannot be mutated
    after the fact; ``slots`` keeps per-instance memory small.
    """
    topic: str = attr.ib()
    payload: PublishPayloadType = attr.ib()
    qos: int = attr.ib()
    retain: bool = attr.ib()
    # Time of transmission; filled in by log_message() with utcnow().
    timestamp: dt.datetime = attr.ib(default=None)
def log_message(
    hass: HomeAssistant,
    entity_id: str,
    topic: str,
    payload: PublishPayloadType,
    qos: int,
    retain: bool,
) -> None:
    """Record an outgoing MQTT publish in the entity's debug ring buffer."""
    entities = hass.data[DATA_MQTT_DEBUG_INFO]["entities"]
    entity_info = entities.setdefault(
        entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
    )
    # Lazily create the per-topic ring buffer of recent transmissions.
    topic_info = entity_info["transmitted"].setdefault(
        topic, {"messages": deque([], STORED_MESSAGES)}
    )
    stamped = TimestampedPublishMessage(
        topic, payload, qos, retain, timestamp=dt_util.utcnow()
    )
    topic_info["messages"].append(stamped)
def add_subscription(hass, message_callback, subscription):
    """Prepare debug data for subscription.

    Creates the per-subscription message buffer (if needed) and bumps its
    reference count for the entity that owns *message_callback*.
    """
    # Only callbacks wrapped by log_messages() carry the __entity_id tag;
    # any other callback is ignored.
    if entity_id := getattr(message_callback, "__entity_id", None):
        debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
        entity_info = debug_info["entities"].setdefault(
            entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
        )
        if subscription not in entity_info["subscriptions"]:
            entity_info["subscriptions"][subscription] = {
                "count": 0,
                "messages": deque([], STORED_MESSAGES),
            }
        # Several callbacks may share one subscription; count references so
        # remove_subscription() knows when to drop the entry.
        entity_info["subscriptions"][subscription]["count"] += 1
def remove_subscription(hass, message_callback, subscription):
    """Drop one reference to *subscription* for the callback's entity.

    The subscription entry is deleted entirely once its reference count
    reaches zero.  Callbacks without an ``__entity_id`` tag (i.e. not
    wrapped by log_messages()) are ignored.
    """
    entity_id = getattr(message_callback, "__entity_id", None)
    entities = hass.data[DATA_MQTT_DEBUG_INFO]["entities"]
    if not entity_id or entity_id not in entities:
        return
    subscriptions = entities[entity_id]["subscriptions"]
    subscriptions[subscription]["count"] -= 1
    if not subscriptions[subscription]["count"]:
        subscriptions.pop(subscription)
def add_entity_discovery_data(hass, discovery_data, entity_id):
    """Add discovery data.

    Stores *discovery_data* for the entity, creating the entity's debug
    record on first use.
    """
    debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
    entity_info = debug_info["entities"].setdefault(
        entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
    )
    entity_info["discovery_data"] = discovery_data
def update_entity_discovery_data(hass, discovery_payload, entity_id):
    """Update discovery data.

    Replaces only the stored payload; the entity must already have a
    debug record (KeyError otherwise).
    """
    entity_info = hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]
    entity_info["discovery_data"][ATTR_DISCOVERY_PAYLOAD] = discovery_payload
def remove_entity_data(hass, entity_id):
    """Forget all debug info collected for *entity_id*.

    No-op when the entity was never registered in the debug store.
    """
    # dict.pop with a default replaces the check-then-pop pattern and is
    # safe when the key is absent.
    hass.data[DATA_MQTT_DEBUG_INFO]["entities"].pop(entity_id, None)
def add_trigger_discovery_data(hass, discovery_hash, discovery_data, device_id):
    """Add discovery data.

    Registers a device trigger's discovery data under its discovery hash.
    """
    debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
    debug_info["triggers"][discovery_hash] = {
        "device_id": device_id,
        "discovery_data": discovery_data,
    }
def update_trigger_discovery_data(hass, discovery_hash, discovery_payload):
    """Update discovery data.

    Replaces the stored payload for an already-registered trigger
    (KeyError if the hash is unknown).
    """
    trigger_info = hass.data[DATA_MQTT_DEBUG_INFO]["triggers"][discovery_hash]
    trigger_info["discovery_data"][ATTR_DISCOVERY_PAYLOAD] = discovery_payload
def remove_trigger_discovery_data(hass, discovery_hash):
    """Remove discovery data.

    Raises KeyError if the trigger was never registered — callers are
    expected to only remove known triggers.
    """
    hass.data[DATA_MQTT_DEBUG_INFO]["triggers"].pop(discovery_hash)
def _serialize_messages(messages):
    """Convert stored message objects into JSON-friendly dicts."""
    return [
        {
            "payload": str(msg.payload),
            "qos": msg.qos,
            "retain": msg.retain,
            "time": msg.timestamp,
            "topic": msg.topic,
        }
        for msg in messages
    ]


def _info_for_entity(hass: HomeAssistant, entity_id: str) -> dict[str, Any]:
    """Build the debug-info payload for a single entity.

    Returns a dict with the entity id, its received (subscriptions) and
    sent (transmitted) message buffers, and its discovery data.
    """
    mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
    entity_info = mqtt_debug_info["entities"][entity_id]
    # The subscription and transmission buffers share one serialization
    # format; _serialize_messages removes the previous duplication.
    subscriptions = [
        {"topic": topic, "messages": _serialize_messages(subscription["messages"])}
        for topic, subscription in entity_info["subscriptions"].items()
    ]
    transmitted = [
        {"topic": topic, "messages": _serialize_messages(subscription["messages"])}
        for topic, subscription in entity_info["transmitted"].items()
    ]
    discovery_data = {
        "topic": entity_info["discovery_data"].get(ATTR_DISCOVERY_TOPIC, ""),
        "payload": entity_info["discovery_data"].get(ATTR_DISCOVERY_PAYLOAD, ""),
    }
    return {
        "entity_id": entity_id,
        "subscriptions": subscriptions,
        "discovery_data": discovery_data,
        "transmitted": transmitted,
    }
def _info_for_trigger(hass: HomeAssistant, trigger_key: str) -> dict[str, Any]:
    """Build the debug-info payload for a single device trigger."""
    mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
    trigger = mqtt_debug_info["triggers"][trigger_key]
    # Discovery data may legitimately be None for a removed trigger.
    discovery_data = None
    if trigger["discovery_data"] is not None:
        discovery_data = {
            "topic": trigger["discovery_data"][ATTR_DISCOVERY_TOPIC],
            "payload": trigger["discovery_data"][ATTR_DISCOVERY_PAYLOAD],
        }
    return {"discovery_data": discovery_data, "trigger_key": trigger_key}
def info_for_config_entry(hass):
    """Collect debug info for every known MQTT entity and trigger."""
    debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
    return {
        "entities": [
            _info_for_entity(hass, entity_id)
            for entity_id in debug_info["entities"]
        ],
        "triggers": [
            _info_for_trigger(hass, trigger_key)
            for trigger_key in debug_info["triggers"]
        ],
    }
def info_for_device(hass, device_id):
"""Get debug info for a device."""
mqtt_info = {"entities": [], "triggers": []}
entity_registry = er.async_get(hass)
entries = er.async_entries_for_device(
entity_registry, device_id, include_disabled_entities=True
)
mqtt_debug |
TakayukiSakai/tensorflow | tensorflow/contrib/layers/python/layers/feature_column_ops.py | Python | apache-2.0 | 14,753 | 0.003254 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def input_from_feature_columns(columns_to_tensors,
                               feature_columns,
                               weight_collections=None,
                               name=None,
                               trainable=True):
  """A tf.contrib.layer style input layer builder based on FeatureColumns.
  Generally a single example in training data is described with feature columns.
  At the first layer of the model, this column oriented data should be converted
  to a single tensor. Each feature column needs a different kind of operation
  during this conversion. For example sparse features need a totally different
  handling than continuous features.
  An example usage of input_from_feature_columns is as follows:
    # Building model for training
    columns_to_tensor = tf.parse_example(...)
    first_layer = input_from_feature_columns(
        columns_to_tensor,
        feature_columns=feature_columns)
    second_layer = tf.contrib.layer.fully_connected(first_layer, ...)
    ...
  where feature_columns can be defined as follows:
  query_word = sparse_column_with_hash_bucket(
    'query_word', hash_bucket_size=int(1e6))
  query_embedding = embedding_column(query_word, dimension=16)
  age_bucket = bucketized_column(real_valued_column('age'),
                                 boundaries=[18, 21, 30, 50, 70])
  query_age = crossed_column([query_word, age_bucket],
                             hash_bucket_size=1e6)
  feature_columns=[query_embedding, query_age]
  Args:
    columns_to_tensors: A mapping from feature column to tensors. 'string' key
      means a base feature (not-transformed). It can have FeatureColumn as a
      key too. That means that FeatureColumn is already transformed by input
      pipeline. For example, `inflow` may have handled transformations.
    feature_columns: A set containing all the feature columns. All items in the
      set should be instances of classes derived by FeatureColumn.
    weight_collections: List of graph collections to which weights are added.
    name: The name for this operation is used to name operations and to find
      variables. If specified it must be unique for this scope, otherwise a
      unique name starting with "fully_connected" will be created. See
      `tf.variable_op_scope` for details.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
  Returns:
    A Tensor which can be consumed by hidden layers in the neural network.
  Raises:
    ValueError: if FeatureColumn cannot be consumed by a neural network.
  """
  with variable_scope.variable_op_scope(columns_to_tensors.values(), name,
                                        'input_from_feature_columns'):
    output_tensors = []
    transformer = _Transformer(columns_to_tensors)
    if weight_collections:
      weight_collections = list(set(list(weight_collections) +
                                    [ops.GraphKeys.VARIABLES]))
    # Sort by key for a deterministic column order, then convert each
    # transformed column to its DNN input representation.
    for column in sorted(set(feature_columns), key=lambda x: x.key):
      transformed_tensor = transformer.transform(column)
      output_tensors.append(column.to_dnn_input_layer(
          transformed_tensor, weight_collections, trainable))
    return array_ops.concat(1, output_tensors)
def weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
name=None,
trainable=True):
"""A tf.contrib.layer style linear prediction builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
This function generates weighted sum for each num_outputs. Weighted sum refers
to logits in classification problems. It refers to prediction itself for
linear regression problems.
An example usage of weighted_sum_from_feature_columns is as follows:
# Building model for training
columns_to_tensor = tf.parse_example(...)
logits = weighted_sum_from_feature_columns(
columns_to_tensor,
feature_columns=feature_columns,
num_outputs=1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels)
where feature_columns can be defined as follows:
query_word = sparse_column_with_hash_bucket(
'query_word', hash_bucket_size=int(1e6))
query_embedding = embedding_column(query_word, dimension=16)
age_bucket = bucketized_column(real_valued_column('age'),
boundaries=[18, 21, 30, 50, 70])
query_age = crossed_column([query_word, age_bucket],
hash_bucket_size=1e6)
feature_columns=[query_embedding, query_age]
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying number of outputs. Default value is 1.
weight_collections: List of graph collections to which weights are added.
name: The name for this operation is used to name operations and to find
variables. If specified it must be unique for this scope, otherwise a
unique name starting with "fully_connected" will be created. See
`tf.variable_op_scope` for details.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
Returns:
A tuple of followings:
* A Tensor which represents predictions of a linear model.
* A dictionary which maps feature_column to corresponding Variable.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
with variable_scope.variable_op_scope(columns_to_tensors.values(), name,
'weighted_sum_from_feature_columns'):
output_tensors = []
column_to_variable = dict()
transformer = _Transformer(columns_to_tensors)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
predictions, variable = column.to_weighted_sum(transformed_tensor,
num_outputs,
weight_collections,
trainable)
output_tensors.append(pred |
AudioCommons/ac-mediator | docs/conf.py | Python | apache-2.0 | 11,164 | 0.001164 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Audio Commons Service adaptor guidelines documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 8 13:46:45 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../services/'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ac_mediator.settings")
import django
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.httpdomain',
]
# Define ApiViewDocumenter class to automatically document
# api methods without adding function name and parameters
from sphinx.ext import autodoc
class ApiViewDocumenter(autodoc.FunctionDocumenter):
    """Autodoc documenter for API view functions.

    Behaves like the stock FunctionDocumenter but suppresses the directive
    header, so documented API methods appear without the function name and
    parameter signature.
    """
    # Object type used by autodoc to derive this documenter's directive name.
    objtype='apiview'
    # No extra indentation for the documented content.
    content_indent=u''
    def add_directive_header(self, sig):
        # Intentionally emit no header (name/signature) for API views.
        return
def setup(app):
    """Sphinx extension entry point: register the API-view documenter."""
    app.add_autodocumenter(ApiViewDocumenter)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Audio Commons services and API documentation'
copyright = '2016, Audio Commons consortium'
author = 'Audio Commons consortium'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# We use a customized version of sphinx_rtd_theme that can be found in this
# repository: https://github.com/AudioCommons/sphinx_rtd_theme
# To install it you should copy the contents of the folder 'sphinx_rtd_theme'
# from the repository to docs/_themes/ac_sphinx_rtd_theme
html_theme = "ac_sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': False,
'navigation_depth': 3,
'prev_next_buttons_location': None,
'logo_only': True,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes", ]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = 'Audio Commons services and API documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'project_logo_h.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search res |
aroth-arsoft/arsoft-web-openvpn | arsoft/web/utils.py | Python | gpl-3.0 | 46,718 | 0.002055 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
import sys
import types
import re
import os.path
import collections
def _get_system_language_code():
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
import locale
(lang_code, charset) = locale.getdefaultlocale()
if lang_code == None:
ret = 'en-US'
else:
# convert en_US to en-US
ret = lang_code.replace('_', '-')
return ret
def _get_system_timezone():
import time
ret = time.tzname[0]
return ret
def _get_default_admin():
import socket
fqdn = socket.getfqdn()
return ('root', 'root@' + fqdn)
def _is_running_in_devserver(appdir):
import __main__
main_script = os.path.abspath(__main__.__file__)
if main_script == os.path.join(appdir, 'manage.py'):
return True
elif '--in-development' in sys.argv:
return True
else:
return False
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    If the value is a dictionary, recursively cleanse the keys in
    that dictionary.
    """
    try:
        if HIDDEN_SETTINGS.search(key):
            cleansed = CLEANSED_SUBSTITUTE
        elif isinstance(value, dict):
            cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
        else:
            cleansed = value
    except TypeError:
        # If the key isn't regex-able (e.g. not a string), just return as-is.
        cleansed = value
    # collections.Callable was removed in Python 3.10; the builtin callable()
    # performs the same check and works on every Python version.
    if callable(cleansed):
        # For fixing #21345 and #23070
        cleansed = CallableSettingWrapper(cleansed)
    return cleansed
def get_safe_settings():
    """Return a dict of all upper-case Django settings, sensitive values blurred."""
    from django.conf import settings
    return {
        name: cleanse_setting(name, getattr(settings, name))
        for name in dir(settings)
        if name.isupper()
    }
def is_debug_info_disabled():
    """Return True when Django settings define a truthy DISABLE_DEBUG_INFO_PAGE."""
    from django.conf import settings
    return bool(getattr(settings, 'DISABLE_DEBUG_INFO_PAGE', False))
def initialize_settings(settings_module, setttings_file, options={}, use_local_tz=False):
settings_obj = sys.modules[settings_module]
settings_obj_type = type(settings_obj)
appname = settings_module
settings_module_elems = settings_module.split('.')
setttings_dir = os.path.dirname(setttings_file)
if settings_module_elems[-1] == 'settings':
appname_elems = settings_module_elems[:-1]
appname = '.'.join(appname_elems)
settings_dir_end = '/'.join(appname_elems)
app_etc_dir = os.path.join('/etc', settings_dir_end)
if setttings_dir.endswith(settings_dir_end):
appdir = setttings_dir[:-len(settings_dir_end)]
else:
appdir = setttings_dir
app_data_dir = os.path.join('/var/lib', settings_dir_end)
else:
appdir = setttings_dir
app_etc_dir = setttings_dir
app_data_dir = setttings_dir
in_devserver = _is_running_in_devserver(appdir)
if 'BASE_PATH' in os.environ:
settings_obj.BASE_PATH = os.environ['BASE_PATH']
if len(settings_obj.BASE_PATH) > 2 and settings_obj.BASE_PATH[-1] == '/':
settings_obj.BASE_PATH = settings_obj.BASE_PATH[:-1]
else:
settings_obj.BASE_PATH = ''
print('initialize_settings for ' + appname + ' appdir ' + appdir + ' debug=' | + str(in_devserver) + ' basepath=' + str(settings_obj.BASE_PATH))
if 'debug' in options:
settings_obj.DEBUG = option | s['debug']
else:
settings_obj.DEBUG = in_devserver
# If DISABLE_DEBUG_INFO_PAGE is set the
settings_obj.DISABLE_DEBUG_INFO_PAGE = False
settings_obj.ADMINS = _get_default_admin()
settings_obj.MANAGERS = settings_obj.ADMINS
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
settings_obj.USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
settings_obj.USE_L10N = True
# use the language code from the system
settings_obj.LANGUAGE_CODE = _get_system_language_code()
# If you set this to False, Django will not use timezone-aware datetimes.
settings_obj.USE_TZ = True
if use_local_tz:
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
settings_obj.TIME_ZONE = _get_system_timezone()
else:
# By default use the UTC as timezone to avoid issues when the time zone on
# the server changed (e.g. daylight saving).
settings_obj.TIME_ZONE = 'UTC'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
settings_obj.STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
settings_obj.STATIC_URL = settings_obj.BASE_PATH + '/static/'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
settings_obj.MEDIA_ROOT = app_data_dir
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
settings_obj.MEDIA_URL = settings_obj.BASE_PATH + '/media/'
settings_obj.ROOT_URLCONF = appname + '.urls'
# Python dotted path to the WSGI application used by Django's runserver.
settings_obj.WSGI_APPLICATION = appname + '.wsgi.application'
settings_obj.SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
settings_obj.MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
settings_obj.AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
settings_obj.LOGIN_URL = settings_obj.BASE_PATH + '/accounts/login/'
# use sendmail as email backend by default
settings_obj.EMAIL_BACKEND = 'arsoft.web.backends.SendmailBackend'
settings_obj.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# Additional locations of static files and the List of finder classes
# that know how to find static files in various locations.
if in_devserver:
app_static_dir = os.path.join(appdir, 'static')
if os.path.exists(app_static_dir):
settings_obj.STATICFILES_DIRS = [ app_static_dir ]
else:
settings_obj.STATICFILES_DIRS = []
else:
settings_obj.STATICFILES_DIRS = [ os.path.join(app_etc_dir, 'static') ]
settings_obj.STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder' ]
# set up the template directories and loaders
template_dirs = []
if in_devserver:
app_template_dir = os.path.join(appdir, 'templates')
if os.path.exists(app_template_dir):
template_dirs = [ app_template_dir ]
else:
template_dirs = []
else:
template_dirs = [ os.path.join(app_etc_dir, 'templates') ]
settings_obj.TEMPLATES = [
{
'BACKEND': 'djan |
grovesr/django-ims | ims/migrations/0011_auto_20160115_1328.py | Python | bsd-3-clause | 517 | 0.001934 | # -* | - coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-15 18:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax the site.address1 field definition.

    NOTE(review): the closing bracket of `operations` was corrupted by a stray
    extraction artifact in the source; restored here.
    """
    dependencies = [
        ('ims', '0010_auto_20151227_1147'),
    ]
    operations = [
        migrations.AlterField(
            model_name='site',
            name='address1',
            field=models.CharField(blank=True, default=b'', help_text=b'First street address of this site', max_length=50),
        ),
    ]
|
mezz64/home-assistant | homeassistant/components/keba/__init__.py | Python | apache-2.0 | 8,577 | 0.000816 | """Support for KEBA charging st | ations."""
# Restored: the import statement was corrupted by an extraction artifact.
import asyncio
import logging
from keba_kecontact.connection import KebaKeContact
import voluptuous as vol
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
DOMAIN = "keba"
SUPPORTED_COMPONENTS = ["binary_sensor", "sensor", "lock", "notify"]
CONF_RFID = "rfid"
CONF_FS = "failsafe"
CONF_FS_TIMEOUT = "failsafe_timeout"
CONF_FS_FALLBACK = "failsafe_fallback"
CONF_FS_PERSIST = "failsafe_persist"
CONF_FS_INTERVAL = "refresh_interval"
MAX_POLLING_INTERVAL = 5 # in seconds
MAX_FAST_POLLING_COUNT = 4
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_RFID, default="00845500"): cv.string,
vol.Optional(CONF_FS, default=False): cv.boolean,
vol.Optional(CONF_FS_TIMEOUT, default=30): cv.positive_int,
vol.Optional(CONF_FS_FALLBACK, default=6): cv.positive_int,
vol.Optional(CONF_FS_PERSIST, default=0): cv.positive_int,
vol.Optional(CONF_FS_INTERVAL, default=5): cv.positive_int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_SERVICE_MAP = {
"request_data": "async_request_data",
"set_energy": "async_set_energy",
"set_current": "async_set_current",
"authorize": "async_start",
"deauthorize": "async_stop",
"enable": "async_enable_ev",
"disable": "async_disable_ev",
"set_failsafe": "async_set_failsafe",
}
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Check connectivity and version of KEBA charging station."""
    host = config[DOMAIN][CONF_HOST]
    rfid = config[DOMAIN][CONF_RFID]
    refresh_interval = config[DOMAIN][CONF_FS_INTERVAL]
    keba = KebaHandler(hass, host, rfid, refresh_interval)
    hass.data[DOMAIN] = keba
    # Wait for KebaHandler setup complete (initial values loaded)
    if not await keba.setup():
        _LOGGER.error("Could not find a charging station at %s", host)
        return False
    # Set failsafe mode at start up of Home Assistant
    failsafe = config[DOMAIN][CONF_FS]
    # When failsafe is disabled, zeros for all three parameters turn it off.
    timeout = config[DOMAIN][CONF_FS_TIMEOUT] if failsafe else 0
    fallback = config[DOMAIN][CONF_FS_FALLBACK] if failsafe else 0
    persist = config[DOMAIN][CONF_FS_PERSIST] if failsafe else 0
    try:
        hass.loop.create_task(keba.set_failsafe(timeout, fallback, persist))
    except ValueError as ex:
        _LOGGER.warning("Could not set failsafe mode %s", ex)
    # Register services to hass
    async def execute_service(call: ServiceCall) -> None:
        """Execute a service to KEBA charging station.
        This must be a member function as we need access to the keba
        object here.
        """
        # Look up the handler coroutine named in _SERVICE_MAP and await it
        # with the raw service-call data.
        function_name = _SERVICE_MAP[call.service]
        function_call = getattr(keba, function_name)
        await function_call(call.data)
    for service in _SERVICE_MAP:
        hass.services.async_register(DOMAIN, service, execute_service)
    # Load components
    for domain in SUPPORTED_COMPONENTS:
        hass.async_create_task(
            discovery.async_load_platform(hass, domain, DOMAIN, {}, config)
        )
    # Start periodic polling of charging station data
    keba.start_periodic_request()
    return True
class KebaHandler(KebaKeContact):
"""Representation of a KEBA charging station connection."""
    def __init__(self, hass, host, rfid, refresh_interval):
        """Initialize charging station connection."""
        # The base class invokes hass_callback whenever new data arrives.
        super().__init__(host, self.hass_callback)
        self._update_listeners = []
        self._hass = hass
        self.rfid = rfid
        self.device_name = "keba"  # correct device name will be set in setup()
        self.device_id = "keba_wallbox_"  # correct device id will be set in setup()
        # Ensure at least MAX_POLLING_INTERVAL seconds delay
        self._refresh_interval = max(MAX_POLLING_INTERVAL, refresh_interval)
        # Start in "normal" mode; _set_fast_polling() resets this counter to 0.
        self._fast_polling_count = MAX_FAST_POLLING_COUNT
        self._polling_task = None
    def start_periodic_request(self):
        """Start periodic data polling."""
        # Kick off the self-rescheduling _periodic_request loop.
        self._polling_task = self._hass.loop.create_task(self._periodic_request())
async def _periodic_request(self):
"""Send periodic update requests."""
await self.request_data()
if self._fast_polling_count < MAX_FAST_POLLING_COUNT:
self._fast_polling_count += 1
_LOGGER.debug("Periodic data request executed, now wait for 2 seconds")
await asyncio.sleep(2)
else:
_LOGGER.debug(
"Periodic data request executed, now wait for %s seconds",
self._refresh_interval,
)
await asyncio.sleep(self._refresh_interval)
_LOGGER.debug("Periodic data request rescheduled")
self._polling_task = self._hass.loop.create_task(self._periodic_request())
async def setup(self, loop=None):
"""Initialize KebaHandler object."""
await super().setup(loop)
# Request initial values and extract serial number
await self.request_data()
if (
self.get_value("Serial") is not None
and self.get_value("Product") is not None
):
self.device_id = f"keba_wallbox_{self.get_value('Serial')}"
self.device_name = self.get_value("Product")
return True
return False
def hass_callback(self, data):
"""Handle component notification via callback."""
# Inform entities about updated values
for listener in self._update_listeners:
listener()
_LOGGER.debug("Notifying %d listeners", len(self._update_listeners))
    def _set_fast_polling(self):
        """Switch to fast polling for the next MAX_FAST_POLLING_COUNT cycles."""
        _LOGGER.debug("Fast polling enabled")
        self._fast_polling_count = 0
        # Restart the polling loop so the shorter interval takes effect now.
        self._polling_task.cancel()
        self._polling_task = self._hass.loop.create_task(self._periodic_request())
    def add_update_listener(self, listener):
        """Add a listener for update notifications.

        listener: zero-argument callable invoked after every data refresh.
        """
        self._update_listeners.append(listener)
        # initial data is already loaded, thus update the component
        listener()
    async def async_request_data(self, param):
        """Request new data in async way.

        `param` is the service-call payload; it is not used here.
        """
        await self.request_data()
        _LOGGER.debug("New data from KEBA wallbox requested")
async def async_set_energy(self, param):
"""Set energy target in async way."""
try:
energy = param["energy"]
await self.set_energy(float(energy))
self._set_fast_polling()
except (KeyError, ValueError) as ex:
_LOGGER.warning("Energy value is not correct. %s", ex)
async def async_set_current(self, param):
"""Set current maximum in async way."""
try:
current = param["current"]
await self.set_current(float(current))
# No fast polling as this function might be called regularly
except (KeyError, ValueError) as ex:
_LOGGER.warning("Current value is not correct. %s", ex)
    async def async_start(self, param=None):
        """Authorize EV in async way."""
        # Authorize charging with the configured RFID tag.
        await self.start(self.rfid)
        self._set_fast_polling()
    async def async_stop(self, param=None):
        """De-authorize EV in async way."""
        # Revoke the charging authorization for the configured RFID tag.
        await self.stop(self.rfid)
        self._set_fast_polling()
    async def async_enable_ev(self, param=None):
        """Enable EV in async way."""
        # Enable charging output on the wallbox.
        await self.enable(True)
        self._set_fast_polling()
    async def async_disable_ev(self, param=None):
        """Disable EV in async way."""
        # Disable charging output on the wallbox.
        await self.enable(False)
        self._set_fast_polling()
async def async_set_failsafe(self, param=None):
"""Set failsafe mode in async way."""
try:
timeout = param[CONF_FS_TIMEOUT]
fallback = param[CONF_FS_FALLBACK]
|
stefan-caraiman/cloudbase-init | cloudbaseinit/shell.py | Python | apache-2.0 | 1,433 | 0 | # Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
import sys
# Restored: the 'if' line was corrupted by an extraction artifact.
if struct.calcsize("P") == 8 and sys.platform == 'win32':
    # This is needed by Nano Server.
    # Set COINIT_MULTITHREADED only on x64 interpreters due to issues on x86.
    import pythoncom
    sys.coinit_flags = pythoncom.COINIT_MULTITHREADED
    pythoncom.CoInitializeEx(pythoncom.COINIT_MULTITHREADED)
from oslo_log import log as oslo_logging
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit import init
from cloudbaseinit.utils import log as logging
CONF = cloudbaseinit_conf.CONF
LOG = oslo_logging.getLogger(__name__)
def main():
    """Entry point: parse CLI args, set up logging, and configure the host."""
    CONF(sys.argv[1:])
    logging.setup('cloudbaseinit')
    try:
        init.InitManager().configure_host()
    except Exception as exc:
        # Log the full traceback before re-raising so failures reach the log.
        LOG.exception(exc)
        raise
if __name__ == "__main__":
    # Script entry point when run directly (not imported).
    main()
|
JeffRoy/mi-dataset | mi/dataset/driver/ctdmo_ghqr/sio/test/test_ctdmo_ghqr_sio_co_recovered_driver.py | Python | bsd-2-clause | 1,101 | 0.011807 |
__author__ = 'mworden'
from mi.core.log import get_logger
log = get_logger()
from mi.idk.config import Config
import unittest
import os
# Restored: both import lines were corrupted by extraction artifacts.
from mi.dataset.driver.ctdmo_ghqr.sio.ctdmo_ghqr_sio_co_recovered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler
class SampleTest(unittest.TestCase):
    """Integration test for the ctdmo_ghqr recovered-CO dataset driver."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_one(self):
        """Parse the sample CTD15906.DAT file; the driver must not report failure."""
        sourceFilePath = os.path.join('mi', 'dataset', 'driver', 'ctdmo_ghqr', 'sio',
                                      'resource', 'CTD15906.DAT')
        particle_data_hdlr_obj = ParticleDataHandler()
        particle_data_hdlr_obj = parse(Config().base_dir(), sourceFilePath, particle_data_hdlr_obj)
        # The Python 2 'print' statements were removed: they broke Python 3
        # parsing and duplicated the logger output below.
        log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
        log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(particle_data_hdlr_obj._failure, False)
if __name__ == '__main__':
    # Allow running this single test directly without a test runner.
    test = SampleTest('test_one')
    test.test_one()
|
dfdx2/ancfinder | scripts/update_building_permits.py | Python | cc0-1.0 | 1,929 | 0.016071 | import requests, zipfile, subprocess, csv, json
update_data_file = 'http://data.octo.dc.gov/feeds/dcra_building_permits/dcra_building_permits_current_csv.zip'
zip_filename = 'dcra_building_permits_current_csv.zip'
# Gather the f | ile
zip_request = requests.get('http://data.octo.dc.gov/feeds/dcra_building_permits/dcra_building_permits_current_csv.zip', stream=True)
# Download the file and save locally
with open(zip_filename, 'wb') as zip_file:
for chunk in zip_request.iter_content(chunk_size=1024):
if chunk:
| zip_file.write(chunk)
zip_file.flush()
# Unzip the locally saved file
zipfile.main(['-e', zip_filename, 'data'])
# Clean up downloaded file
subprocess.call('rm dcra_building_permits_current_csv.zip', shell=True)
# Calculate permits in each ANC
permits_read = csv.reader(open('data/dcra_building_permits_current_csv.csv'), delimiter=',')
anc_permits_write = csv.writer(open('data/anc-building-permits.csv', 'w'), delimiter=',')
smd_permits_write = csv.writer(open('data/smd-building-permits.csv', 'w'), delimiter=',')
anc_permits = {}
smd_permits = {}
# The DC CSV does have ANC and SMD data, but it includes SMDs that don't exist, so we need to do it ourselves
for rec in permits_read:
if rec[26] in ['NONE', 'SMD']:
continue
url = "http://gis.govtrack.us/boundaries/dc-smd-2013/?contains=" + rec[18] + "," + rec[19]
request = requests.get(url)
data = json.loads(request.text)
smd = data['objects'][0]['external_id']
anc = data['objects'][0]['external_id'][:2]
print smd, rec[20], rec[19]
if smd in smd_permits:
smd_permits[smd] += 1
else:
smd_permits[smd] = 1
if anc in anc_permits:
anc_permits[anc] += 1
else:
anc_permits[anc] = 1
for rec in sorted(smd_permits):
output = [rec] + [smd_permits[rec]]
smd_permits_write.writerow(output)
for rec in sorted(anc_permits):
output = [rec] + [anc_permits[rec]]
anc_permits_write.writerow(output)
|
WST/django-project-template | portal/middleware.py | Python | mit | 388 | 0.025773 | # -*- coding: utf-8 -*-
class PortalMiddleware:
    """Pass-through Django middleware hook for the portal app.

    NOTE(review): two lines were corrupted by extraction artifacts and have
    been restored ('def __call__' and a comment).
    """
    def __init__(self, get_response):
        # get_response is the next middleware/view in the chain.
        self.get_response = get_response
    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        response = self.get_response(request)
        # Code to be executed for each request/response after
        # the view is called.
        return response
|
cmayes/md_utils | tests/test_press_dups.py | Python | bsd-3-clause | 1,638 | 0.001832 | # coding=utf-8
"""
Tests for wham_rad.
"""
import unittest
import os
from md_utils.md_common import silent_remove, diff_lines
from md_utils.press_dups import avg_rows, compress_dups, main
__author__ = 'mayes'
DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
DUPS_DIR = os.path.join(DATA_DIR, 'press_dups')
HEAD_RAW = os.path.join(DUPS_DIR, 'proc_data_all_head0.75.csv')
HEAD_STD = os.path.join(DUPS_DIR, 'std_proc_data_all_head0.75.csv')
HEAD_PRESS = os.path.join(DUPS_DIR, 'pressed_' + 'proc_data_all_head0.75.csv')
# Shared Methods #
class TestAvgRows(unittest.TestCase):
    def testThree(self):
        """Three rows sharing 'a' average field-by-field."""
        rows = [{"a": 1.3, "b": 3.0, "c": 8.5}, {"a": 1.3, "b": 1.0, "c": -4.2},
                {"a": 1.3, "b": 2.2, "c": 19.0}]
        avg = avg_rows(rows)
        expected = {"a": 1.3, "b": 2.066666666666, "c": 7.766666666666}
        for field, value in expected.items():
            self.assertAlmostEqual(avg[field], value)
class TestPressDups(unittest.TestCase):
    def testThree(self):
        """Five rows with three distinct 'a' keys compress down to three rows."""
        # The literal 2.2 was corrupted by an extraction artifact; restored.
        data = [{"a": 1.3, "b": 3.0, "c": 8.5}, {"a": 1.3, "b": 1.0, "c": -4.2},
                {"a": 1.3, "b": 2.2, "c": 19.0}, {"a": 99, "b": 1.0, "c": -4.2},
                {"a": -22, "b": 1.0, "c": -4.2}]
        avg = compress_dups(data, "a")
        self.assertEqual(3, len(avg))
class TestMainNoOutput(unittest.TestCase):
    # Smoke test: main() with an empty argument list must not raise.
    def testNoArg(self):
        main([])
class TestMain(unittest.TestCase):
    def testWithHead075Data(self):
        """Run main() on the raw head file and compare with the gold output."""
        try:
            # 'HEAD_RAW' was corrupted by an extraction artifact; restored.
            main(argv=[HEAD_RAW])
            self.assertFalse(diff_lines(HEAD_STD, HEAD_PRESS))
        finally:
            # Always remove the generated file, even when the diff fails.
            silent_remove(HEAD_PRESS)
|
Yarrick13/hwasp | tests/asp/AllAnswerSets/aggregates/solitaire15.test.py | Python | apache-2.0 | 40,300 | 0.000149 | input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
1 21 0 0
1 22 0 0
1 23 0 0
1 24 0 0
1 25 0 0
1 26 0 0
1 27 0 0
1 28 0 0
1 29 0 0
1 30 0 0
1 31 0 0
1 32 0 0
1 33 0 0
1 34 0 0
1 35 0 0
1 36 0 0
1 37 0 0
1 38 0 0
1 39 0 0
1 40 0 0
1 41 0 0
1 42 0 0
1 43 0 0
1 44 0 0
1 45 0 0
1 46 0 0
1 47 0 0
1 48 0 0
1 49 0 0
1 50 0 0
1 51 0 0
1 52 0 0
1 53 0 0
1 54 0 0
1 55 0 0
3 132 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 1 0 188
2 189 132 0 1 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187
1 1 2 1 189 188
2 190 132 0 2 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187
1 1 2 0 190 188
1 188 0 0
3 132 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 1 0 323
2 324 132 0 1 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322
1 1 2 1 324 323
2 325 132 0 2 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 | 301 302 303 304 305 306 307 308 | 309 310 311 312 313 314 315 316 317 318 319 320 321 322
1 1 2 0 325 323
1 323 0 0
1 326 1 0 187
1 327 1 0 186
1 328 1 0 185
1 329 1 0 184
1 330 1 0 183
1 331 1 0 182
1 332 1 0 181
1 333 1 0 180
1 334 1 0 179
1 335 1 0 178
1 336 1 0 177
1 337 1 0 176
1 338 1 0 175
1 339 1 0 174
1 340 1 0 173
1 341 1 0 172
1 342 1 0 171
1 343 1 0 170
1 344 1 0 169
1 345 1 0 168
1 346 1 0 167
1 347 1 0 166
1 348 1 0 165
1 349 1 0 164
1 350 1 0 163
1 351 1 0 162
1 352 1 0 161
1 353 1 0 160
1 354 1 0 159
1 355 1 0 158
1 356 1 0 157
1 357 1 0 156
1 358 1 0 155
1 359 1 0 322
1 360 1 0 321
1 361 1 0 320
1 362 1 0 319
1 363 1 0 318
1 364 1 0 317
1 365 1 0 316
1 366 1 0 315
1 367 1 0 314
1 368 1 0 313
1 369 1 0 312
1 370 1 0 311
1 371 1 0 310
1 372 1 0 309
1 373 1 0 308
1 374 1 0 307
1 375 1 0 306
1 376 1 0 305
1 377 1 0 304
1 378 1 0 303
1 379 1 0 302
1 380 1 0 301
1 381 1 0 300
1 382 1 0 299
1 383 1 0 298
1 384 1 0 297
1 385 1 0 296
1 386 1 0 295
1 387 1 0 294
1 388 1 0 293
1 389 1 0 292
1 390 1 0 291
1 391 1 0 290
1 327 1 0 187
1 328 1 0 186
1 330 1 0 184
1 331 1 0 183
1 333 1 0 181
1 334 1 0 180
1 335 1 0 179
1 336 1 0 178
1 337 1 0 177
1 338 1 0 176
1 340 1 0 174
1 341 1 0 173
1 342 1 0 172
1 343 1 0 171
1 344 1 0 170
1 345 1 0 169
1 347 1 0 167
1 348 1 0 166
1 349 1 0 165
1 350 1 0 164
1 351 1 0 163
1 352 1 0 162
1 354 1 0 160
1 355 1 0 159
1 357 1 0 157
1 358 1 0 156
1 360 1 0 322
1 361 1 0 321
1 363 1 0 319
1 364 1 0 318
1 366 1 0 316
1 367 1 0 315
1 368 1 0 314
1 369 1 0 313
1 370 1 0 312
1 371 1 0 311
1 373 1 0 309
1 374 1 0 308
1 375 1 0 307
1 376 1 0 306
1 377 1 0 305
1 378 1 0 304
1 380 1 0 302
1 381 1 0 301
1 382 1 0 300
1 383 1 0 299
1 384 1 0 298
1 385 1 0 297
1 387 1 0 295
1 388 1 0 294
1 390 1 0 292
1 391 1 0 291
1 392 1 0 187
1 393 1 0 184
1 394 1 0 181
1 395 1 0 180
1 396 1 0 179
1 397 1 0 178
1 398 1 0 177
1 399 1 0 174
1 400 1 0 173
1 401 1 0 172
1 402 1 0 171
1 403 1 0 170
1 404 1 0 167
1 405 1 0 166
1 406 1 0 165
1 407 1 0 164
1 408 1 0 163
1 409 1 0 160
1 410 1 0 157
1 411 1 0 322
1 412 1 0 319
1 413 1 0 316
1 414 1 0 315
1 415 1 0 314
1 416 1 0 313
1 417 1 0 312
1 418 1 0 309
1 419 1 0 308
1 420 1 0 307
1 421 1 0 306
1 422 1 0 305
1 423 1 0 302
1 424 1 0 301
1 425 1 0 300
1 426 1 0 299
1 427 1 0 298
1 428 1 0 295
1 429 1 0 292
1 326 1 0 154
1 327 1 0 153
1 328 1 0 152
1 329 1 0 151
1 330 1 0 150
1 331 1 0 149
1 332 1 0 148
1 333 1 0 147
1 334 1 0 146
1 335 1 0 145
1 336 1 0 144
1 337 1 0 143
1 338 1 0 142
1 339 1 0 141
1 340 1 0 140
1 341 1 0 139
1 342 1 0 138
1 343 1 0 137
1 344 1 0 136
1 345 1 0 135
1 346 1 0 134
1 347 1 0 133
1 348 1 0 132
1 349 1 0 131
1 350 1 0 130
1 351 1 0 129
1 352 1 0 128
1 353 1 0 127
1 354 1 0 126
1 355 1 0 125
1 356 1 0 124
1 357 1 0 123
1 358 1 0 122
1 359 1 0 289
1 360 1 0 288
1 361 1 0 287
1 362 1 0 286
1 363 1 0 285
1 364 1 0 284
1 365 1 0 283
1 366 1 0 282
1 367 1 0 281
1 368 1 0 280
1 369 1 0 279
1 370 1 0 278
1 371 1 0 277
1 372 1 0 276
1 373 1 0 275
1 374 1 0 274
1 375 1 0 273
1 376 1 0 272
1 377 1 0 271
1 378 1 0 270
1 379 1 0 269
1 380 1 0 268
1 381 1 0 267
1 382 1 0 266
1 383 1 0 265
1 384 1 0 264
1 385 1 0 263
1 386 1 0 262
1 387 1 0 261
1 388 1 0 260
1 389 1 0 259
1 390 1 0 258
1 391 1 0 257
1 326 1 0 153
1 327 1 0 152
1 329 1 0 150
1 330 1 0 149
1 332 1 0 147
1 333 1 0 146
1 334 1 0 145
1 335 1 0 144
1 336 1 0 143
1 337 1 0 142
1 339 1 0 140
1 340 1 0 139
1 341 1 0 138
1 342 1 0 137
1 343 1 0 136
1 344 1 0 135
1 346 1 0 133
1 347 1 0 132
1 348 1 0 131
1 349 1 0 130
1 350 1 0 129
1 351 1 0 128
1 353 1 0 126
1 354 1 0 125
1 356 1 0 123
1 357 1 0 122
1 359 1 0 288
1 360 1 0 287
1 362 1 0 285
1 363 1 0 284
1 365 1 0 282
1 366 1 0 281
1 367 1 0 280
1 368 1 0 279
1 369 1 0 278
1 370 1 0 277
1 372 1 0 275
1 373 1 0 274
1 374 1 0 273
1 375 1 0 272
1 376 1 0 271
1 377 1 0 270
1 379 1 0 268
1 380 1 0 267
1 381 1 0 266
1 382 1 0 265
1 383 1 0 264
1 384 1 0 263
1 386 1 0 261
1 387 1 0 260
1 389 1 0 258
1 390 1 0 257
1 430 1 0 152
1 431 1 0 149
1 432 1 0 146
1 433 1 0 145
1 394 1 0 144
1 395 1 0 143
1 396 1 0 142
1 434 1 0 139
1 435 1 0 138
1 399 1 0 137
1 400 1 0 136
1 401 1 0 135
1 436 1 0 132
1 437 1 0 131
1 404 1 0 130
1 405 1 0 129
1 406 1 0 128
1 438 1 0 125
1 439 1 0 122
1 440 1 0 287
1 441 1 0 284
1 442 1 0 281
1 443 1 0 280
1 413 1 0 279
1 414 1 0 278
1 415 1 0 277
1 444 1 0 274
1 445 1 0 273
1 418 1 0 272
1 419 1 0 271
1 420 1 0 270
1 446 1 0 267
1 447 1 0 266
1 423 1 0 265
1 424 1 0 264
1 425 1 0 263
1 448 1 0 260
1 449 1 0 257
1 326 1 0 121
1 327 1 0 120
1 328 1 0 119
1 329 1 0 118
1 330 1 0 117
1 331 1 0 116
1 332 1 0 115
1 333 1 0 114
1 334 |
sergeyf/scikit-learn | sklearn/decomposition/_kernel_pca.py | Python | bsd-3-clause | 21,772 | 0.000459 | """Kernel Principal Components Analysis."""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Sylvain Marie <sylvain.marie@schneider-electric.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from scipy.sparse.linalg import eigsh
from ..utils._arpack import _init_arpack_v0
from ..utils.extmath import svd_flip, _randomized_eigsh
from ..utils.validation import (
check_is_fitted,
_check_psd_eigenvalues,
)
from ..utils.deprecation import deprecated
from ..exceptions import NotFittedError
from ..base import BaseEsti | mator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..preprocessing import KernelCenterer
from ..metrics.pairwise import pairwise_kernels
class KernelPCA(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Kerne | l Principal component analysis (KPCA) [1]_.
Non-linear dimensionality reduction through the use of kernels (see
:ref:`metrics`).
It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD
or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the
truncated SVD, depending on the shape of the input data and the number of
components to extract. It can also use a randomized truncated SVD by the
method proposed in [3]_, see `eigen_solver`.
Read more in the :ref:`User Guide <kernel_PCA>`.
Parameters
----------
n_components : int, default=None
Number of components. If None, all non-zero components are kept.
kernel : {'linear', 'poly', \
'rbf', 'sigmoid', 'cosine', 'precomputed'}, default='linear'
Kernel used for PCA.
gamma : float, default=None
Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other
kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``.
degree : int, default=3
Degree for poly kernels. Ignored by other kernels.
coef0 : float, default=1
Independent term in poly and sigmoid kernels.
Ignored by other kernels.
kernel_params : dict, default=None
Parameters (keyword arguments) and
values for kernel passed as callable object.
Ignored by other kernels.
alpha : float, default=1.0
Hyperparameter of the ridge regression that learns the
inverse transform (when fit_inverse_transform=True).
fit_inverse_transform : bool, default=False
Learn the inverse transform for non-precomputed kernels
(i.e. learn to find the pre-image of a point). This method is based
on [2]_.
eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \
default='auto'
Select eigensolver to use. If `n_components` is much
less than the number of training samples, randomized (or arpack to a
smaller extend) may be more efficient than the dense eigensolver.
Randomized SVD is performed according to the method of Halko et al
[3]_.
auto :
the solver is selected by a default policy based on n_samples
(the number of training samples) and `n_components`:
if the number of components to extract is less than 10 (strict) and
the number of samples is more than 200 (strict), the 'arpack'
method is enabled. Otherwise the exact full eigenvalue
decomposition is computed and optionally truncated afterwards
('dense' method).
dense :
run exact full eigenvalue decomposition calling the standard
LAPACK solver via `scipy.linalg.eigh`, and select the components
by postprocessing
arpack :
run SVD truncated to n_components calling ARPACK solver using
`scipy.sparse.linalg.eigsh`. It requires strictly
0 < n_components < n_samples
randomized :
run randomized SVD by the method of Halko et al. [3]_. The current
implementation selects eigenvalues based on their module; therefore
using this method can lead to unexpected results if the kernel is
not positive semi-definite. See also [4]_.
.. versionchanged:: 1.0
`'randomized'` was added.
tol : float, default=0
Convergence tolerance for arpack.
If 0, optimal value will be chosen by arpack.
max_iter : int, default=None
Maximum number of iterations for arpack.
If None, optimal value will be chosen by arpack.
iterated_power : int >= 0, or 'auto', default='auto'
Number of iterations for the power method computed by
svd_solver == 'randomized'. When 'auto', it is set to 7 when
`n_components < 0.1 * min(X.shape)`, other it is set to 4.
.. versionadded:: 1.0
remove_zero_eig : bool, default=False
If True, then all components with zero eigenvalues are removed, so
that the number of components in the output may be < n_components
(and sometimes even zero due to numerical instability).
When n_components is None, this parameter is ignored and components
with zero eigenvalues are removed regardless.
random_state : int, RandomState instance or None, default=None
Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 0.18
copy_X : bool, default=True
If True, input X is copied and stored by the model in the `X_fit_`
attribute. If no further changes will be done to X, setting
`copy_X=False` saves memory by storing a reference.
.. versionadded:: 0.18
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
Attributes
----------
eigenvalues_ : ndarray of shape (n_components,)
Eigenvalues of the centered kernel matrix in decreasing order.
If `n_components` and `remove_zero_eig` are not set,
then all values are stored.
lambdas_ : ndarray of shape (n_components,)
Same as `eigenvalues_` but this attribute is deprecated.
.. deprecated:: 1.0
`lambdas_` was renamed to `eigenvalues_` in version 1.0 and will be
removed in 1.2.
eigenvectors_ : ndarray of shape (n_samples, n_components)
Eigenvectors of the centered kernel matrix. If `n_components` and
`remove_zero_eig` are not set, then all components are stored.
alphas_ : ndarray of shape (n_samples, n_components)
Same as `eigenvectors_` but this attribute is deprecated.
.. deprecated:: 1.0
`alphas_` was renamed to `eigenvectors_` in version 1.0 and will be
removed in 1.2.
dual_coef_ : ndarray of shape (n_samples, n_features)
Inverse transform matrix. Only available when
``fit_inverse_transform`` is True.
X_transformed_fit_ : ndarray of shape (n_samples, n_components)
Projection of the fitted data on the kernel principal components.
Only available when ``fit_inverse_transform`` is True.
X_fit_ : ndarray of shape (n_samples, n_features)
The data used to fit the model. If `copy_X=False`, then `X_fit_` is
a reference. This attribute is used for the calls to transform.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
FastICA : A fast algorithm for Independent Component Analysis.
IncrementalPCA : Incremental Principal Component Analysis.
NMF : Non-Negative Matrix Factorization.
PCA : Principal Component Analysis.
SparsePCA : Sparse Principal Component Analysis.
TruncatedSVD : Dimensionality reduction |
ColumbiaCMB/kid_readout | apps/data_taking_scripts/old_scripts/highq_power_sweep_0813f12.py | Python | bsd-2-clause | 5,967 | 0.019105 | import matplotlib
# --- Power-sweep setup (Python 2 hardware-control script) ---
# Drives a ROACH baseband readout to sweep KID resonators at several DAC
# attenuations.  Code left byte-identical; comments added for review.
from kid_readout.roach import baseband
matplotlib.use('agg')
import numpy as np
import time
import sys
from kid_readout.utils import data_file,sweeps
from kid_readout.analysis.resonator import fit_best_resonator
# Initialize the ROACH readout hardware.
ri = baseband.RoachBasebandWide()
ri.initialize()
#ri.set_fft_gain(6)
#f0s = np.load('/home/gjones/workspace/apps/f8_fit_resonances.npy')
#f0s = np.load('/home/gjones/workspace/apps/first_pass_sc3x3_0813f9.npy')
#f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f10_first_pass.npy')#[:4]
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f9_2014-02-11.npy')
#f0s = np.load('/home/gjones/workspace/readout/apps/sc3x3_0813f5_2014-02-27.npy')
# Resonance frequency list for device sc5x4_0813f12
# (presumably MHz -- TODO confirm units against sweeps.prepare_sweep).
f0s = np.load('/home/gjones/workspace/apps/sc5x4_0813f12.npy')
f0s.sort()
#f0s = f0s*(0.9995)
suffix = "power"
nf = len(f0s)
# Tones are processed in groups of `atonce`; pad the list with dummy
# frequencies above the band so the count divides evenly.
atonce = 4
if nf % atonce > 0:
    print "extending list of resonators to make a multiple of ",atonce
    f0s = np.concatenate((f0s,np.arange(1,1+atonce-(nf%atonce))+f0s.max()))
offsets = np.linspace(-4882.8125,4638.671875,20)#[5:15]
offsets = offsets
#offsets = np.concatenate(([-40e3,-20e3],offsets,[20e3,40e3]))/1e6
offsets = np.concatenate(([-40e3],offsets,[40e3]))/1e6
#offsets = offsets*4
# Coarse-sweep parameters.  NOTE(review): `offsets` is fully recomputed from
# `offset_bins` below, so the linspace/concatenate assignments above are
# effectively dead code kept from earlier experiments.
nsamp = 2**18
step = 1
nstep = 80
f0binned = np.round(f0s*nsamp/512.0)*512.0/nsamp
offset_bins = np.arange(-(nstep+1),(nstep+1))*step
offsets = offset_bins*512.0/nsamp
offsets = np.concatenate(([offsets.min()-20e-3,],offsets,[offsets.max()+20e-3]))
print f0s
print offsets*1e6
print len(f0s)
# Optional wait for the millikelvin stage to warm past 0.348 K (disabled).
if False:
    from kid_readout.utils.parse_srs import get_all_temperature_data
    while True:
        temp = get_all_temperature_data()[1][-1]
        print "mk stage at", temp
        if temp > 0.348:
            break
        time.sleep(300)
    time.sleep(600)
start = time.time()
use_fmin = True
# DAC attenuation settings to sweep over (dB).
attenlist = np.linspace(33,45,5)-6
#attenlist = [44.0]
#attenlist = attenlist[:4]
# Main acquisition loop: for each attenuation, do a coarse sweep, refit the
# resonances, do a fine sweep around the refined centers, then record 30 s
# of demodulated timestream data per channel group.
for atten in attenlist:
    print "setting attenuator to",atten
    ri.set_dac_attenuator(atten)
    measured_freqs = sweeps.prepare_sweep(ri,f0binned,offsets,nsamp=nsamp)
    print "loaded waveforms in", (time.time()-start),"seconds"
    sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8)
    orig_sweep_data = sweep_data
    meas_cfs = []
    idxs = []
    delays = []
    # First pass: fit each resonance, estimate cable delay, and choose a
    # refined center frequency per resonator.
    for m in range(len(f0s)):
        fr,s21,errors = sweep_data.select_by_freq(f0s[m])
        thiscf = f0s[m]
        res = fit_best_resonator(fr[1:-1],s21[1:-1],errors=errors[1:-1]) #Resonator(fr,s21,errors=errors)
        delay = res.delay
        delays.append(delay)
        # Remove the fitted cable delay and refit on the full span.
        s21 = s21*np.exp(2j*np.pi*res.delay*fr)
        res = fit_best_resonator(fr,s21,errors=errors)
        fmin = fr[np.abs(s21).argmin()]
        print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0, "delay",delay,"resid delay",res.delay
        if use_fmin:
            meas_cfs.append(fmin)
        else:
            # Fall back in order of trust: fit -> |S21| minimum -> original guess.
            if abs(res.f_0 - thiscf) > 0.1:
                if abs(fmin - thiscf) > 0.1:
                    print "using original guess"
                    meas_cfs.append(thiscf)
                else:
                    print "using fmin"
                    meas_cfs.append(fmin)
            else:
                print "using this fit"
                meas_cfs.append(res.f_0)
        idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
        idxs.append(idx)
    delay = np.median(delays)
    print "median delay is ",delay
    # Fine sweep: longer waveforms (2**22 samples) and narrow offsets around
    # the refined center frequencies.
    nsamp = 2**22
    step = 1
    f0binned_meas = np.round(f0s*nsamp/512.0)*512.0/nsamp
    offset_bins = np.array([-8,-4,-2,-1,0,1,2,4])#np.arange(-4,4)*step
    offset_bins = np.concatenate(([-40,-20],offset_bins,[20,40]))
    offsets = offset_bins*512.0/nsamp
    meas_cfs = np.array(meas_cfs)
    f0binned_meas = np.round(meas_cfs*nsamp/512.0)*512.0/nsamp
    f0s = f0binned_meas
    measured_freqs = sweeps.prepare_sweep(ri,f0binned_meas,offsets,nsamp=nsamp)
    print "loaded updated waveforms in", (time.time()-start),"seconds"
    sys.stdout.flush()
    time.sleep(1)
    df = data_file.DataFile(suffix=suffix)
    df.log_hw_state(ri)
    sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=8, sweep_data=orig_sweep_data)
    df.add_sweep(sweep_data)
    meas_cfs = []
    idxs = []
    # Second pass: refit using the median delay from the coarse sweep.
    for m in range(len(f0s)):
        fr,s21,errors = sweep_data.select_by_freq(f0s[m])
        thiscf = f0s[m]
        s21 = s21*np.exp(2j*np.pi*delay*fr)
        res = fit_best_resonator(fr,s21,errors=errors) #Resonator(fr,s21,errors=errors)
        fmin = fr[np.abs(s21).argmin()]
        print "s21 fmin", fmin, "original guess",thiscf,"this fit", res.f_0
        if use_fmin:
            meas_cfs.append(fmin)
        else:
            if abs(res.f_0 - thiscf) > 0.1:
                if abs(fmin - thiscf) > 0.1:
                    print "using original guess"
                    meas_cfs.append(thiscf)
                else:
                    print "using fmin"
                    meas_cfs.append(fmin)
            else:
                print "using this fit"
                meas_cfs.append(res.f_0)
        idx = np.unravel_index(abs(measured_freqs - meas_cfs[-1]).argmin(),measured_freqs.shape)
        idxs.append(idx)
    print meas_cfs
    ri.add_tone_freqs(np.array(meas_cfs))
    ri.select_bank(ri.tone_bins.shape[0]-1)
    ri._sync()
    time.sleep(0.5)
    #raw_input("turn on LED take data")
    df.log_hw_state(ri)
    # Stream timestream data in interleaved channel groups of `atonce`.
    nsets = len(meas_cfs)/atonce
    tsg = None
    for iset in range(nsets):
        selection = range(len(meas_cfs))[iset::nsets]
        ri.select_fft_bins(selection)
        ri._sync()
        time.sleep(0.2)
        t0 = time.time()
        dmod,addr = ri.get_data_seconds(30,demod=True)
        print nsets,iset,tsg
        tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg)
        df.sync()
    df.nc.close()
print "completed in",((time.time()-start)/60.0),"minutes"
|
kxepal/replipy | replipy/peer.py | Python | mit | 7,259 | 0.000138 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import functools
import json
import flask
import werkzeug.exceptions
import werkzeug.http
from flask import current_app as app
from .storage import ABCDatabase
# Flask blueprint implementing a CouchDB-compatible replication HTTP API.
replipy = flask.Blueprint('replipy', __name__)
def make_response(code, data):
    """Serialize *data* as JSON into a Flask response with status *code*."""
    response = flask.make_response(json.dumps(data))
    response.status_code = code
    response.headers['Content-Type'] = 'application/json'
    return response
def make_error_response(code, error, reason):
    """Build a CouchDB-style JSON error response.

    *reason* may be an HTTPException (its description is used) or any
    object convertible to a string.
    """
    if isinstance(reason, werkzeug.exceptions.HTTPException):
        text = reason.description
    else:
        text = str(reason)
    return make_response(code, {'error': error, 'reason': text})
def database_should_exists(func):
    """Decorator: abort with 404 when the addressed database is unknown."""
    @functools.wraps(func)
    def wrapper(dbname, *args, **kwargs):
        if dbname in app.dbs:
            return func(dbname, *args, **kwargs)
        # flask.abort raises, so this return never yields a normal value.
        return flask.abort(404, '%s missed' % dbname)
    return wrapper
@replipy.record_once
def setup(state):
    """One-time blueprint setup: choose the database class and init the registry."""
    state.app.db_cls = state.options.get('db_cls', ABCDatabase)
    state.app.dbs = {}
@replipy.errorhandler(400)
def bad_request(err):
    """Render HTTP 400 as a CouchDB-style JSON error body."""
    return make_error_response(400, 'bad_request', err)
@replipy.errorhandler(404)
@replipy.errorhandler(ABCDatabase.NotFound)
def not_found(err):
    """Render missing databases/documents as CouchDB's `not_found` error."""
    return make_error_response(404, 'not_found', err)
@replipy.errorhandler(409)
@replipy.errorhandler(ABCDatabase.Conflict)
def conflict(err):
    """Render update conflicts as CouchDB's `conflict` error."""
    return make_error_response(409, 'conflict', err)
@replipy.errorhandler(412)
def db_exists(err):
    """Render HTTP 412 as CouchDB's `db_exists` error (database already present)."""
    return make_error_response(412, 'db_exists', err)
@replipy.route('/<dbname>/', methods=['HEAD', 'GET', 'PUT'])
def database(dbname):
    """Database resource: existence/info (HEAD, GET) and creation (PUT)."""
    if flask.request.method == 'PUT':
        if dbname in app.dbs:
            return flask.abort(412, dbname)
        app.dbs[dbname] = app.db_cls(dbname)
        return make_response(201, {'ok': True})
    # HEAD and GET share the same lookup; Flask strips the body for HEAD.
    if dbname not in app.dbs:
        return flask.abort(404, '%s missed' % dbname)
    return make_response(200, app.dbs[dbname].info())
@replipy.route('/<dbname>/<docid>', methods=['HEAD', 'GET', 'PUT', 'DELETE'])
@database_should_exists
def document(dbname, docid):
    """Document resource: fetch, store (JSON or multipart/related) and delete.

    The HTTP verb is dispatched to the matching nested handler via
    ``locals()`` at the bottom of the function.
    """
    def head():
        return get()
    def get():
        doc = db.load(docid, flask.request.args.get('rev', None))
        return make_response(200, doc)
    def put():
        rev = flask.request.args.get('rev')
        # new_edits=false is used by the replicator to push existing revisions.
        new_edits = json.loads(flask.request.args.get('new_edits', 'true'))
        if flask.request.mimetype == 'application/json':
            doc = flask.request.get_json()
        elif flask.request.mimetype == 'multipart/related':
            parts = parse_multipart_data(
                flask.request.stream, flask.request.mimetype_params['boundary'])
            # CouchDB has an agreement, that document goes before attachments
            # which simplifies processing logic and reduces footprint
            headers, body = next(parts)
            assert headers['Content-Type'] == 'application/json'
            doc = json.loads(body.decode())
            # We have to inject revision into doc there to correct compute
            # revpos field for attachments
            doc.setdefault('_rev', rev)
            for headers, body in parts:
                params = werkzeug.http.parse_options_header(
                    headers['Content-Disposition'])[1]
                fname = params['filename']
                ctype = headers['Content-Type']
                db.add_attachment(doc, fname, body, ctype)
        else:
            # mimics to CouchDB response in case of unsupported mime-type
            return flask.abort(400)
        doc['_id'] = docid
        idx, rev = db.store(doc, rev, new_edits)
        return make_response(201, {'ok': True, 'id': idx, 'rev': rev})
    def delete():
        idx, rev = db.remove(docid, flask.request.args.get('rev', None))
        return make_response(201, {'ok': True, 'id': idx, 'rev': rev})
    db = app.dbs[dbname]
    return locals()[flask.request.method.lower()]()
@replipy.route('/<dbname>/_design/<docid>',
               methods=['HEAD', 'GET', 'PUT', 'DELETE'])
def design_document(dbname, docid):
    """Delegate design-document access to the generic document handler."""
    return document(dbname, '_design/' + docid)
@replipy.route('/<dbname>/_local/<docid>',
               methods=['GET', 'PUT', 'DELETE'])
def local_document(dbname, docid):
    """Delegate local (non-replicated) document access to the document handler."""
    return document(dbname, '_local/' + docid)
@replipy.route('/<dbname>/_revs_diff', methods=['POST'])
@database_should_exists
def database_revs_diff(dbname):
    """Report which of the posted revisions this database is missing."""
    db = app.dbs[dbname]
    return make_response(200, db.revs_diff(flask.request.get_json()))
@replipy.route('/<dbname>/_bulk_docs', methods=['POST'])
@database_should_exists
def database_bulk_docs(dbname):
    """Store a batch of documents in one request (replication bulk update)."""
    db = app.dbs[dbname]
    return make_response(201, db.bulk_docs(**flask.request.get_json()))
@replipy.route('/<dbname>/_ensure_full_commit', methods=['POST'])
@database_should_exists
def database_ensure_full_commit(dbname):
    """Ask the database to flush pending writes (replication checkpoint step)."""
    db = app.dbs[dbname]
    return make_response(201, db.ensure_full_commit())
@replipy.route('/<dbname>/_changes', methods=['GET'])
@database_should_exists
def database_changes(dbname):
    """Stream the database changes feed as one JSON document.

    Mirrors CouchDB's ``GET /db/_changes``; the body is produced lazily so
    large feeds are not buffered in memory.
    """
    def generator(changes, last_seq):
        # Emit well-formed JSON even when the feed is empty.  The previous
        # implementation primed the stream with next(changes), which for an
        # empty iterator silently truncated the document (and raises
        # RuntimeError under PEP 479 on Python 3.7+).
        yield '{"last_seq": %d,' % last_seq
        yield '"results":['
        first = True
        for change in changes:
            if first:
                first = False
                yield json.dumps(change)
            else:
                yield ',' + json.dumps(change)
        yield ']}'
    db = app.dbs[dbname]
    last_seq = db.update_seq
    args = flask.request.args
    heartbeat = args.get('heartbeat', 10000)  # accepted but currently unused
    since = json.loads(args.get('since', '0'))
    feed = args.get('feed', 'normal')
    style = args.get('style', 'all_docs')
    filter = args.get('filter', None)
    changes = db.changes(since, feed, style, filter)
    return flask.Response(generator(changes, last_seq),
                          content_type='application/json')
def parse_multipart_data(stream, boundary):
    """Parse a multipart/related payload into ``(headers, body)`` pairs.

    *stream* is an iterable of raw lines (bytes, line terminators kept) and
    *boundary* is the multipart boundary string without leading dashes.
    Yields one ``(headers_dict, body_bytes)`` tuple per part; body bytes keep
    their trailing line terminators, matching CouchDB's framing.
    """
    boundary = boundary.encode()
    next_boundary = boundary and b'--' + boundary or None
    last_boundary = boundary and b'--' + boundary + b'--' or None
    stack = []
    state = 'boundary'
    # The payload must open with the first boundary marker.
    line = next(stream).rstrip()
    assert line == next_boundary
    for line in stream:
        if line.rstrip() == last_boundary:
            break
        if state == 'boundary':
            state = 'headers'
            if stack:
                headers, body = stack.pop()
                yield headers, b''.join(body)
            stack.append(({}, []))
        if state == 'headers':
            if line == b'\r\n':
                state = 'body'
                continue
            headers = stack[-1][0]
            line = line.decode()
            # Split on the first colon only: header values (dates, URLs,
            # disposition parameters) may themselves contain colons, which
            # previously made the two-value unpack raise ValueError.
            key, value = map(lambda i: i.strip(), line.split(':', 1))
            headers[key] = value
        if state == 'body':
            if line.rstrip() == next_boundary:
                state = 'boundary'
                continue
            stack[-1][1].append(line)
    if stack:
        headers, body = stack.pop()
        yield headers, b''.join(body)
|
volusion/vol-admin-bootstrap | test-infra/s3_cache.py | Python | mit | 8,567 | 0.010155 | #!/usr/bin/env python2.7
<<<<<<< HEAD
from __future__ import absol | ute_import, unicode_literals, print_function, division
from sys import a | rgv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
=======
# pylint: disable=C0301
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, chdir, remove as _delete_file
from os.path import dirname, basename, abspath, realpath, expandvars
from hashlib import sha256
from subprocess import check_call as run
from json import load, dump as save
from contextlib import contextmanager
from datetime import datetime
>>>>>>> 1aaad6481cb064f31f85d519cd56e3c1799585cf
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
# NOTE(review): this file contains unresolved Git merge conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>>) and will not parse until the conflict
# is resolved.  The markers are preserved here on purpose so the resolution
# can be reviewed explicitly rather than silently picked.
<<<<<<< HEAD
# "HEAD" side: single-cache design driven by environment variables.
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
    BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
    raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
=======
# Incoming side: multi-cache design configured via S3Cachefile.json.
CONFIG_FILE = './S3Cachefile.json'
UPLOAD_TODO_FILE = './S3CacheTodo.json'
BYTES_PER_MB = 1024 * 1024
@contextmanager
def timer():
    # Report wall-clock duration of the wrapped block to stdout.
    start = datetime.utcnow()
    yield
    end = datetime.utcnow()
    elapsed = end - start
    print("\tDone. Took", int(elapsed.total_seconds()), "second(s).")
@contextmanager
def todo_file(writeback=True):
    # Load the upload-todo JSON (empty dict when absent or corrupt), yield
    # it for mutation, and optionally persist the result on exit.
    try:
        with open(UPLOAD_TODO_FILE, 'rt') as json_file:
            todo = load(json_file)
    except (IOError, OSError, ValueError):
        todo = {}
    yield todo
    if writeback:
        try:
            with open(UPLOAD_TODO_FILE, 'wt') as json_file:
                save(todo, json_file)
        except (OSError, IOError) as save_err:
            print("Error saving {}:".format(UPLOAD_TODO_FILE), save_err)
>>>>>>> 1aaad6481cb064f31f85d519cd56e3c1799585cf
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
# NOTE(review): another unresolved conflict hunk -- the HEAD side has no
# equivalent of these helpers; the incoming side introduces the todo-file API.
<<<<<<< HEAD
=======
def mark_needs_uploading(cache_name):
    # Flag *cache_name* so a later `upload` run knows the cache is stale.
    with todo_file() as todo:
        todo[cache_name] = True
def mark_uploaded(cache_name):
    # Clear the pending-upload flag for *cache_name*.
    with todo_file() as todo:
        todo.pop(cache_name, None)
def need_to_upload(cache_name):
    # True when *cache_name* was previously marked as needing upload.
    with todo_file(writeback=False) as todo:
        return todo.get(cache_name, False)
>>>>>>> 1aaad6481cb064f31f85d519cd56e3c1799585cf
def _tarball_size(directory):
    """Return the cached tarball's size as a human-readable "<n> MiB" string."""
    # NOTE(review): despite its name, `kib` holds mebibytes
    # (st_size // BYTES_PER_MB), which matches the "MiB" label below.
    kib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(kib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
# NOTE(review): the conflict hunks below cut ACROSS function boundaries
# (one hunk spans the tail of _extract_tarball and the whole of download),
# so these functions cannot be cleanly separated until the merge is resolved.
def _create_tarball(directory):
    # Pack *directory* into ./<name>.tar.gz next to the current directory.
    print("Creating tarball of {}...".format(directory))
<<<<<<< HEAD
    run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
=======
    with timer():
        run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
>>>>>>> 1aaad6481cb064f31f85d519cd56e3c1799585cf
def _extract_tarball(directory):
    # Unpack the cached tarball back over *directory*'s parent.
    print("Extracting tarball of {}...".format(directory))
<<<<<<< HEAD
    run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
    try:
        print("Downloading {} tarball from S3...".format(friendly_name))
        key.get_contents_to_filename(_tarball_filename_for(directory))
    except S3ResponseError as err:
        open(NEED_TO_UPLOAD_MARKER, 'a').close()
        print(err)
        raise SystemExit("Cached {} download failed!".format(friendly_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(friendly_name))
=======
    with timer():
        run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
    # Fetch the cached tarball from S3 and unpack it; on failure, remember
    # that the cache must be regenerated and uploaded.
    mark_uploaded(cache_name) # reset
    try:
        print("Downloading {} tarball from S3...".format(cache_name))
        with timer():
            key.get_contents_to_filename(_tarball_filename_for(directory))
    except S3ResponseError as err:
        # NOTE(review): `err` is bound but never used on this side of the
        # conflict -- the HEAD side printed it.
        mark_needs_uploading(cache_name)
        raise SystemExit("Cached {} download failed!".format(cache_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(cache_name))
>>>>>>> 1aaad6481cb064f31f85d519cd56e3c1799585cf
def upload(directory):
    # Pack *directory* and push the tarball to S3, then clear the todo flag.
    _create_tarball(directory)
<<<<<<< HEAD
    print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
    key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(friendly_name))
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
=======
    print("Uploading {} tarball to S3... ({})".format(cache_name, _tarball_size(directory)))
    with timer():
        key.set_contents_from_filename(_tarball_filename_for(directory))
    print("{} cache successfully updated.".format(cache_name))
    mark_uploaded(cache_name)
>>>>>>> 1aaad6481cb064f31f85d519cd56e3c1799585cf
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
<<<<<<< HEAD
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
=======
if len(argv) != 2:
raise SystemExit("USAGE: s3_cache.py <download | upload> <cache name>")
mode, cache_name = argv
script_dir = dirname(realpath(__file__))
chdir(script_dir)
try:
with open(CONFIG_FILE, 'rt') as config_file:
config = load(config_file)
except (IOError, OSError, ValueError) as config_err:
print(config_err)
raise SystemExit("Error when trying to load config from JSON file!")
try:
cache_info = config[cache_name]
key_file = expandvars(cache_info["key"])
fallback_cmd = cache_info["generate"]
directory = expandvars(cache_info["cache"])
except (TypeError, KeyError) as load_err:
print(load_err)
raise SystemExit("Config for cache named {!r} is missing or malformed!".format(cache_name))
try:
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME)
if bucket is None:
raise SystemExit("Could not access bucket!")
key_file_hash = _sha256_of_file(key_file)
key = Key(bucket, key_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if need_to_upload(cache_name):
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode |
Injabie3/lui-cogs | spoilers/spoilers.py | Python | gpl-3.0 | 8,453 | 0.002484 | """Spoilers cog
Filters out messages that start with a certain prefix, and store them for
later retrieval.
"""
from datetime import datetime, timedelta
import logging
import json
import os
import re
import discord
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from cogs.utils import config
# Global variables
KEY_MESSAGE = "message" # stored spoiler text
KEY_AUTHOR_ID = "authorid" # Discord user ID of the spoiler's author
KEY_AUTHOR_NAME = "author" # "name#discriminator" of the author
KEY_TIMESTAMP = "timestamp" # creation time, seconds since the epoch (string)
KEY_EMBED = "embed" # optional image URL attached to the spoiler
LOGGER = None # module logger; assigned in setup()
PREFIX = "spoiler"
SAVE_FOLDER = "data/lui-cogs/spoilers/" # Path to save folder.
SAVE_FILE = "settings.json"
COOLDOWN = 60 # per-user, per-message DM cooldown in seconds
def checkFolder():
    """Create the cog's data folder on first startup if it is missing."""
    if os.path.exists(SAVE_FOLDER):
        return
    print("Creating " + SAVE_FOLDER + " folder...")
    os.makedirs(SAVE_FOLDER)
def checkFiles():
    """Used to initialize an empty database at first startup"""
    # Create an empty JSON settings store when the file is absent or corrupt.
    theFile = SAVE_FOLDER + SAVE_FILE
    if not dataIO.is_valid_json(theFile):
        print("Creating default spoilers settings.json")
        dataIO.save_json(theFile, {})
class Spoilers: # pylint: disable=too-many-instance-attributes
    """Store messages for later retrieval.

    Users invoke ``[p]spoiler <text>``; the original message is deleted and
    replaced by a placeholder.  Reacting to the placeholder DMs the hidden
    content to the reacting user, subject to a per-user cooldown.
    """

    #Class constructor
    def __init__(self, bot):
        self.bot = bot
        #The JSON keys for the settings:
        checkFolder()
        checkFiles()
        self.settings = config.Config("settings.json",
                                      cogname="lui-cogs/spoilers")
        # Fall back to an empty mapping when no spoilers were saved yet.
        # (The previous `... if not None else {}` always took the first
        # branch, because `not None` is constantly True, leaving
        # self.messages as None and crashing checkForReaction later.)
        self.messages = self.settings.get("messages") or {}
        self.onCooldown = {}

    @commands.command(name="spoiler", pass_context=True)
    async def spoiler(self, ctx, *, msg):
        """Create a message spoiler."""
        wordFilter = self.bot.get_cog("WordFilter")
        if not wordFilter:
            await self.bot.say("This cog requires the word filter cog to be loaded. "
                               "Please load the cog and try again")
            return
        if wordFilter.containsFilterableWords(ctx.message):
            await self.bot.say("You have filtered words in your spoiler! Please "
                               "check it and try again!")
            return
        newMsg = None  # so the except-branch can tell whether we posted it
        try:
            store = {}
            store[KEY_MESSAGE] = msg
            store[KEY_AUTHOR_ID] = ctx.message.author.id
            store[KEY_AUTHOR_NAME] = "{0.name}#{0.discriminator}".format(ctx.message.author)
            store[KEY_TIMESTAMP] = ctx.message.timestamp.strftime("%s")
            # Remember an image to re-embed in the DM: either an actual image
            # embed, or the first image-looking URL in the text.
            if ctx.message.embeds:
                data = discord.Embed.from_data(ctx.message.embeds[0])
                if data.type == 'image':
                    store[KEY_EMBED] = data.url
            else:
                imglinkPattern = r"(?i)http[^ ]+\.(?:png|jpg|jpeg|gif)"
                match = re.search(imglinkPattern, msg)
                if match:
                    store[KEY_EMBED] = match.group(0)
            await self.bot.delete_message(ctx.message)
            newMsg = await self.bot.say(":warning: {} created a spoiler! React to see "
                                        "the message!".format(ctx.message.author.mention))
            if not self.messages:
                self.messages = {}
            self.messages[newMsg.id] = store
            await self.bot.add_reaction(newMsg, "\N{INFORMATION SOURCE}")
            LOGGER.info("%s#%s (%s) added a spoiler: %s",
                        ctx.message.author.name,
                        ctx.message.author.discriminator,
                        ctx.message.author.id,
                        msg)
            await self.settings.put("messages", self.messages)
        except discord.errors.Forbidden as error:
            await self.bot.say("I'm not able to do that.")
            # Only delete the placeholder if it was actually posted; the
            # previous code raised NameError here when delete_message failed.
            if newMsg:
                await self.bot.delete_message(newMsg)
            LOGGER.error("Could not create a spoiler in server %s channel %s",
                         ctx.message.server.name,
                         ctx.message.channel.name)
            LOGGER.error(error)

    async def checkForReaction(self, data):
        """Reaction listener (using socket data)

        Checks to see if a spoilered message is reacted, and if so, send a DM to the
        user that reacted.
        """
        # no binary frames
        if isinstance(data, bytes):
            return
        data = json.loads(data)
        event = data.get("t")
        payload = data.get("d")
        if event not in ("MESSAGE_REACTION_ADD", "MESSAGE_REACTION_REMOVE",
                         "MESSAGE_REACTION_REMOVE_ALL"):
            return
        isReaction = event == "MESSAGE_REACTION_ADD"
        # make sure the reaction is proper
        if isReaction:
            msgId = payload["message_id"]
            if msgId in self.messages.keys():
                server = discord.utils.get(self.bot.servers,
                                           id=payload["guild_id"])
                reactedUser = discord.utils.get(server.members,
                                                id=payload["user_id"])
                if reactedUser.bot:
                    return
                channel = discord.utils.get(server.channels,
                                            id=payload["channel_id"])
                message = await self.bot.get_message(channel, msgId)
                # Custom emoji carry an ID; unicode emoji are plain strings.
                if payload["emoji"]["id"]:
                    emoji = discord.Emoji(name=payload["emoji"]["name"],
                                          id=payload["emoji"]["id"],
                                          server=server)
                else:
                    emoji = payload["emoji"]["name"]
                # Keep the placeholder clean so others can react too.
                await self.bot.remove_reaction(message, emoji, reactedUser)
                if (msgId in self.onCooldown.keys() and
                        reactedUser.id in self.onCooldown[msgId].keys() and
                        self.onCooldown[msgId][reactedUser.id] > datetime.now()):
                    return
                msg = self.messages[msgId]
                embed = discord.Embed()
                userObj = discord.utils.get(server.members,
                                            id=msg[KEY_AUTHOR_ID])
                if userObj:
                    embed.set_author(name="{0.name}#{0.discriminator}".format(userObj),
                                     icon_url=userObj.avatar_url)
                else:
                    embed.set_author(name=msg[KEY_AUTHOR_NAME])
                if KEY_EMBED in msg:
                    embed.set_image(url=msg[KEY_EMBED])
                embed.description = msg[KEY_MESSAGE]
                embed.timestamp = datetime.fromtimestamp(int(msg[KEY_TIMESTAMP]))
                try:
                    await self.bot.send_message(reactedUser, embed=embed)
                    if msgId not in self.onCooldown.keys():
                        self.onCooldown[msgId] = {}
                    self.onCooldown[msgId][reactedUser.id] = (datetime.now() +
                                                              timedelta(seconds=COOLDOWN))
                except (discord.errors.Forbidden, discord.errors.HTTPException) as error:
                    LOGGER.error("Could not send DM to %s#%s (%s).",
                                 reactedUser.name,
                                 reactedUser.discriminator,
                                 reactedUser.id)
                    LOGGER.error(error)
def setup(bot):
"""Add the cog to the bot."""
checkFolder() # Make sure the data folder exists!
checkFiles() # Make sure we have settings!
spoilersCog = Spoilers(bot)
global LOGGER # pylint: disable=global-statement
LOGGER = logging.getLogger("red.Spoilers")
if LOGGER.level == 0:
# Prevents the LOGGER from being loaded again in case of module reload.
LOGGER.setLevel(logging.INFO)
handler = logging.FileHandler(filename="data/lui-cogs/spoilers/info.log",
encoding="utf-8",
mode="a")
handler.setFormat |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.