repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
SYHGroup/mau_mau_bot
|
utils.py
|
Python
|
agpl-3.0
| 3,582
| 0.000281
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Telegram bot to play UNO in group chats
# Copyright (c) 2016 Jannes Höke <uno@jhoeke.de>
#
# T
|
his program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from telegram.ext.dispatcher import run_async
from internationalization import _, __
from mwt import MWT
from shared_vars import gm
logger = logging.getLogger(__name__)
TIMEOUT = 2.5
def list_subtract(list1, list2):
    """Return a sorted copy of list1 with one occurrence of each item
    of list2 removed.

    Neither input list is mutated.

    Raises:
        ValueError: if an element of list2 is not present in list1
            (propagated from list.remove).
    """
    remaining = list1.copy()
    for item in list2:
        remaining.remove(item)
    # sorted() already returns a new list; the original wrapped it in a
    # redundant list() call.
    return sorted(remaining)
def display_name(user):
    """Return the player's first name, with their @username appended
    in parentheses when they have one."""
    if user.username:
        return '{} (@{})'.format(user.first_name, user.username)
    return user.first_name
def display_color(color):
    """ Convert a color code to actual color name """
    # Each _("literal") call must keep its literal string argument:
    # gettext string extraction scans source for literal _() arguments,
    # so these branches must not be folded into a lookup table.
    if color == "r":
        return _("{emoji} Red").format(emoji='❤️')
    if color == "b":
        return _("{emoji} Blue").format(emoji='💙')
    if color == "g":
        return _("{emoji} Green").format(emoji='💚')
    if color == "y":
        return _("{emoji} Yellow").format(emoji='💛')
    # NOTE(review): an unknown color code falls through and returns None
    # implicitly — confirm callers only pass r/b/g/y.
def display_color_group(color, game):
    """ Convert a color code to actual color name """
    # Group-chat variant of display_color(): __() translates into the
    # game's configured language (game.translate) instead of the current
    # user's. The literal strings must stay inline for gettext extraction.
    if color == "r":
        return __("{emoji} Red", game.translate).format(
            emoji='❤️')
    if color == "b":
        return __("{emoji} Blue", game.translate).format(
            emoji='💙')
    if color == "g":
        return __("{emoji} Green", game.translate).format(
            emoji='💚')
    if color == "y":
        return __("{emoji} Yellow", game.translate).format(
            emoji='💛')
    # NOTE(review): unknown codes return None implicitly, matching
    # display_color().
def error(bot, update, error):
    """Simple error handler"""
    # Signature follows python-telegram-bot's (bot, update, error)
    # error-handler convention; bot and update are unused here.
    # NOTE(review): the `error` parameter shadows this function's own
    # name — renaming it would be clearer but could break keyword callers.
    logger.exception(error)
@run_async
def send_async(bot, *args, **kwargs):
    """Send a message asynchronously.

    Applies the module default timeout unless the caller supplied one,
    and routes any failure to the shared error() handler.
    """
    kwargs.setdefault('timeout', TIMEOUT)
    try:
        bot.sendMessage(*args, **kwargs)
    except Exception as e:
        error(None, None, e)
@run_async
def answer_async(bot, *args, **kwargs):
    """Answer an inline query asynchronously.

    Applies the module default timeout unless the caller supplied one,
    and routes any failure to the shared error() handler.
    """
    kwargs.setdefault('timeout', TIMEOUT)
    try:
        bot.answerInlineQuery(*args, **kwargs)
    except Exception as e:
        error(None, None, e)
def game_is_running(game):
    """Tell whether the game is still listed among its chat's games."""
    games_in_chat = gm.chatid_games.get(game.chat.id, [])
    return game in games_in_chat
def user_is_creator(user, game):
    """Tell whether this user is one of the game's owners."""
    owner_ids = game.owner
    return user.id in owner_ids
def user_is_admin(user, bot, chat):
    """Tell whether this user is an administrator of the given chat."""
    admin_ids = get_admin_ids(bot, chat.id)
    return user.id in admin_ids
def user_is_creator_or_admin(user, game, bot, chat):
    """Tell whether the user owns the game or administers the chat."""
    if user_is_creator(user, game):
        return True
    return user_is_admin(user, bot, chat)
@MWT(timeout=60*60)
def get_admin_ids(bot, chat_id):
    """Returns a list of admin IDs for a given chat. Results are cached for 1 hour."""
    # The MWT decorator caches the result (per arguments) with a one-hour
    # TTL, so repeated admin checks don't hit the Telegram API each time.
    return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]
|
|
tchellomello/home-assistant
|
homeassistant/helpers/device_registry.py
|
Python
|
apache-2.0
| 22,499
| 0.001022
|
"""Provide a way to connect entities belonging to one device."""
from collections import OrderedDict
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union
import attr
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import Event, callback
import homeassistant.util.uuid as uuid_util
from .debounce import Debouncer
from .singleton import singleton
from .typing import HomeAssistantType
if TYPE_CHECKING:
from . import entity_registry
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_UNDEF = object()
DATA_REGISTRY = "device_registry"
EVENT_DEVICE_REGISTRY_UPDATED = "device_registry_updated"
STORAGE_KEY = "core.device_registry"
STORAGE_VERSION = 1
SAVE_DELAY = 10
CLEANUP_DELAY = 10
CONNECTION_NETWORK_MAC = "mac"
CONNECTION_UPNP = "upnp"
CONNECTION_ZIGBEE = "zigbee"
IDX_CONNECTIONS = "connections"
IDX_IDENTIFIERS = "identifiers"
REGISTERED_DEVICE = "registered"
DELETED_DEVICE = "deleted"
@attr.s(slots=True, frozen=True)
class DeletedDeviceEntry:
    """Deleted Device Registry Entry.

    Immutable snapshot of a removed device; kept so the device can be
    restored with its original id if the same identifiers/connections
    are ever registered again.
    """

    config_entries: Set[str] = attr.ib()
    connections: Set[Tuple[str, str]] = attr.ib()
    identifiers: Set[Tuple[str, str]] = attr.ib()
    id: str = attr.ib()

    def to_device_entry(self):
        """Create DeviceEntry from DeletedDeviceEntry."""
        # is_new=True marks the resurrected entry for event tracking
        # (see the is_new comment on DeviceEntry).
        return DeviceEntry(
            config_entries=self.config_entries,
            connections=self.connections,
            identifiers=self.identifiers,
            id=self.id,
            is_new=True,
        )
@attr.s(slots=True, frozen=True)
class DeviceEntry:
    """Device Registry Entry."""

    # converter=set copies the incoming iterables, so entries never share
    # mutable state with callers.
    config_entries: Set[str] = attr.ib(converter=set, factory=set)
    connections: Set[Tuple[str, str]] = attr.ib(converter=set, factory=set)
    identifiers: Set[Tuple[str, str]] = attr.ib(converter=set, factory=set)
    # Descriptive metadata; all optional and default to None.
    manufacturer: str = attr.ib(default=None)
    model: str = attr.ib(default=None)
    name: str = attr.ib(default=None)
    sw_version: str = attr.ib(default=None)
    # Registry id of the device this one is reached via (presumably a
    # hub/bridge — cf. the via_device parameter of async_get_or_create).
    via_device_id: str = attr.ib(default=None)
    area_id: str = attr.ib(default=None)
    name_by_user: str = attr.ib(default=None)
    entry_type: str = attr.ib(default=None)
    # Stable registry id, generated on first creation.
    id: str = attr.ib(factory=uuid_util.uuid_v1mc_hex)
    # This value is not stored, just used to keep track of events to fire.
    is_new: bool = attr.ib(default=False)
def format_mac(mac: str) -> str:
    """Format the mac address string for entry into dev reg.

    Accepts colon-, dash-, dot- (Cisco) or un-separated hex forms and
    normalizes them to lowercase colon-separated pairs; anything else is
    returned unchanged.
    """
    candidate = mac
    if len(candidate) == 17:
        if candidate.count(":") == 5:
            # Already colon-separated; just lowercase it.
            return candidate.lower()
        if candidate.count("-") == 5:
            candidate = candidate.replace("-", "")
    elif len(candidate) == 14 and candidate.count(".") == 2:
        candidate = candidate.replace(".", "")
    if len(candidate) == 12:
        # Bare hex digits: insert colons between each byte.
        lowered = candidate.lower()
        return ":".join(lowered[i : i + 2] for i in range(0, 12, 2))
    # Not sure how formatted, return original
    return mac
class DeviceRegistry:
"""Class to hold a registry of devices."""
devices: Dict[str, DeviceEntry]
deleted_devices: Dict[str, DeletedDeviceEntry]
_devices_index: Dict[str, Dict[str, Dict[str, str]]]
def __init__(self, hass: HomeAssistantType) -> None:
    """Initialize the device registry."""
    self.hass = hass
    # Persistent store used to save/load the registry across restarts.
    self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
    # Start with an empty identifier/connection lookup index.
    # NOTE(review): self.devices / self.deleted_devices are not assigned
    # here — presumably populated by a load step outside this view.
    self._clear_index()
@callback
def async_get(self, device_id: str) -> Optional[DeviceEntry]:
    """Get device.

    Direct lookup by registry id; returns None when unknown.
    """
    return self.devices.get(device_id)
@callback
def async_get_device(
    self, identifiers: set, connections: set
) -> Optional[DeviceEntry]:
    """Check if device is registered.

    Returns the matching registered DeviceEntry, or None when neither
    the identifiers nor the connections are indexed.
    """
    device_id = self._async_get_device_id_from_index(
        REGISTERED_DEVICE, identifiers, connections
    )
    if device_id is None:
        return None
    return self.devices[device_id]
def _async_get_deleted_device(
    self, identifiers: set, connections: set
) -> Optional[DeletedDeviceEntry]:
    """Check if device is deleted.

    Mirror of async_get_device, but searches the deleted-device index.
    """
    device_id = self._async_get_device_id_from_index(
        DELETED_DEVICE, identifiers, connections
    )
    if device_id is None:
        return None
    return self.deleted_devices[device_id]
def _async_get_device_id_from_index(
    self, index: str, identifiers: set, connections: set
) -> Optional[str]:
    """Check if device has previously been registered.

    :param index: REGISTERED_DEVICE or DELETED_DEVICE — which of the
        two lookup indexes to search.
    :returns: the matching device id, or None.
    """
    devices_index = self._devices_index[index]
    # Identifiers are checked first; the first hit wins.
    for identifier in identifiers:
        if identifier in devices_index[IDX_IDENTIFIERS]:
            return devices_index[IDX_IDENTIFIERS][identifier]
    if not connections:
        return None
    # _normalize_connections is a module helper defined outside this
    # view; matching on its normalized form keeps lookups consistent
    # with how entries were indexed.
    for connection in _normalize_connections(connections):
        if connection in devices_index[IDX_CONNECTIONS]:
            return devices_index[IDX_CONNECTIONS][connection]
    return None
def _add_device(self, device: Union[DeviceEntry, DeletedDeviceEntry]) -> None:
    """Add a device and index it.

    Registered and deleted entries live in separate maps and separate
    lookup indexes; the entry's concrete type selects which pair is used.
    """
    if isinstance(device, DeletedDeviceEntry):
        devices_index = self._devices_index[DELETED_DEVICE]
        self.deleted_devices[device.id] = device
    else:
        devices_index = self._devices_index[REGISTERED_DEVICE]
        self.devices[device.id] = device
    # _add_device_to_index is a module helper defined outside this view.
    _add_device_to_index(devices_index, device)
def _remove_device(self, device: Union[DeviceEntry, DeletedDeviceEntry]) -> None:
    """Remove a device and remove it from the index.

    Inverse of _add_device; pop() raises KeyError if the entry was
    never stored.
    """
    if isinstance(device, DeletedDeviceEntry):
        devices_index = self._devices_index[DELETED_DEVICE]
        self.deleted_devices.pop(device.id)
    else:
        devices_index = self._devices_index[REGISTERED_DEVICE]
        self.devices.pop(device.id)
    # _remove_device_from_index is a module helper defined outside this view.
    _remove_device_from_index(devices_index, device)
def _update_device(self, old_device: DeviceEntry, new_device: DeviceEntry) -> None:
    """Update a device and the index.

    Assumes new_device keeps old_device's id (entries are immutable, so
    an update is a replacement); re-indexes because identifiers or
    connections may have changed.
    """
    self.devices[new_device.id] = new_device
    devices_index = self._devices_index[REGISTERED_DEVICE]
    _remove_device_from_index(devices_index, old_device)
    _add_device_to_index(devices_index, new_device)
def _clear_index(self):
    """Reset the identifier/connection lookup index to an empty state."""
    self._devices_index = {
        device_kind: {IDX_IDENTIFIERS: {}, IDX_CONNECTIONS: {}}
        for device_kind in (REGISTERED_DEVICE, DELETED_DEVICE)
    }
def _rebuild_index(self):
    """Create the index after loading devices.

    Rebuilds both lookup indexes from scratch out of the current
    devices / deleted_devices maps.
    """
    self._clear_index()
    for device in self.devices.values():
        _add_device_to_index(self._devices_index[REGISTERED_DEVICE], device)
    for device in self.deleted_devices.values():
        _add_device_to_index(self._devices_index[DELETED_DEVICE], device)
@callback
def async_get_or_create(
self,
*,
config_entry_id,
connections=None,
identifiers=None,
manufacturer=_UNDEF,
model=_UNDEF,
name=_UNDEF,
default_manufacturer=_UNDEF,
default_model=_UNDEF,
default_name=_UNDEF,
sw_version=_UNDEF,
entry_type=_UNDEF,
via_device=None,
):
"""Get device. Create if
|
it doesn't exist."""
if not identifiers and not connections:
return None
if identifiers is None:
identifiers = set()
if connections is None:
|
connections = set()
else:
connections = _normalize_connections(connections)
device = self.async_get_device(identifiers, connections)
if device is None:
deleted_device = self._async_get_deleted_device(identifiers, connections)
if deleted_device is None:
device = DeviceEntry(is_new=True)
else:
self._remove_device(deleted_device)
device = deleted_device.to_device_entry()
self._add_device
|
pusateri/canner
|
canner/personalities/junos.py
|
Python
|
gpl-3.0
| 1,104
| 0.001812
|
#
# Copyright 2007 !j Incorporated
#
# This file is part of Canner.
#
# Canner is free software: you can redist
|
ribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Canner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for m
|
ore details.
#
# You should have received a copy of the GNU General Public License
# along with Canner. If not, see <http://www.gnu.org/licenses/>.
#
from . import Personality
class JUNOSPersonality(Personality):
    """Canner device personality for Juniper JUNOS."""

    os_name = "JUNOS"
    # No extra probe commands are needed; the login banner is sufficient
    # evidence for detection.
    commands_to_probe = ()

    def examine_evidence(self, command, output):
        # JUNOS prints a "--- JUNOS " banner at login; the 0.8 is
        # presumably a confidence weight consumed by
        # Personality.examine_with_pattern — confirm against the base class.
        if command == "__login__":
            self.examine_with_pattern(output, 0.8, r"--- JUNOS ")

    def setup(self, session):
        # Disable CLI paging and line wrapping so captured command output
        # is unpaginated and unwrapped.
        session.perform_command("set cli screen-length 0")
        session.perform_command("set cli screen-width 0")
|
zbqf109/goodo
|
openerp/addons/sale_crm/res_users.py
|
Python
|
gpl-3.0
| 261
| 0.003831
|
# -*- coding: utf-8 -*-
from openerp.osv import osv, field
|
s
import openerp.addons.product.product
class res_users(osv.osv):
    """Extend res.users with a sales target field (legacy OpenERP osv API)."""
    _inherit = 'res.users'
    _columns = {
        # NOTE(review): this uses `fields.integer`, but the import line
        # visible above only brings in `osv` (the `fields` import appears
        # truncated) — confirm `fields` is imported at module top.
        'target_sales_invoiced': fields.integer('Invoiced in Sale Orders Target'),
    }
| |
nijinashok/sos
|
sos/plugins/grub2.py
|
Python
|
gpl-2.0
| 2,119
| 0
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Grub2(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""GRUB2 bootloader
"""
plugin_name = 'grub2'
profiles = ('boot',)
packages = ('grub2', 'grub2-efi')
def setup(self):
self.add_copy_spec([
"/boot/efi/EFI/*/grub.cfg",
"/boot/grub2/grub.cfg",
"/boot/grub2/grubenv",
"/boot/grub/grub.cfg",
"/etc/default/grub",
"/etc/grub2.cfg",
"/etc/grub.d"
])
self.add
|
_cmd_output("ls -lanR /boot")
# call grub2-mkconfig with GRUB_DISABLE_OS_PROBER=true to prevent
# possible unwanted loading of some kernel modules
env = {}
env['GRUB_DISABLE_OS_PROBER'] = 'true'
self.add_cmd_output("grub2-mkconfig", env=env)
def postproc(self):
# the trailing space is required; python treats '_' as whitespace
# causing the passwd_exp to match pbkdf2 passwords and mangle them.
passwd_exp = r"(passw
|
ord )\s*(\S*)\s*(\S*)"
passwd_pbkdf2_exp = r"(password_pbkdf2)\s*(\S*)\s*(\S*)"
passwd_sub = r"\1 \2 ********"
passwd_pbkdf2_sub = r"\1 \2 grub.pbkdf2.********"
self.do_cmd_output_sub(
"grub2-mkconfig",
passwd_pbkdf2_exp,
passwd_pbkdf2_sub
)
self.do_cmd_output_sub(
"grub2-mkconfig",
passwd_exp,
passwd_sub
)
self.do_path_regex_sub(
r".*\/grub\.",
passwd_exp,
passwd_sub
)
self.do_path_regex_sub(
r".*\/grub\.",
passwd_pbkdf2_exp,
passwd_pbkdf2_sub
)
# vim: set et ts=4 sw=4 :
|
Turupawn/website
|
games/migrations/0013_auto_20161001_0143.py
|
Python
|
agpl-3.0
| 561
| 0.001783
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-30 23:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db
|
.models.deletion
class Migration(migrations.Migration):
dependencies = [
|
('games', '0012_remove_gamelink_accepted_at'),
]
operations = [
migrations.AlterField(
model_name='gamelink',
name='game',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='links', to='games.Game'),
),
]
|
umeckel/FS_coapy
|
coapy/connection.py
|
Python
|
bsd-3-clause
| 41,116
| 0.004937
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 People Power Co.
# All rights reserved.
#
# This open source code was developed with funding from People Power Company
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# - Neither the name of the People Power Corporation nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PEOPLE POWER CO. OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE
#
import coapy.options
import coapy.constants
import socket
import struct
import binascii
import fcntl
import random
import select
import time
import os
class Message (object):
"""Represent the components of a CoAP message.
- :attr:`.transaction_type`
- :attr:`.code`
- :attr:`.options`
- :attr:`.payload`
The transaction ID is not recorded in the message instance.
Rather, it is recorded in a :class:`TransmissionRecord` or
:class:`ReceptionRecord`.
"""
version = property(lambda _s: 1, None, None, "The CoAP protocol version.")
CON = 0
"""The message transaction type indicating a confirmable message
(one that requires an acknowledgement)."""
NON = 1
"""The message transaction type indicating a non-confirmable
message (one that does not evoke an acknowledgement)."""
ACK = 2
"""The message transaction type indicating acknowledgement of a
:attr:`confirmable<.CON>` message. Note that such a message may
also include a response payload that pertains to the acknowledged
message."""
RST = 3
"""The message transaction type indicating that a received
:attr:`confirmable<.CON>` message could not be processed due to
insufficient context."""
_TransactionTypeMap = { CON : 'CON',
NON : 'NON',
ACK : 'ACK',
RST : 'RST' }
OptionKeywords = { 'content_type' : coapy.options.ContentType,
'max_age' : coapy.options.MaxAge,
'proxy_uri' : coapy.options.ProxyUri,
'etag' : coapy.options.Etag,
'uri_host' : coapy.options.UriHost,
'location_path' : coapy.options.LocationPath,
'uri_port' : coapy.options.UriPort,
'location_query' : coapy.options.LocationQuery,
'uri_path' : coapy.options.UriPath,
'token' : coapy.options.Token,
'accept' : coapy.options.Accept,
'if_match' : coapy.options.IfMatch,
'uri_query' : coapy.options.UriQuery,
'if_none_match' : coapy.options.IfNoneMatch }
"""A map from Python identifiers to :mod:`option classes<coapy.options>`.
These identifiers can be provided as keyword parameters to the
:meth:`Message.__init__` method; the corresponding option class will be
|
invoked with the parameter value to create an option that is
associated with the message."""
def __init__ (self, transaction_type=CON, code=0, payload='', sockname=('127.0.0.1', coapy.COAP_PORT), uri_path = '', **kw):
"""Create a Message instance.
As a
|
convenience, message options can be created from keyword
parameters if the keywords are present in
:attr:`.OptionKeywords`.
:param transaction_type: One of :attr:`.CON`, :attr:`.NON`,
:attr:`.ACK`, :attr:`.RST`. The message transaction type
cannot be modified after creation.
:param code: The integral code identifying the method of a
request message, or the disposition in a response message.
By default, the code value is ``0``, indicating absence of
REST content in the message (as suited for :attr:`.RST` or
:attr:`.ACK` when indicating an asynchronous response will
follow)
:param payload: An optional REST payload for the message. If
not provided, the message will have no payload unless
subsequently assigned.
"""
if not (transaction_type in (self.CON, self.NON, self.ACK, self.RST)):
raise ValueError()
self.__transactionType = transaction_type
self.__code = code
self.__options = {}
self.__payload = payload
self.__sockname = sockname
# TODO FS_coapy
# bspw URI Path muss 'gestückelt' werden
# wird zum versenden von Nachrichten dringend benötigt
self.set_uri_path(uri_path)
for (k, v) in kw.iteritems():
kw_type = self.OptionKeywords.get(k)
if kw_type is not None:
self.addOption(kw_type(v))
# Class-level default; every instance replaces this with a dict mapping
# option class -> list of option instances (see addOption).
__options = None

def _get_options (self):
    """A tuple containing the :mod:`options <coapy.options>`
    associated with the message.

    The options are sorted in increasing value of option type.
    """
    # Python-2-only idioms: dict.itervalues() and a positional cmp
    # function to sorted(). Values are per-class lists, so the sort key
    # is the Type code of each list's first instance.
    return tuple(sorted(self.__options.itervalues(), lambda _a,_b: cmp(_a[0].Type, _b[0].Type)))

options = property(_get_options)
# TODO FS_coapy
# Same Option Type can occur multiple times
def addOption (self, opt):
    """Add a new option instance.

    If the option can appear multiple times, this method is
    intended to add the new value to the existing ones.

    :param opt: the option instance to record
    :return: ``self``, for call chaining
    """
    # dict.setdefault replaces the Python-2-only dict.has_key() test and
    # does a single lookup: create the per-class list on first use, then
    # append. Behavior is identical on Python 2 and also valid on 3.
    self.__options.setdefault(type(opt), []).append(opt)
    return self
def replaceOption (self, opt):
    """Add a new option instance.

    Any value(s) previously stored for this option's class are
    discarded in favor of the single new instance.
    """
    option_class = type(opt)
    self.__options[option_class] = [opt]
    return self
def _classForOption (self, opt):
    """Resolve *opt* to an option class.

    Accepts an option instance, an option class, or an integral option
    type code (resolved through coapy.options.Registry).
    """
    if isinstance(opt, coapy.options._Base):
        opt = type(opt)
    elif isinstance(opt, int):
        opt = coapy.options.Registry.get(opt)
    # NOTE(review): if the Registry lookup misses, opt is None and
    # issubclass raises TypeError rather than the ValueError below —
    # confirm whether that is intended.
    if not issubclass(opt, coapy.options._Base):
        raise ValueError()
    return opt
def deleteOption (self, opt):
    """Remove the option from the message.

    :param opt: An option, specified as an option instance, an
        option class, or the type code of an option.
    :raises KeyError: if the option is not present (dict.pop is called
        without a default).
    """
    self.__options.pop(self._classForOption(opt))
# TODO FS_coap
# This function must handle that Options can occur multiple times
def findOption (self, opt):
"""Locate the given option within the message.
Returns ``None`` if no matching option can be found.
:param opt: An option, specified as an option instance, an
option class, or the type code of an option.
"""
option = self.__options.get(self._classForOption(opt))
if option is None:
return None
else:
if option[0].Repeatable:
return
|
ovnicraft/edx-platform
|
lms/urls.py
|
Python
|
agpl-3.0
| 33,307
| 0.002252
|
"""
URLs for LMS
"""
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.views.generic.base import RedirectView
from ratelimitbackend import admin
from django.conf.urls.static import static
import django.contrib.auth.views
from microsite_configuration import microsite
import auth_exchange.views
from config_models.views import ConfigurationModelCurrentAPIView
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
# Uncomment the next two lines to enable the admin:
if settings.DEBUG or settings.FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
admin.autodiscover()
# Use urlpatterns formatted as within the Django docs with first parameter "stuck" to the open parenthesis
urlpatterns = (
'',
url(r'^$', 'branding.views.index', name="root"), # Main marketing page, or redirect to courseware
url(r'^dashboard$', 'student.views.dashboard', name="dashboard"),
url(r'^login_ajax$', 'student.views.login_user', name="login"),
url(r'^login_ajax/(?P<error>[^/]*)$', 'student.views.login_user'),
url(r'^email_confirm/(?P<key>[^/]*)$', 'student.views.confirm_email_change'),
url(r'^event$', 'track.views.user_track'),
url(r'^performance$', 'performance.views.performance_log'),
url(r'^segmentio/event$', 'track.views.segmentio.segmentio_event'),
# TODO: Is this used anymore? What is STATIC_GRAB?
url(r'^t/(?P<template>[^/]*)$', 'static_template_view.views.index'),
url(r'^accounts/manage_user_standing', 'student.views.manage_user_standing',
name='manage_user_standing'),
url(r'^accounts/disable_account_ajax$', 'student.views.disable_account_ajax',
name="disable_account_ajax"),
url(r'^logout$', 'student.views.logout_user', name='logout'),
url(r'^create_account$', 'student.views.create_account', name='create_account'),
url(r'^activate/(?P<key>[^/]*)$', 'student.views.activate_account', name="activate"),
url(r'^password_reset/$', 'student.views.password_reset', name='password_reset'),
## Obsolete Django views for password resets
## TODO: Replace with Mako-ized views
url(r'^password_change/$', 'django.contrib.auth.views.password_change',
name='password_change'),
url(r'^password_change_done/$', 'django.contrib.auth.views.password_change_done',
name='password_change_done'),
url(r'^password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
'student.views.password_reset_confirm_wrapper',
name='password_reset_confirm'),
url(r'^password_reset_complete/$', 'django.contrib.auth.views.password_reset_complete',
name='password_reset_complete'),
url(r'^password_reset_done/$', 'django.contrib.auth.views.password_reset_done',
name='password_reset_done'),
url(r'^heartbeat$', include('heartbeat.urls')),
# Note: these are older versions of the User API that will eventually be
# subsumed by api/user listed below.
url(r'^user_api/', include('openedx.core.djangoapps.user_api.legacy_urls')),
url(r'^notifier_api/', include('notifier_api.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
# Feedback Form endpoint
url(r'^submit_feedback$', 'util.views.submit_feedback'),
# Enrollment API RESTful endpoints
url(r'^api/enrollment/v1/', include('enrollment.urls')),
# Courseware search endpoints
url(r'^search/', include('search.urls')),
# Course content API
url(r'^api/course_structure/', include('course_structure_api.urls', namespace='course_structure_api')),
# Course API
url(r'^api/courses/', include('course_api.urls')),
# User API endpoints
url(r'^api/user/', include('openedx.core.djangoapps.user_api.urls')),
# Bookmarks API endpoints
url(r'^api/bookmarks/', include('openedx.core.djangoapps.bookmarks.urls')),
# Profile Images API endpoints
url(r'^api/profile_images/', include('openedx.core.djangoapps.profile_images.urls')),
# Video Abstraction Layer used to allow video teams to manage video assets
# independently of courseware. https://github.com/edx/edx-val
url(r'^api/val/v0/', include('edxval.urls')),
url(r'^api/commerce/', include('commerce.api.urls', namespace='commerce_api')),
url(r'^api/credit/', include('openedx.core.djangoapps.credit.urls', app_name="credit", namespace='credit')),
)
if settings.FEATURES["ENABLE_COMBINED_LOGIN_REGISTRATION"]:
# Backwards compatibility with old URL structure, but serve the new views
urlpatterns += (
url(r'^login$', 'student_account.views.login_and_registration_form',
{'initial_mode': 'login'}, name="signin_user"),
url(r'^register$', 'student_account.views.login_and_registration_form',
{'initial_mode': 'register'}, name="register_user"),
)
else:
# Serve the old views
urlpatterns += (
url(r'
|
^login$', 'student.views.signin_user', name="signin_user"),
url(r'^register$', 'student.views.register_user', name="register_user"),
)
if settings.FEATURES["ENABLE_MOBILE_REST_API"]:
|
urlpatterns += (
url(r'^api/mobile/v0.5/', include('mobile_api.urls')),
)
# if settings.FEATURES.get("MULTIPLE_ENROLLMENT_ROLES"):
urlpatterns += (
# TODO Namespace these!
url(r'^verify_student/', include('verify_student.urls')),
url(r'^course_modes/', include('course_modes.urls')),
)
js_info_dict = {
'domain': 'djangojs',
# We need to explicitly include external Django apps that are not in LOCALE_PATHS.
'packages': ('openassessment',),
}
urlpatterns += (
# Serve catalog of localized strings to be rendered by Javascript
url(r'^i18n.js$', 'django.views.i18n.javascript_catalog', js_info_dict),
)
# sysadmin dashboard, to see what courses are loaded, to delete & load courses
if settings.FEATURES["ENABLE_SYSADMIN_DASHBOARD"]:
urlpatterns += (
url(r'^sysadmin/', include('dashboard.sysadmin_urls')),
)
urlpatterns += (
url(r'^support/', include('support.urls', app_name="support", namespace='support')),
)
# Semi-static views (these need to be rendered and have the login bar, but don't change)
urlpatterns += (
url(r'^404$', 'static_template_view.views.render',
{'template': '404.html'}, name="404"),
)
# Favicon
favicon_path = microsite.get_value('favicon_path', settings.FAVICON_PATH)
urlpatterns += (url(
r'^favicon\.ico$',
RedirectView.as_view(url=settings.STATIC_URL + favicon_path, permanent=True)
),)
# Semi-static views only used by edX, not by themes
if not settings.FEATURES["USE_CUSTOM_THEME"]:
urlpatterns += (
url(r'^blog$', 'static_template_view.views.render',
{'template': 'blog.html'}, name="blog"),
url(r'^contact$', 'static_template_view.views.render',
{'template': 'contact.html'}, name="contact"),
url(r'^donate$', 'static_template_view.views.render',
{'template': 'donate.html'}, name="donate"),
url(r'^faq$', 'static_template_view.views.render',
{'template': 'faq.html'}, name="faq"),
url(r'^help$', 'static_template_view.views.render',
{'template': 'help.html'}, name="help_edx"),
url(r'^jobs$', 'static_template_view.views.render',
{'template': 'jobs.html'}, name="jobs"),
url(r'^news$', 'static_template_view.views.render',
{'template': 'news.html'}, name="news"),
url(r'^press$', 'static_template_view.views.render',
{'template': 'press.html'}, name="press"),
url(r'^media-kit$', 'static_template_view.views.render',
{'template': 'media-kit.html'}, name="media-kit"),
# TODO: (bridger) The copyright has been removed until it is updated for edX
# url(r'^copyright$', 'static_template_view.views.render',
# {'template': 'copyright.html'}, name="copyright"),
# Press releases
url(r'^press/([_a-zA-Z0-9-]+)$', 'static_template_view.views.render_press_release', name='press_release'),
)
# Only enable URLs for those marketing links actually enabled in the
# settin
|
maciekswat/dolfin_1.3.0
|
site-packages/dolfin_utils/pjobs/slurm.py
|
Python
|
gpl-3.0
| 1,881
| 0.002127
|
#!/usr/bin/env python
# Copyright (C) 2013 Johannes Ring
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
template = """#!/bin/bash
%(sbatch)s
%(paths)s
%(setup)s
%(job)s
"""
def job_script(job, jobname, nodes, ppn, walltime, mem, vmem, workdir, \
email, paths, setup, keep_environment, queue, parallel_environment):
"""
|
Generate a slurm specific job script.
"""
sbatch = "#SBATCH --job-name=" + jobname
sbatch += " --time=%d:00:00" % walltime
sbatch += " --ntasks=" + str(nodes)
sbatch += " --cpus-per-task=" + str(ppn)
if mem:
sbatch += " --mem-per-cpu=" + mem
if email:
sbatch += " --mail-user=" + email
if keep_environment:
sbatch += " --get-user-env"
# Issue warnings on not supported arguments
if vmem:
print "Warning: 'vmem' is no
|
t supported for the 'slurm' backend"
if queue:
print "Warning: 'queue' is not supported for the 'slurm' backend"
if parallel_environment:
print "Warning: 'parallel_environment' is not supported for the 'slurm' backend"
args = dict(sbatch=sbatch,
workdir=workdir,
paths=paths,
setup=setup,
job=job)
return template % args
|
pymedusa/SickRage
|
lib/unrar2/windows.py
|
Python
|
gpl-3.0
| 12,977
| 0.000848
|
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE W
|
ARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PAR
|
TICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Low level interface - see UnRARDLL\UNRARDLL.TXT
from __future__ import generators
import ctypes
import ctypes.wintypes
import os
import os.path
import re
import time
import sys
from .rar_exceptions import *
if sys.version_info > (3,3) and sys.stderr is not None:
import faulthandler
faulthandler.enable()
# Version-dependent str<->bytes converters, using the interpreter's default
# encoding.  On Python 2, str already is a byte string, so both are identity.
if sys.version_info[0] >= 3:
    def string_from_bytes(s):
        # Decode a byte string into text.
        return s.decode(sys.getdefaultencoding())
    def bytes_from_string(s):
        # Encode text into a byte string.
        return s.encode(sys.getdefaultencoding())
else:
    def string_from_bytes(s):
        return s
    def bytes_from_string(s):
        return s
ERAR_END_ARCHIVE = 10
ERAR_NO_MEMORY = 11
ERAR_BAD_DATA = 12
ERAR_BAD_ARCHIVE = 13
ERAR_UNKNOWN_FORMAT = 14
ERAR_EOPEN = 15
ERAR_ECREATE = 16
ERAR_ECLOSE = 17
ERAR_EREAD = 18
ERAR_EWRITE = 19
ERAR_SMALL_BUF = 20
ERAR_UNKNOWN = 21
ERAR_MISSING_PASSWORD = 22
RAR_OM_LIST = 0
RAR_OM_EXTRACT = 1
RAR_SKIP = 0
RAR_TEST = 1
RAR_EXTRACT = 2
RAR_VOL_ASK = 0
RAR_VOL_NOTIFY = 1
RAR_DLL_VERSION = 3
# enum UNRARCALLBACK_MESSAGES
UCM_CHANGEVOLUME = 0
UCM_PROCESSDATA = 1
UCM_NEEDPASSWORD = 2
architecture_bits = ctypes.sizeof(ctypes.c_voidp) * 8
dll_name = "unrar.dll"
if architecture_bits == 64:
dll_name = "x64\\unrar64.dll"
volume_naming1 = re.compile("[.]r([0-9]{2})$")
volume_naming2 = re.compile("[.]([0-9]{3})[.]rar$")
volume_naming3 = re.compile("[.]part([0-9]+)[.]rar$")
try:
dll_filename = os.path.join(os.path.split(__file__)[0], 'UnRARDLL', dll_name)
if sys.version_info[:3] == (2, 7, 13):
# http://bugs.python.org/issue29082
dll_filename = str(dll_filename)
unrar = ctypes.WinDLL(dll_filename)
except WindowsError:
dll_filename = dll_name
if sys.version_info[:3] == (2, 7, 13):
# http://bugs.python.org/issue29082
dll_filename = str(dll_filename)
unrar = ctypes.WinDLL(dll_filename)
class RAROpenArchiveDataEx(ctypes.Structure):
    """ctypes mirror of unrar.dll's RAROpenArchiveDataEx structure.

    Each instance allocates a 64 KB comment buffer and wires it into the
    ``_CmtBuf``/``CmtBufSize`` fields so the DLL can copy the archive
    comment out.  ``_fields_`` order and types must match the DLL's C
    layout exactly — do not reorder.
    """
    def __init__(self, ArcName=None, ArcNameW=u'', OpenMode=RAR_OM_LIST):
        # Kept on self so the buffer stays alive as long as the struct does.
        self.CmtBuf = ctypes.c_buffer(64 * 1024)
        ctypes.Structure.__init__(self, ArcName=ArcName, ArcNameW=ArcNameW,
                                  OpenMode=OpenMode,
                                  _CmtBuf=ctypes.addressof(self.CmtBuf),
                                  CmtBufSize=ctypes.sizeof(self.CmtBuf))
    _fields_ = [
        ('ArcName', ctypes.c_char_p),
        ('ArcNameW', ctypes.c_wchar_p),
        ('OpenMode', ctypes.c_uint),
        ('OpenResult', ctypes.c_uint),
        ('_CmtBuf', ctypes.c_voidp),
        ('CmtBufSize', ctypes.c_uint),
        ('CmtSize', ctypes.c_uint),
        ('CmtState', ctypes.c_uint),
        ('Flags', ctypes.c_uint),
        ('Reserved', ctypes.c_uint * 32),
    ]
class RARHeaderDataEx(ctypes.Structure):
    """ctypes mirror of unrar.dll's RARHeaderDataEx structure.

    Filled in by RARReadHeaderEx for each archive member.  A 64 KB comment
    buffer is allocated per instance and wired into ``_CmtBuf`` /
    ``CmtBufSize``.  ``_fields_`` must match the DLL's C layout exactly.
    """
    def __init__(self):
        # Kept on self so the buffer outlives the DLL calls that fill it.
        self.CmtBuf = ctypes.c_buffer(64 * 1024)
        ctypes.Structure.__init__(self, _CmtBuf=ctypes.addressof(self.CmtBuf),
                                  CmtBufSize=ctypes.sizeof(self.CmtBuf))
    _fields_ = [
        ('ArcName', ctypes.c_char * 1024),
        ('ArcNameW', ctypes.c_wchar * 1024),
        ('FileName', ctypes.c_char * 1024),
        ('FileNameW', ctypes.c_wchar * 1024),
        ('Flags', ctypes.c_uint),
        ('PackSize', ctypes.c_uint),
        ('PackSizeHigh', ctypes.c_uint),
        ('UnpSize', ctypes.c_uint),
        ('UnpSizeHigh', ctypes.c_uint),
        ('HostOS', ctypes.c_uint),
        ('FileCRC', ctypes.c_uint),
        ('FileTime', ctypes.c_uint),
        ('UnpVer', ctypes.c_uint),
        ('Method', ctypes.c_uint),
        ('FileAttr', ctypes.c_uint),
        ('_CmtBuf', ctypes.c_voidp),
        ('CmtBufSize', ctypes.c_uint),
        ('CmtSize', ctypes.c_uint),
        ('CmtState', ctypes.c_uint),
        ('Reserved', ctypes.c_uint * 1024),
    ]
def DosDateTimeToTimeTuple(dosDateTime):
    """Convert an MS-DOS format date time to a Python time tuple.

    The high 16 bits hold the date (years since 1980, month, day); the low
    16 bits hold the time (hours, minutes, seconds stored halved).
    """
    date_word, time_word = divmod(dosDateTime, 1 << 16)
    # Date word layout: yyyyyyym mmmddddd (year is an offset from 1980).
    day = date_word & 0x1f
    month = (date_word >> 5) & 0xf
    year = (date_word >> 9) + 1980
    # Time word layout: hhhhhmmm mmmsssss (two-second resolution).
    hour = time_word >> 11
    minute = (time_word >> 5) & 0x3f
    second = (time_word & 0x1f) * 2
    # Round-trip through mktime/localtime to produce a normalized tuple.
    stamp = time.mktime((year, month, day, hour, minute, second, 0, 1, -1))
    return time.localtime(stamp)
def _wrap(restype, func, argtypes):
result = func
result.argtypes = argtypes
result.restype = restype
return result
RARGetDllVersion = _wrap(ctypes.c_int, unrar.RARGetDllVersion, [])
RAROpenArchiveEx = _wrap(ctypes.wintypes.HANDLE, unrar.RAROpenArchiveEx,
[ctypes.POINTER(RAROpenArchiveDataEx)])
RARReadHeaderEx = _wrap(ctypes.c_int, unrar.RARReadHeaderEx,
[ctypes.wintypes.HANDLE,
ctypes.POINTER(RARHeaderDataEx)])
_RARSetPassword = _wrap(ctypes.c_int, unrar.RARSetPassword,
[ctypes.wintypes.HANDLE, ctypes.c_char_p])
def RARSetPassword(handle, password):
    """Set *password* on the open archive *handle*.

    Thin wrapper over the DLL entry point; the DLL's return value is
    ignored.
    """
    _RARSetPassword(handle, password)
RARProcessFile = _wrap(ctypes.c_int, unrar.RARProcessFile,
[ctypes.wintypes.HANDLE, ctypes.c_int, ctypes.c_char_p,
ctypes.c_char_p])
RARCloseArchive = _wrap(ctypes.c_int, unrar.RARCloseArchive,
[ctypes.wintypes.HANDLE])
# The author of the UnRAR library uses "long" as the types of all the parameters,
# even if some of them are pointers *facepalm*
UNRARCALLBACK = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_voidp, ctypes.c_voidp,
ctypes.c_voidp, ctypes.c_voidp)
RARSetCallback = _wrap(ctypes.c_int, unrar.RARSetCallback,
[ctypes.wintypes.HANDLE, UNRARCALLBACK, ctypes.c_long])
RARExceptions = {
ERAR_NO_MEMORY: MemoryError,
ERAR_BAD_DATA: ArchiveHeaderBroken,
ERAR_BAD_ARCHIVE: InvalidRARArchive,
ERAR_EOPEN: FileOpenError,
}
class PassiveReader:
    """Collects extracted file data delivered through the unrar callback.

    If *usercallback* is given, each data chunk is forwarded to it;
    otherwise chunks are buffered and can be retrieved via get_result().
    """

    def __init__(self, usercallback=None):
        self.buf = []  # buffered chunks used when no user callback is set
        self.ucb = usercallback

    def _callback(self, msg, UserData, P1, P2):
        # Only data-chunk notifications carry payload; P1/P2 are the
        # chunk's address and length as supplied by the DLL.
        if msg != UCM_PROCESSDATA:
            return 1
        data = (ctypes.c_char * P2).from_address(P1).raw
        consume = self.ucb if self.ucb is not None else self.buf.append
        consume(data)
        return 1

    def get_result(self):
        """Return everything buffered so far as a single bytes object."""
        return b''.join(self.buf)
class RarInfoIterator(object):
def __init__(self, arc):
self.arc = arc
self.index = 0
self.headerData = RARHeaderDataEx()
self.res = RARReadHeaderEx(self.arc._handle,
ctypes.byref(self.headerData))
if self.res in [ERAR_BAD_DATA, ERAR_MISSING_PASSWORD]:
raise IncorrectRARPassword
self.arc.lockStatus = "locked"
self.arc.needskip = False
def __iter__(self):
return self
def __next__(self):
if self.index > 0:
if self.arc.needskip:
RARProcessFile(self.arc._handle, RAR_SKIP, None, None)
self.res = RARReadHeaderEx(self.a
|
xspager/openbaybrowser
|
csv_import/management/commands/import_csv.py
|
Python
|
mit
| 1,058
| 0.021739
|
import csv
from django.core.management.base import BaseCommand, CommandError
from torrent.models import
|
Torrent
class Command(BaseCommand):
    """One-off management command that bulk-imports torrent rows from a
    pipe-separated dump file.

    NOTE(review): ``args``/``help`` still carry the Django-tutorial poll
    text and do not describe what this command actually does.
    """
    args = '<poll_id poll_id ...>'
    help = 'Closes the specified poll for voting'
    def handle(self, *args, **options):
        # NOTE(review): hard-coded developer path; should come from a
        # command argument or a setting before this is reusable.
        with open("/home/dlemos/tmpfs/tmp/torrents_mini.csv") as csvfile:
            #reader = csv.reader(csvfile, delimiter='|', quotechar='"')
            try:
                #for row in reader:
                # Each line is expected to hold exactly 7 pipe-separated
                # fields; anything else is skipped as malformed.
                torrents = []
                for line in csvfile:
                    rows = line.split('|')
                    if len(rows) != 7:
                        continue
                    # NOTE(review): fields 5 and 6 of each row are ignored
                    # — confirm the Torrent model does not need them.
                    torrent = Torrent(
                        name = rows[0],
                        size= rows[1],
                        hash = rows[2],
                        num_files = rows[3],
                        category = rows[4]
                    )
                    torrents.append(torrent)
                # One bulk INSERT instead of a query per row.
                Torrent.objects.bulk_create(torrents)
            except Exception, e:
                # NOTE(review): Python 2 except syntax, and dropping into
                # the debugger on any failure — replace with real handling.
                import pdb; pdb.set_trace()
|
RT-Thread/rtthread_fsl
|
utils/mdp/eMPL-pythonclient/euclid.py
|
Python
|
lgpl-2.1
| 69,409
| 0.003472
|
#!/usr/bin/env python
#
# euclid graphics maths module
#
# Copyright (c) 2006 Alex Holkner
# Alex.Holkner@mail.google.com
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''euclid graphics maths module
Documentation and tests are included in the file "euclid.txt", or online
at http://code.google.com/p/pyeuclid
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
__revision__ = '$Revision$'
import math
import operator
import types
# Some magic here. If _use_slots is True, the classes will derive from
# object and will define a __slots__ class variable. If _use_slots is
# False, classes will be old-style and will not define __slots__.
#
# _use_slots = True: Memory efficient, probably faster in future versions
# of Python, "better".
# _use_slots = False: Ordinary classes, much faster than slots in current
# versions of Python (2.4 and 2.5).
_use_slots = True
# If True, allows components of Vector2 and Vector3 to be set via swizzling;
# e.g. v.xyz = (1, 2, 3). This is much, much slower than the more verbose
# v.x = 1; v.y = 2; v.z = 3, and slows down ordinary element setting as
# well. Recommended setting is False.
_enable_swizzle_set = False
# Requires class to derive from object.
if _enable_swizzle_set:
_use_slots = True
# Implement _use_slots magic.
class _EuclidMetaclass(type):
    """Metaclass implementing the module's ``_use_slots`` switch.

    When ``_use_slots`` is true, classes are rebuilt to derive from
    ``object`` so their ``__slots__`` declaration takes effect; otherwise
    ``__slots__`` is stripped and an old-style (Python 2) class is built.
    Classes that declare ``__slots__`` also receive explicit pickle hooks,
    because slotted instances have no ``__dict__`` to pickle.
    """
    def __new__(cls, name, bases, dct):
        if '__slots__' in dct:
            # Slotted instances need explicit pickle support.
            dct['__getstate__'] = cls._create_getstate(dct['__slots__'])
            dct['__setstate__'] = cls._create_setstate(dct['__slots__'])
        if _use_slots:
            return type.__new__(cls, name, bases + (object,), dct)
        else:
            if '__slots__' in dct:
                del dct['__slots__']
            # Python 2 only: build an old-style class.
            return types.ClassType.__new__(types.ClassType, name, bases, dct)
    @classmethod
    def _create_getstate(cls, slots):
        # Build a __getstate__ that snapshots each slot into a dict.
        def __getstate__(self):
            d = {}
            for slot in slots:
                d[slot] = getattr(self, slot)
            return d
        return __getstate__
    @classmethod
    def _create_setstate(cls, slots):
        # Build the matching __setstate__ that restores slot values.
        def __setstate__(self, state):
            for name, value in state.items():
                setattr(self, name, value)
        return __setstate__
__metaclass__ = _EuclidMetaclass
class Vector2:
__slots__ = ['x', 'y']
__hash__ = None
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __copy__(self):
return self.__class__(self.x, self.y)
copy = __copy__
def __repr__(self):
return 'Vector2(%.2f, %.2f)' % (self.x, self.y)
def __eq__(self, other):
if isinstance(other, Vector2):
return self.x == other.x and \
self.y == other.y
else:
assert hasattr(other, '_
|
_len
|
__') and len(other) == 2
return self.x == other[0] and \
self.y == other[1]
def __ne__(self, other):
return not self.__eq__(other)
def __nonzero__(self):
return self.x != 0 or self.y != 0
def __len__(self):
return 2
def __getitem__(self, key):
return (self.x, self.y)[key]
def __setitem__(self, key, value):
l = [self.x, self.y]
l[key] = value
self.x, self.y = l
def __iter__(self):
return iter((self.x, self.y))
def __getattr__(self, name):
try:
return tuple([(self.x, self.y)['xy'.index(c)] \
for c in name])
except ValueError:
raise AttributeError, name
if _enable_swizzle_set:
# This has detrimental performance on ordinary setattr as well
# if enabled
def __setattr__(self, name, value):
if len(name) == 1:
object.__setattr__(self, name, value)
else:
try:
l = [self.x, self.y]
for c, v in map(None, name, value):
l['xy'.index(c)] = v
self.x, self.y = l
except ValueError:
raise AttributeError, name
    def __add__(self, other):
        """Component-wise addition with a Vector2/Point2 or length-2 sequence.

        For geometric operands, the result class follows affine rules:
        same class + same class -> Vector2, mixed classes -> Point2
        (Point2 is defined elsewhere in this module).
        """
        if isinstance(other, Vector2):
            # Vector + Vector -> Vector
            # Vector + Point -> Point
            # Point + Point -> Vector
            if self.__class__ is other.__class__:
                _class = Vector2
            else:
                _class = Point2
            return _class(self.x + other.x,
                          self.y + other.y)
        else:
            # Generic operand: accept any sequence of exactly two items.
            assert hasattr(other, '__len__') and len(other) == 2
            return Vector2(self.x + other[0],
                          self.y + other[1])
    # Addition is commutative, so the reflected form reuses __add__.
    __radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vector2):
self.x += other.x
self.y += other.y
else:
self.x += other[0]
self.y += other[1]
return self
    def __sub__(self, other):
        """Component-wise subtraction; result-class rules mirror __add__."""
        if isinstance(other, Vector2):
            # Vector - Vector -> Vector
            # Vector - Point -> Point
            # Point - Point -> Vector
            if self.__class__ is other.__class__:
                _class = Vector2
            else:
                _class = Point2
            return _class(self.x - other.x,
                          self.y - other.y)
        else:
            # Generic operand: any sequence of exactly two items.
            assert hasattr(other, '__len__') and len(other) == 2
            return Vector2(self.x - other[0],
                           self.y - other[1])
def __rsub__(self, other):
if isinstance(other, Vector2):
return Vector2(other.x - self.x,
other.y - self.y)
else:
assert hasattr(other, '__len__') and len(other) == 2
return Vector2(other.x - self[0],
other.y - self[1])
def __mul__(self, other):
assert type(other) in (int, long, float)
return Vector2(self.x * other,
self.y * other)
__rmul__ = __mul__
def __imul__(self, other):
assert type(other) in (int, long, float)
self.x *= other
self.y *= other
return self
def __div__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.div(self.x, other),
operator.div(self.y, other))
def __rdiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.div(other, self.x),
operator.div(other, self.y))
def __floordiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.floordiv(self.x, other),
operator.floordiv(self.y, other))
def __rfloordiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.floordiv(other, self.x),
operator.floordiv(other, self.y))
def __truediv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.truediv(self.x, other),
operator.truediv(self.y, other))
def __rtruediv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.truediv(other, self.x),
operator.truediv(other, self.y))
def __neg__(self):
return Vector2(-self.x,
-self.y)
__pos__ = __copy__
|
redhat-openstack/trove
|
trove/tests/scenario/groups/guest_log_group.py
|
Python
|
apache-2.0
| 9,327
| 0
|
# Copyright 2015 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.scenario.groups import instance_create_group
from trove.tests.scenario.groups.test_group import TestGroup
GROUP = "scenario.guest_log_group"
@test(depends_on_groups=[instance_create_group.GROUP], groups=[GROUP])
class GuestLogGroup(TestGroup):
"""Test Guest Log functionality."""
    def __init__(self):
        # Delegate runner lookup to TestGroup, passing the runner module
        # name and runner class name this group's tests dispatch to.
        super(GuestLogGroup, self).__init__(
            'guest_log_runners', 'GuestLogRunner')
@test
def test_log_list(self):
"""Test that log-list works."""
self.test_runner.run_test_log_list()
@test
def test_admin_log_list(self):
"""Test that log-list works for admin user."""
self.test_runner.run_test_admin_log_list()
@test
def test_log_show(self):
"""Test that log-show works on USER log."""
self.test_runner.run_test_log_show()
@test
def test_log_enable_sys(self):
"""Ensure log-enable on SYS log fails."""
self.test_runner.run_test_log_enable_sys()
@test
def test_log_disable_sys(self):
"""Ensure log-disable on SYS log fails."""
self.test_runner.run_test_log_disable_sys()
@test
def test_log_show_unauth_user(self):
"""Ensure log-show by unauth client on USER log fails."""
self.test_runner.run_test_log_show_unauth_user()
@test
def test_log_list_unauth_user(self):
"""Ensure log-list by unauth client on USER log fails."""
self.test_runner.run_test_log_list_unauth_user()
@test
def test_log_generator_unauth_user(self):
"""Ensure log-generator by unauth client on USER log fails."""
self.test_runner.run_test_log_generator_unauth_user()
@test
def test_log_generator_publish_unauth_user(self):
"""Ensure log-generator by unauth client with publish fails."""
self.test_runner.run_test_log_generator_publish_unauth_user()
@test
def test_log_show_unexposed_user(self):
"""Ensure log-show on unexposed log fails for auth client."""
self.test_runner.run_test_log_show_unexposed_user()
@test
def test_log_enable_unexposed_user(self):
"""Ensure log-enable on unexposed log fails for auth client."""
self.test_runner.run_test_log_enable_unexposed_user()
@test
def test_log_disable_unexposed_user(self):
"""Ensure log-disable on unexposed log fails for auth client."""
self.test_runner.run_test_log_disable_unexposed_user()
@test
def test_log_publish_unexposed_user(self):
"""Ensure log-publish on unexposed log fails for auth client."""
self.test_runner.run_test_log_publish_unexposed_user()
@test
def test_log_discard_unexposed_user(self):
"""Ensure log-discard on unexposed log fails for auth client."""
self.test_runner.run_test_log_discard_unexposed_user()
@test(runs_after=[test_log_show])
def test_log_enable_user(self):
"""Test log-enable on USER log."""
self.test_runner.run_test_log_enable_user()
@test(runs_after=[test_log_enable_user])
def test_log_enable_flip_user(self):
"""Test that flipping restart-required log-enable works."""
self.test_runner.run_test_log_enable_flip_user()
@test(runs_after=[test_log_enable_flip_user])
def test_restart_datastore(self):
"""Test restart datastore if required."""
self.test_runner.run_test_restart_datastore()
@test(runs_after=[test_restart_datastore])
def test_wait_for_restart(self):
"""Wait for restart to complete."""
self.test_runner.run_test_wait_for_restart()
@test(runs_after=[test_wait_for_restart])
def test_log_publish_user(self):
"""Test log-publish on USER log."""
self.test_runner.run_test_log_publish_user()
@test(runs_after=[test_log_publish_user])
def test_add_data(self):
"""Add data for second log-publish on USER log."""
self.test_runner.run_test_add_data()
@test(runs_after=[test_add_data])
def test_verify_data(self):
"""Verify data for second log-publish on USER log."""
self.test_runner.run_test_verify_data()
@test(runs_after=[test_verify_data])
def test_log_publish_again_user(self):
"""Test log-publish again on USER log."""
self.test_runner.run_test_log_publish_again_user()
@test(runs_after=[test_log_publish_again_user])
def test_log_generator_user(self):
"""Test log-generator on USER log."""
self.test_runner.run_test_log_generator_user()
@test(runs_after=[test_log_generator_user])
def test_log_generator_publish_user(self):
"""Test log-generator with publish on USER log."""
self.test_runner.run_test_log_generator_publish_user()
@test(runs_after=[test_log_generator_publish_user])
def test_log_generator_swift_client_user(self):
"""Test log-generator on USER log with passed-in Swift client."""
self.test_runner.run_test_log_generator_swift_client_user()
@test(runs_after=[test_log_generator_swift_client_user])
def test_add_data_again(self):
"""Add more data for log-generator row-by-row test on USER log."""
self.test_runner.run_test_add_data_again()
@test(runs_after=[test_add_data_again])
def test_verify_data_again(self):
"""Verify data for log-generator row-by-row test on USER log."""
self.test_runner.run_test_verify_data_again()
@test(runs_after=[test_verify_data_again])
def test_log_generator_user_by_row(self):
"""Test log-generator on USER log row-by-row."""
self.test_runner.run_test_log_generator_user_by_row()
@test(depends_on=[test_log_publish_user],
runs_after=[test_log_generator_user_by_row])
def test_log_save_user(self):
"""Test log-save on USER log."""
self.test_runner.run_test_log_save_user()
@test(depends_on=[test_log_publish_user],
runs_after=[test_log_save_user])
def test_log_save_publish_user(self):
"""Test log-save on USER log with publish."""
self.test_runner.run_test_log_save_publish_user()
@test(runs_after=[test_log_save_publish_user])
def test_log_discard_user(self):
"""Test log-discard on USER log."""
self.test_runner.run_test_log_discard_user()
@test(runs_after=[test_log_discard_user])
def test_log_disable_user(self):
"""Test log-disable on USER log."""
self.test_runner.run_test_log_disable_user()
@test(runs_after=[test_log_disable_user])
def test_restart_datastore_again(self):
"""Test restart datastore again if required."""
self.test_runner.run_test_restart_datastore()
@test(runs_after=[test_restart_datastore_again])
def test_wait_for_restart_again(self):
"""Wait for restart to complete again."""
self.test_runner.run_test_wait_for_restart()
@test
def test_log_show_sys(se
|
lf):
"""Test that log-show works for SYS log."""
self.test_runner.run_test_log_show_sys()
@test(runs_after=[test_log_show_sys])
def test_log_publish_sys(self):
"""Test log-publish on SYS log."""
self.test_runner.run_test_log_publish_sys()
@test(runs_after=[test_log_publish_sys])
def test_log_publish_again_sys(self):
"""Test l
|
og-publish again on SYS log."""
self.test_runner.run_test_log_publish_again_sys()
@test(depends_on=[test_log_publish_again_sys])
def test_log_generator_sys(self):
"""Test log-generator on SYS log."""
self.test
|
renalreg/radar
|
radar/api/views/fuan.py
|
Python
|
agpl-3.0
| 1,288
| 0.003106
|
from radar.api.serializers.fuan import FuanClinicalPictureSerializer
from radar.api.views.common import (
IntegerLookupListView,
PatientObjectDetailView,
PatientObjectListView,
StringLookupListView,
)
from radar.models.fuan import FuanClinicalPicture, RELATIVES, THP_RESULTS
class FuanClinicalPictureListView(PatientObjectListView):
    # Collection endpoint for FUAN clinical pictures; behavior comes from
    # PatientObjectListView, configured by the two attributes below.
    serializer_class = FuanClinicalPictureSerializer
    model_class = FuanClinicalPicture
class FuanClinicalPictureDetailView(PatientObjectDetailView):
    # Single-object endpoint for one FUAN clinical picture; behavior comes
    # from PatientObjectDetailView.
    serializer_class = FuanClinicalPictureSerializer
    model_class = FuanClinicalPicture
class FuanRelativeListView(IntegerLookupListView):
    # Lookup endpoint exposing the integer-coded RELATIVES choices.
    items = RELATIVES
class FuanTHPResultListView(StringLookupListView):
    # Lookup endpoint exposing the string-coded THP_RESULTS choices.
    items = THP_RESULTS
def register_views(app):
app.add_url_rule(
'/fuan-clini
|
cal-pictures',
view_func=FuanClinicalPictureListView.as_view('fuan_clinical_picture_list')
)
app.add_url_rule(
'/fuan-clinical-pictures/<id>',
view_func=FuanClinicalPictureDetailView.as_view('fuan_clinical_picture_detail')
)
app.add_url_rule('/fuan-relatives', view_func=FuanRelativeListView.as_view('fuan_relative_list'))
app.add_url_rule('/fuan-thp-results', view_func=FuanTHPResultListView.as_view('fu
|
an_thp_result_list'))
|
luminousflux/lflux
|
lfluxproject/lsubscribe/forms.py
|
Python
|
mit
| 846
| 0.00591
|
from django import forms
from django.db import models
from .models import Subscription
from django.utils.translation import ugettext as _
class SubscriptionForm(forms.ModelForm):
class Meta:
model = Subscription
def clean(self):
cleaned_data = super(Su
|
bscriptionForm, self).clean()
if not cleaned_data.get('email','') and not cleaned_data.get('user_id',''):
raise forms.ValidationError("need either email or user!")
return cleaned_data
class SubscriptionEmailForm(forms.ModelForm):
class Meta:
model = Subscription
fields = ['frequency', 'email', 'content_type', 'object_id']
|
widgets = {
'frequency': forms.widgets.RadioSelect(),
'object_id': forms.widgets.HiddenInput(),
'content_type': forms.widgets.HiddenInput(),
}
|
simongregory/electron
|
tools/js2asar.py
|
Python
|
mit
| 1,415
| 0.013428
|
#!/usr/bin/env python
import errno
import os
import shutil
import subprocess
import sys
import tempfile
SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))
def main():
archive = sys.argv[1]
js_source_files = sys.argv[2:]
output_dir = tempfile.mkdtemp()
copy_js(js_source_files, output_dir)
call_asar(archive, output_dir)
shutil.r
|
mtree(output_dir)
def copy_js(js_source_files, output_dir):
for source_file in js_source_files:
output_filename =
|
os.path.splitext(source_file)[0] + '.js'
output_path = os.path.join(output_dir, output_filename)
safe_mkdir(os.path.dirname(output_path))
shutil.copy2(source_file, output_path)
def call_asar(archive, output_dir):
    """Pack the ``lib`` subdirectory of *output_dir* into *archive*.

    Uses the asar script vendored under SOURCE_ROOT/node_modules, invoked
    through the node executable located by find_node().
    """
    js_dir = os.path.join(output_dir, 'lib')
    asar = os.path.join(SOURCE_ROOT, 'node_modules', 'asar', 'bin', 'asar')
    subprocess.check_call([find_node(), asar, 'pack', js_dir, archive])
def find_node():
    """Locate the node executable.

    On Windows/Cygwin, search the standard install directories and every
    PATH entry for ``node.exe`` and return the first hit; on other
    platforms (or when nothing is found) fall back to the bare command
    name ``node``.
    """
    search_dirs = [
        'C:/Program Files (x86)/nodejs',
        'C:/Program Files/nodejs',
    ] + os.environ['PATH'].split(os.pathsep)
    if sys.platform not in ['win32', 'cygwin']:
        return 'node'
    for directory in search_dirs:
        exe = os.path.join(directory, 'node.exe')
        if os.path.exists(exe):
            return exe
    return 'node'
def safe_mkdir(path):
    """Create *path* (including parents), tolerating an existing directory.

    Any OSError other than "already exists" is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as error:
        # A pre-existing directory is fine; anything else is a real error.
        if error.errno == errno.EEXIST:
            return
        raise
if __name__ == '__main__':
sys.exit(main())
|
jarn0ld/gnuradio
|
gr-audio/examples/python/dial_tone.py
|
Python
|
gpl-3.0
| 2,150
| 0.002326
|
#!/usr/bin/env python
#
# Copyright 2004,2005,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = OptionParser(option_class=eng_option)
parser.add_option("-O", "--audio-o
|
utput", type="string", default="",
help="pcm outpu
|
t device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
sample_rate = int(options.sample_rate)
ampl = 0.1
src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl)
src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl)
dst = audio.sink(sample_rate, options.audio_output)
self.connect(src0, (dst, 0))
self.connect(src1, (dst, 1))
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
|
posiputt/Sandbox
|
games.py
|
Python
|
mit
| 454
| 0.006608
|
import meistermind
|
def show_menu (games):
    """Print the greeting and one menu line per (key, title) in *games*."""
    # Python 2 print-statement syntax throughout.
    print "Hello!"
    print "Choose your game or press Q and Enter to quit."
    for g in games:
        print "\t" + g + ") " + games[g]
quit = False
while not (quit):
games = {
"1": "Meistermind",
"2": "No Game Here",
}
show_menu(games)
choice = str(raw_input())
if choice.lower() == "q":
quit = True
elif int(choice) == 1:
mei
|
stermind.meistermind()
|
HiSPARC/topaz
|
130711_event_histogram/eventtime_histogram.py
|
Python
|
gpl-3.0
| 2,523
| 0.000793
|
import datetime
import numpy as np
from artist import Plot
from sapphire import Station
from sapphire.transformations.clock import datetime_to_gps, gps_to_datetime
from get_aligned_eventtimes import get_aligned, get_station_numbers
YEARS = range(2004, datetime.date.today().year + 1)
YEARS_TICKS = np.array([datetime_to_gps(datetime.date(y, 1, 1)) for y in YEARS])
YEARS_LABELS = [str(y) for y in YEARS]
def normalize_event_rates(data, station_numbers):
"""Normalize event rates using the number of detectors
Number per hour is divided by the expected number of events per hour for a
station with a certain number of detectors.
So after this a '1.3' would be on average 30% more events per hour than the
expected number of events per hour for such a station.
"""
scaled_data = data.copy()
for i, s in enumerate(sta
|
tion_numbers):
n = Station(s).n_detectors()
if n == 2:
scaled_data[i] /= 1200.
elif n == 4:
scaled_data[i] /= 2500.
scaled_data = np.where(scaled_data > 2., 2., scaled_data)
return scaled_data
def plot_histogram(data, timestamps, station_numbe
|
rs):
"""Make a 2D histogram plot of the number of events over time per station
:param data: list of lists, with the number of events.
:param station_numbers: list of station numbers in the data list.
"""
plot = Plot(width=r'\linewidth', height=r'1.3\linewidth')
plot.histogram2d(data.T[::7][1:], timestamps[::7] / 1e9,
np.arange(len(station_numbers) + 1),
type='reverse_bw', bitmap=True)
plot.set_label(gps_to_datetime(timestamps[-1]).date().isoformat(), 'upper left')
plot.set_xlimits(min=YEARS_TICKS[0] / 1e9, max=timestamps[-1] / 1e9)
plot.set_xticks(YEARS_TICKS / 1e9)
plot.set_xtick_labels(YEARS_LABELS)
plot.set_yticks(np.arange(0.5, len(station_numbers) + 0.5))
plot.set_ytick_labels(['%d' % s for s in sorted(station_numbers)],
style=r'font=\sffamily\tiny')
plot.set_axis_options('ytick pos=right')
plot.save_as_pdf('eventtime_histogram_network_hour')
if __name__ == "__main__":
if 'aligned_data_all' not in globals():
aligned_data, aligned_data_all, first, last = get_aligned()
station_numbers = get_station_numbers()
timestamps = np.arange(first, last + 3601, 3600)
scaled_data = normalize_event_rates(aligned_data_all, station_numbers)
plot_histogram(scaled_data, timestamps, station_numbers)
|
kiniou/qtile
|
libqtile/xcbq.py
|
Python
|
mit
| 31,290
| 0.000352
|
# Copyright (c) 2009-2010 Aldo Cortesi
# Copyright (c) 2010 matt
# Copyright (c) 2010, 2012, 2014 dequis
# Copyright (c) 2010 Philip Kranz
# Copyright (c) 2010-2011 Paul Colomiets
# Copyright (c) 2011 osebelin
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011 Tzbob
# Copyright (c) 2012, 2014 roger
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014-2015 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A minimal EWMH-aware OO layer over xpyb. This is NOT intended to be
complete - it only implements the subset of functionalty needed by qtile.
"""
from __future__ import print_function, division
import six
from xcffib.xproto import CW, WindowClass, EventMask
from xcffib.xfixes import SelectionEventMask
import xcffib
import xcffib.randr
import xcffib.xinerama
import xcffib.xproto
from . import xkeysyms
keysyms = xkeysyms.keysyms
# These should be in xpyb:
ModMasks = {
"shift": 1 << 0,
"lock": 1 << 1,
"control": 1 << 2,
"mod1": 1 << 3,
"mod2": 1 << 4,
"mod3": 1 << 5,
"mod4": 1 << 6,
"mod5": 1 << 7,
}
ModMapOrder = [
"shift",
"lock",
"control",
"mod1",
"mod2",
"mod3",
"mod4",
"mod5"
]
AllButtonsMask = 0b11111 << 8
ButtonMotionMask = 1 << 13
ButtonReleaseMask = 1 << 3
NormalHintsFlags = {
"USPosition": 1, # User-specified x, y
"USSize": 2, # User-specified width, height
"PPosition": 4, # Program-specified position
"PSize": 8, # Program-specified size
"PMinSize": 16, # Program-specified minimum size
"PMaxSize": 32, # Program-specified maximum size
"PResizeInc": 64, # Program-specified resize increments
"PAspect": 128, # Program-specified min and max aspect ratios
"PBaseSize": 256, # Program-specified base size
"PWinGravity": 512, # Program-specified window gravity
}
HintsFlags = {
"InputHint": 1, # input
"StateHint": 2, # initial_state
"IconPixmapHint": 4, # icon_pixmap
"IconWindowHint": 8, # icon_window
"IconPositionHint": 16, # icon_x & icon_y
"IconMaskHint": 32, # icon_mask
"WindowGroupHint": 64, # window_group
"MessageHint": 128, # (this bit is obsolete)
"UrgencyHint": 256, # urgency
}
# http://standards.freedesktop.org/wm-spec/latest/ar01s05.html#idm139870830002400
WindowTypes = {
'_NET_WM_WINDOW_TYPE_DESKTOP': "desktop",
'_NET_WM_WINDOW_TYPE_DOCK': "dock",
'_NET_WM_WINDOW_TYPE_TOOLBAR': "toolbar",
'_NET_WM_WINDOW_TYPE_MENU': "menu",
'_NET_WM_WINDOW_TYPE_UTILITY': "utility",
'_NET_WM_WINDOW_TYPE_SPLASH': "splash",
'_NET_WM_WINDOW_TYPE_DIALOG': "dialog",
'_NET_WM_WINDOW_TYPE_DROPDOWN_MENU': "dropdown",
'_NET_WM_WINDOW_TYPE_POPUP_MENU': "menu",
'_NET_WM_WINDOW_TYPE_TOOLTIP': "tooltip",
'_NET_WM_WINDOW_TYPE_NOTIFICATION': "notification",
'_NET_WM_WINDOW_TYPE_COMBO': "combo",
'_NET_WM_WINDOW_TYPE_DND': "dnd",
'_NET_WM_WINDOW_TYPE_NORMAL': "normal",
}
# http://standards.freedesktop.org/wm-spec/latest/ar01s05.html#idm139870829988448
WindowStates = {
None: 'normal',
'_NET_WM_STATE_FULLSCREEN': 'fullscreen',
}
# Maps property names to types and formats.
PropertyMap = {
# ewmh properties
"_NET_DESKTOP_GEOMETRY": ("CARDINAL", 32),
"_NET_SUPPORTED": ("ATOM", 32),
"_NET_SUPPORTING_WM_CHECK": ("WINDOW", 32),
"_NET_WM_NAME": ("UTF8_STRING", 8),
"_NET_WM_PID": ("CARDINAL", 32),
"_NET_CLIENT_LIST": ("WINDOW", 32),
"_NET_CLIENT_LIST_STACKING": ("WINDOW", 32),
"_NET_NUMBER_OF_DESKTOPS": ("CARDINAL", 32),
"_NET_CURRENT_DESKTOP": ("CARDINAL", 32),
"_NET_DESKTOP_NAMES": ("UTF8_STRING", 8),
"_NET_WORKAREA": ("CARDINAL", 32),
"_NET_ACTIVE_WINDOW": ("WINDOW", 32),
"_NET_WM_DESKTOP": ("CARDINAL", 32),
"_NET_WM_STRUT": ("CARDINAL", 32),
"_NET_WM_STRUT_PARTIAL": ("CARDINAL", 32),
"_NET_WM_WINDOW_OPACITY": ("CARDINAL", 32),
"_NET_WM_WINDOW_TYPE": ("CARDINAL", 32),
# Net State
"_NET_WM_STATE": ("ATOM", 32),
"_NET_WM_STATE_STICKY": ("ATOM", 32),
"_NET_WM_STATE_SKIP_TASKBAR": ("ATOM", 32),
"_NET_WM_STATE_FULLSCREEN": ("ATOM", 32),
"_NET_WM_STATE_MAXIMIZED_HORZ": ("ATOM", 32),
"_NET_WM_STATE_MAXIMIZED_VERT": ("ATOM", 32),
"_NET_WM_STATE_ABOVE": ("ATOM", 32),
"_NET_WM_STATE_BELOW": ("ATOM", 32),
"_NET_WM_STATE_MODAL": ("ATOM", 32),
"_NET_WM_STATE_HIDDEN": ("ATOM", 32),
"_NET_WM_STATE_DEMANDS_ATTENTION": ("ATOM", 32),
# ICCCM
"WM_STATE": ("WM_STATE", 32),
# Qtile-specific properties
"QTILE_INTERNAL": ("CARDINAL", 32)
}
# TODO add everything required here:
# http://standards.freedesktop.org/wm-spec/latest/ar01s03.html
SUPPORTED_ATOMS = [
# From http://standards.freedesktop.org/wm-spec/latest/ar01s03.html
'_NET_SUPPORTED',
'_NET_CLIENT_LIST',
'_NET_CLIENT_LIST_STACKING',
'_NET_CURRENT_DESKTOP',
'_NET_ACTIVE_WINDOW',
# '_NET_WORKAREA',
'_NET_SUPPORTING_WM_CHECK',
# From http://standards.freedesktop.org/wm-spec/latest/ar01s05.html
'_NET_WM_NAME',
'_NET_WM_VISIBLE_NAME',
'_NET_WM_ICON_NAME',
'_NET_WM_DESKTOP',
'_NET_WM_WINDOW_TYPE',
'_NET_WM_STATE',
'_NET_WM_STRUT',
'_NET_WM_STRUT_PARTIAL',
'_NET_WM_PID',
]
SUPPORTED_ATOMS.extend(WindowTypes.keys())
SUPPORTED_ATOMS.extend(key for key in WindowStates.keys() if key)
XCB_CONN_ERRORS = {
1: 'XCB_CONN_ERROR',
2: 'XCB_CONN_CLOSED_EXT_NOTSUPPORTED',
3: 'XCB_CONN_CLOSED_MEM_INSUFFICIENT',
4: 'XCB_CONN_CLOSED_REQ_LEN_EXCEED',
5: 'XCB_CONN_CLOSED_PARSE_ERR',
6: 'XCB_CONN_CLOSED_INVALID_SCREEN',
7: 'XCB_CONN_CLOSED_FDPASSING_FAILED',
}
class MaskMap:
"""
A general utility class that encapsulates the way the mask/value idiom
works in xpyb. It understands a special attribute _maskvalue on
objects, which will be used instead of the object value if present.
This
|
lets us passin a Font object, rather than Font.fid, for example.
"""
def __init__(self, obj):
self.mmap = []
for i in dir(obj):
if not i.startswith("_"):
self.mmap.append((getattr(obj, i), i.lowe
|
r()))
self.mmap.sort()
def __call__(self, **kwargs):
"""
kwargs: keys should be in the mmap name set
Returns a (mask, values) tuple.
"""
mask = 0
values = []
for m, s in self.mmap:
if s in kwargs:
val = kwargs.get(s)
if val is not None:
mask |= m
values.append(getattr(val, "_maskvalue", val))
del kwargs[s]
if kwargs:
raise ValueError("Unknown mask names: %s" % list(kwargs.keys()))
return mask, values
ConfigureMasks = MaskMap(xcffib.xproto.ConfigWindow)
AttributeMasks = MaskMap(CW)
GCMasks = MaskMap(xcffib.xproto.GC)
class AtomCache:
def __init__(self, conn):
self.conn = conn
self.atoms = {}
self.reverse = {}
# We c
|
virtuald/pygi-composite-templates
|
gi_composites.py
|
Python
|
lgpl-2.1
| 9,156
| 0.000655
|
#
# Copyright (C) 2015 Dustin Spicuzza <dustin@virtualroadside.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
from os.path import abspath, join
import inspect
import warnings
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
__all__ = ['GtkTemplate']
class GtkTemplateWarning(UserWarning):
pass
def _connect_func(builder, obj, signal_name, handler_name,
connect_object, flags, cls):
'''Handles GtkBuilder signal connect events'''
if connect_object is None:
extra = ()
else:
extra = (connect_object,)
# The handler name refers to an attribute on the template instance,
# so ask GtkBuilder for the template instance
template_inst = builder.get_object(cls.__gtype_name__)
if template_inst is None: # This should never happen
errmsg = "Internal error: cannot find template instance! obj: %s; " \
"signal: %s; handler: %s; connect_obj: %s; class: %s" % \
(obj, signal_name, handler_name, connect_object, cls)
warnings.warn(errmsg, GtkTemplateWarning)
return
handler = getattr(template_inst, handler_name)
if flags == GObject.ConnectFlags.AFTER:
obj.connect_after(signal_name, handler, *extra)
else:
obj.connect(signal_name, handler, *extra)
template_inst.__connected_template_signals__.add(handler_name)
def _register_template(cls, template_bytes):
'''Registers the template for the widget and hooks init_template'''
# This implementation won't work if there are nested templates, but
# we can't do that anyways due to PyGObject limitations so it's ok
if not hasattr(cls, 'set_template'):
raise TypeError("Requires PyGObject 3.13.2 or greater")
cls.set_template(template_bytes)
bound_methods = set()
bound_widgets = set()
# Walk the class, find marked callbacks and child attributes
for name in dir(cls):
o = getattr(cls, name, None)
if inspect.ismethod(o):
if hasattr(o, '_gtk_callback'):
bound_methods.add(name)
# Don't need to call this, as connect_func always gets called
#cls.bind_template_callback_full(name, o)
elif isinstance(o, _Child):
cls.bind_template_child_full(name, True, 0)
bound_widgets.add(name)
# Have to setup a special connect function to connect at template init
# because the methods are not bound yet
cls.set_connect_func(_connect_func, cls)
cls.__gtemplate_methods__ = bound_methods
cls.__gtemplate_widgets__ = bound_widgets
base_init_template = cls.init_template
cls.init_template = lambda s: _init_template(s, cls, base_init_template)
def _init_template(self, cls, base_init_template):
'''This would be better as an override for Gtk.Widget'''
# TODO: could disallow using a metaclass.. but this is good enough
# .. if you disagree, feel free to fix it and issue a PR :)
if self.__class__ is not cls:
raise TypeError("Inheritance from classes with @GtkTemplate decorators "
"is not allowed at this time")
connected_signals = set()
self.__connected_template_signals__ = connected_signals
base_init_template(self)
for name in self.__gtemplate_widgets__:
widget = self.get_template_child(cls, name)
self.__dict__[name] = widget
if widget is None:
# Bug: if you bind a template child, and one of them was
# not present, then the whole template is broken (and
# it's not currently possible for us to know which
# one is broken either -- but the stderr should show
# something useful with a Gtk-CRITICAL message)
raise AttributeError("A missing child widget was set using "
"GtkTemplate.Child and the entire "
"template is now broken (widgets: %s)" %
', '.join(self.__gtemplate_widgets__))
for name in self.__gtemplate_methods__.difference(connected_signals):
errmsg = ("Signal '%s' was declared with @GtkTemplate.Callback " +
|
"but was not present in template") % name
warnings.warn(errmsg, GtkTemplateWarning)
# TODO: Make it easier for IDE to introspect this
class _Child(object):
'''
Assign this to an attribute in your class definition and it will
be replaced with a widget defined in the UI file when init_template
is called
''
|
'
__slots__ = []
@staticmethod
def widgets(count):
'''
Allows declaring multiple widgets with less typing::
button \
label1 \
label2 = GtkTemplate.Child.widgets(3)
'''
return [_Child() for _ in range(count)]
class _GtkTemplate(object):
'''
Use this class decorator to signify that a class is a composite
widget which will receive widgets and connect to signals as
defined in a UI template. You must call init_template to
cause the widgets/signals to be initialized from the template::
@GtkTemplate(ui='foo.ui')
class Foo(Gtk.Box):
def __init__(self):
super(Foo, self).__init__()
self.init_template()
The 'ui' parameter can either be a file path or a GResource resource
path::
@GtkTemplate(ui='/org/example/foo.ui')
class Foo(Gtk.Box):
pass
To connect a signal to a method on your instance, do::
@GtkTemplate.Callback
def on_thing_happened(self, widget):
pass
To create a child attribute that is retrieved from your template,
add this to your class definition::
@GtkTemplate(ui='foo.ui')
class Foo(Gtk.Box):
widget = GtkTemplate.Child()
Note: This is implemented as a class decorator, but if it were
included with PyGI I suspect it might be better to do this
in the GObject metaclass (or similar) so that init_template
can be called automatically instead of forcing the user to do it.
.. note:: Due to limitations in PyGObject, you may not inherit from
python objects that use the GtkTemplate decorator.
'''
__ui_path__ = None
@staticmethod
def Callback(f):
'''
Decorator that designates a method to be attached to a signal from
the template
'''
f._gtk_callback = True
return f
Child = _Child
@staticmethod
def set_ui_path(*path):
'''
If using file paths instead of resources, call this *before*
loading anything that uses GtkTemplate, or it will fail to load
your template file
:param path: one or more path elements, will be joined together
to create the final path
TODO: Alternatively, could wait until first class instantiation
before registering templates? Would need a metaclass...
'''
_GtkTemplate.__ui_path__ = abspath(join(*path))
def __init__(self, ui):
self.ui = ui
def __call__(self, cls):
if not issubclass(cls, Gtk.Widget):
rais
|
kunalsharma05/django-project
|
django_project/migrations/0014_auto_20160710_1200.py
|
Python
|
bsd-3-clause
| 558
| 0.001792
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-10 12:00
from __future__ import unicode_literals
from dja
|
ngo.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
|
('django_project', '0013_auto_20160710_1124'),
]
operations = [
migrations.AlterField(
model_name='annotation',
name='comment',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='django_project.Comment'),
),
]
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/scf/v20180416/errorcodes.py
|
Python
|
mit
| 27,390
| 0.001814
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 操作失败。
FAILEDOPERATION = 'FailedOperation'
# API网关触发器创建失败。
FAILEDOPERATION_APIGATEWAY = 'FailedOperation.ApiGateway'
# 创建触发器失败。
FAILEDOPERATION_APIGW = 'FailedOperation.Apigw'
# 获取Apm InstanceId失败。
FAILEDOPERATION_APMCONFIGINSTANCEID = 'FailedOperation.ApmConfigInstanceId'
# 当前异步事件状态不支持此操作,请稍后重试。
FAILEDOPERATION_ASYNCEVENTSTATUS = 'FailedOperation.AsyncEventStatus'
# 复制函数失败。
FAILEDOPERATION_COPYFAILED = 'FailedOperation.CopyFailed'
# 不支持复制到该地域。
FAILEDOPERATION_COPYFUNCTION = 'FailedOperation.CopyFunction'
# 操作COS资源失败。
FAILEDOPERATION_COS = 'FailedOperation.Cos'
# 创建别名失败。
FAILEDOPERATION_CREATEALIAS = 'FailedOperation.CreateAlias'
# 操作失败。
FAILEDOPERATION_CREATEFUNCTION = 'FailedOperation.CreateFunction'
# 创建命名空间失败。
FAILEDOPERATION_CREATENAMESPACE = 'FailedOperation.CreateNamespace'
# 当前函数状态无法进行此操作。
FAILEDOPERATION_CREATETRIGGER = 'FailedOperation.CreateTrigger'
# 当前调试状态无法执行此操作。
FAILEDOPERATION_DEBUGMODESTATUS = 'FailedOperation.DebugModeStatus'
# 调试状态下无法更新执行超时时间。
FAILEDOPERATION_DEBUGMODEUPDATETIMEOUTFAIL = 'FailedOperation.DebugModeUpdateTimeOutFail'
# 删除别名失败。
FAILEDOPERATION_DELETEALIAS = 'FailedOperation.DeleteAlias'
# 当前函数状态无法进行此操作,请在函数状态正常时重试。
FAILEDOPERATION_DELETEFUNCTION = 'FailedOperation.DeleteFunction'
# 删除layer版本失败。
FAILEDOPERATION_DELETELAYERVERSION = 'FailedOperation.DeleteLayerVersion'
# 无法删除默认Namespace。
FAILEDOPERATION_DELETENAMESPACE = 'FailedOperation.DeleteNamespace'
# 删除触发器失败。
FAILEDOPERATION_DELETETRIGGER = 'FailedOperation.DeleteTrigger'
# 当前函数状态无法更新代码,请在状态为正常时更新。
FAILEDOPERATION_FUNCTIONNAMESTATUSERROR = 'FailedOperation.FunctionNameStatusError'
|
# 函数在部署中,无法做此操作。
FAILEDOPERATION_FUNCTIONSTATUSERROR = 'FailedOperation.FunctionStatusError'
# 当前函数版本状态无法进行此操作,请在版本状态为正常时重试。
FAILEDOPERATION_FUNCTIONVERSIONSTATUSNOTACTIVE = 'FailedOperation.FunctionVersionStatusNotActive'
# 获取别名信息失败。
FAILEDOPERATION_GETALIAS = 'FailedOperation.GetAlias'
# 获取函数代码地址失败。
FAILEDOPERATION_GETFUNCTIONADDRESS = 'FailedOperation.GetFunctionAddress'
# 当前账号或命名空间处于欠费状态,请在可用时重试。
FAILEDOPERATION_INSUFFICIENTBALANCE = 'FailedOpe
|
ration.InsufficientBalance'
# 调用函数失败。
FAILEDOPERATION_INVOKEFUNCTION = 'FailedOperation.InvokeFunction'
# 命名空间已存在,请勿重复创建。
FAILEDOPERATION_NAMESPACE = 'FailedOperation.Namespace'
# 服务开通失败。
FAILEDOPERATION_OPENSERVICE = 'FailedOperation.OpenService'
# 操作冲突。
FAILEDOPERATION_OPERATIONCONFLICT = 'FailedOperation.OperationConflict'
# 创建定时预置任务失败。
FAILEDOPERATION_PROVISIONCREATETIMER = 'FailedOperation.ProvisionCreateTimer'
# 删除定时预置任务失败。
FAILEDOPERATION_PROVISIONDELETETIMER = 'FailedOperation.ProvisionDeleteTimer'
# 当前函数版本已有预置任务处于进行中,请稍后重试。
FAILEDOPERATION_PROVISIONEDINPROGRESS = 'FailedOperation.ProvisionedInProgress'
# 发布layer版本失败。
FAILEDOPERATION_PUBLISHLAYERVERSION = 'FailedOperation.PublishLayerVersion'
# 当前函数状态无法发布版本,请在状态为正常时发布。
FAILEDOPERATION_PUBLISHVERSION = 'FailedOperation.PublishVersion'
# 角色不存在。
FAILEDOPERATION_QCSROLENOTFOUND = 'FailedOperation.QcsRoleNotFound'
# 当前函数已有保留并发设置任务处于进行中,请稍后重试。
FAILEDOPERATION_RESERVEDINPROGRESS = 'FailedOperation.ReservedInProgress'
# Topic不存在。
FAILEDOPERATION_TOPICNOTEXIST = 'FailedOperation.TopicNotExist'
# 用户并发内存配额设置任务处于进行中,请稍后重试。
FAILEDOPERATION_TOTALCONCURRENCYMEMORYINPROGRESS = 'FailedOperation.TotalConcurrencyMemoryInProgress'
# 指定的服务未开通,可以提交工单申请开通服务。
FAILEDOPERATION_UNOPENEDSERVICE = 'FailedOperation.UnOpenedService'
# 更新别名失败。
FAILEDOPERATION_UPDATEALIAS = 'FailedOperation.UpdateAlias'
# 当前函数状态无法更新代码,请在状态为正常时更新。
FAILEDOPERATION_UPDATEFUNCTIONCODE = 'FailedOperation.UpdateFunctionCode'
# UpdateFunctionConfiguration操作失败。
FAILEDOPERATION_UPDATEFUNCTIONCONFIGURATION = 'FailedOperation.UpdateFunctionConfiguration'
# 内部错误。
INTERNALERROR = 'InternalError'
# 创建apigw触发器内部错误。
INTERNALERROR_APIGATEWAY = 'InternalError.ApiGateway'
# ckafka接口失败。
INTERNALERROR_CKAFKA = 'InternalError.Ckafka'
# 删除cmq触发器失败。
INTERNALERROR_CMQ = 'InternalError.Cmq'
# 更新触发器失败。
INTERNALERROR_COS = 'InternalError.Cos'
# ES错误。
INTERNALERROR_ES = 'InternalError.ES'
# 内部服务异常。
INTERNALERROR_EXCEPTION = 'InternalError.Exception'
# 内部服务错误。
INTERNALERROR_GETROLEERROR = 'InternalError.GetRoleError'
# 内部系统错误。
INTERNALERROR_SYSTEM = 'InternalError.System'
# 内部服务错误。
INTERNALERROR_SYSTEMERROR = 'InternalError.SystemError'
# FunctionName取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETER_FUNCTIONNAME = 'InvalidParameter.FunctionName'
# 请求参数不合法。
INVALIDPARAMETER_PAYLOAD = 'InvalidParameter.Payload'
# RoutingConfig参数传入错误。
INVALIDPARAMETER_ROUTINGCONFIG = 'InvalidParameter.RoutingConfig'
# 参数取值错误。
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Action取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_ACTION = 'InvalidParameterValue.Action'
# AdditionalVersionWeights参数传入错误。
INVALIDPARAMETERVALUE_ADDITIONALVERSIONWEIGHTS = 'InvalidParameterValue.AdditionalVersionWeights'
# 不支持删除默认别名,请修正后重试。
INVALIDPARAMETERVALUE_ALIAS = 'InvalidParameterValue.Alias'
# ApiGateway参数错误。
INVALIDPARAMETERVALUE_APIGATEWAY = 'InvalidParameterValue.ApiGateway'
# ApmConfig参数传入错误。
INVALIDPARAMETERVALUE_APMCONFIG = 'InvalidParameterValue.ApmConfig'
# ApmConfigInstanceId参数传入错误。
INVALIDPARAMETERVALUE_APMCONFIGINSTANCEID = 'InvalidParameterValue.ApmConfigInstanceId'
# ApmConfigRegion参数传入错误。
INVALIDPARAMETERVALUE_APMCONFIGREGION = 'InvalidParameterValue.ApmConfigRegion'
# Args 参数值有误。
INVALIDPARAMETERVALUE_ARGS = 'InvalidParameterValue.Args'
# 函数异步重试配置参数无效。
INVALIDPARAMETERVALUE_ASYNCTRIGGERCONFIG = 'InvalidParameterValue.AsyncTriggerConfig'
# Cdn传入错误。
INVALIDPARAMETERVALUE_CDN = 'InvalidParameterValue.Cdn'
# cfs配置项重复。
INVALIDPARAMETERVALUE_CFSPARAMETERDUPLICATE = 'InvalidParameterValue.CfsParameterDuplicate'
# cfs配置项取值与规范不符。
INVALIDPARAMETERVALUE_CFSPARAMETERERROR = 'InvalidParameterValue.CfsParameterError'
# cfs参数格式与规范不符。
INVALIDPARAMETERVALUE_CFSSTRUCTIONERROR = 'InvalidParameterValue.CfsStructionError'
# Ckafka传入错误。
INVALIDPARAMETERVALUE_CKAFKA = 'InvalidParameterValue.Ckafka'
# 运行函数时的参数传入有误。
INVALIDPARAMETERVALUE_CLIENTCONTEXT = 'InvalidParameterValue.ClientContext'
# Cls传入错误。
INVALIDPARAMETERVALUE_CLS = 'InvalidParameterValue.Cls'
# 修改Cls配置需要传入Role参数,请修正后重试。
INVALIDPARAMETERVALUE_CLSROLE = 'InvalidParameterValue.ClsRole'
# Cmq传入错误。
INVALIDPARAMETERVALUE_CMQ = 'InvalidParameterValue.Cmq'
# Code传入错误。
INVALIDPARAMETERVALUE_CODE = 'InvalidParameterValue.Code'
# CodeSecret传入错误。
INVALIDPARAMETERVALUE_CODESECRET = 'InvalidParameterValue.CodeSecret'
# CodeSource传入错误。
INVALIDPARAMETERVALUE_CODESOURCE = 'InvalidParameterValue.CodeSource'
# Command[Entrypoint] 参数值有误。
INVALIDPARAMETERVALUE_COMMAND = 'InvalidParameterValue.Command'
# CompatibleRuntimes参数传入错误。
INVALIDPARAMETERVALUE_COMPATIBLERUNTIMES = 'InvalidParameterValue.CompatibleRuntimes'
# Content参数传入错误。
INVALIDPARAMETERVALUE_CONTENT = 'InvalidParameterValue.Content'
# Cos传入错误。
INVALIDPARAMETERVALUE_COS = 'InvalidParameterValue.Cos'
# CosBucketName不符合规范。
INVALIDPARAMETERVALUE_COSBUCKETNAME = 'InvalidParameterValue.CosBucketName'
# CosBucketRegion取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_COSBUCKETREGION = 'InvalidParameterValue.CosBucketRegion'
# CosObjectName不符合规范。
INVALIDPARAMETERVALUE_COSOBJECTNAME = 'InvalidParameterValue.CosObjectName'
# CustomArgument参数长度超限。
INVALIDPARAMETERVALUE_CUSTOMARGUMENT = 'InvalidParameterValue.CustomArgument'
# DateTime传入错误。
INVALIDPARAMETERVALUE_DATETIME = 'InvalidParameterValue.DateTime'
# DeadLetterConfig取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_DEADLETTE
|
CoderBotOrg/coderbot
|
test/pigpio_mock.py
|
Python
|
gpl-2.0
| 2,965
| 0.005059
|
import unittest.mock
import time
import logging
import logging.handlers
import coderbot
logger = logging.getLogger()
class PIGPIOMock(object):
"""Implements PIGPIO librar
|
y mock class
PIGPIO is the library used to access digital General Purpose IO (GPIO),
this mock class emulates the behaviour of the inputs used by the sonar sensors: a fake signal is triggered to emulate a 85.1 distance.
Output (DC motor and Servo) are just no-op function, they implement basic parameters check via assertions.
"""
def __init__(self, host="localhost", port=None):
self.callbacks = {}
logger.info("mock called")
def set_mode(self, pin_id, pin_
|
mode):
"""mock set_mode"""
pass
def get_mode(self, pin_id):
"""mock get_mode"""
return 0
def callback(self, pin_id, edge, callback):
"""mock callback"""
self.callbacks[pin_id] = callback
return self.Callback(pin_id)
def write(self, pin_id, value):
"""mock write"""
assert(pin_id > 0 and pin_id < 32)
assert(value == 0 or value == 1)
def read(self, pin_id):
"""mock read"""
pass
def gpio_trigger(self, pin_id):
"""mock gpio_trigger"""
assert(pin_id > 0 and pin_id < 32)
# mock sonars triger and echo
if pin_id == coderbot.GPIO_CODERBOT_V_4.PIN_SONAR_1_TRIGGER or pin_id == coderbot.GPIO_CODERBOT_V_5.PIN_SONAR_1_TRIGGER:
if pin_id == coderbot.GPIO_CODERBOT_V_4.PIN_SONAR_1_TRIGGER:
GPIOS=coderbot.GPIO_CODERBOT_V_4
else:
GPIOS=coderbot.GPIO_CODERBOT_V_5
self.callbacks[GPIOS.PIN_SONAR_1_ECHO](GPIOS.PIN_SONAR_1_TRIGGER, 0, 0)
self.callbacks[GPIOS.PIN_SONAR_2_ECHO](GPIOS.PIN_SONAR_1_TRIGGER, 0, 0)
self.callbacks[GPIOS.PIN_SONAR_3_ECHO](GPIOS.PIN_SONAR_1_TRIGGER, 0, 0)
self.callbacks[GPIOS.PIN_SONAR_1_ECHO](GPIOS.PIN_SONAR_1_ECHO, 1, 0)
self.callbacks[GPIOS.PIN_SONAR_2_ECHO](GPIOS.PIN_SONAR_2_ECHO, 1, 0)
self.callbacks[GPIOS.PIN_SONAR_3_ECHO](GPIOS.PIN_SONAR_3_ECHO, 1, 0)
time.sleep(0.005)
self.callbacks[GPIOS.PIN_SONAR_1_ECHO](GPIOS.PIN_SONAR_1_ECHO, 0, 5000)
self.callbacks[GPIOS.PIN_SONAR_2_ECHO](GPIOS.PIN_SONAR_2_ECHO, 0, 5000)
self.callbacks[GPIOS.PIN_SONAR_3_ECHO](GPIOS.PIN_SONAR_3_ECHO, 0, 5000)
def set_PWM_frequency(self, pin_id, frequency):
"""mock set_PWM_frequency"""
assert(pin_id > 0 and pin_id < 32)
assert(frequency > 0)
def set_PWM_range(self, pin_id, range):
"""mock set_PWM_range"""
pass
def set_PWM_dutycycle(self, pin_id, dutycycle):
"""mock set_PWM_dutycyle"""
pass
class Callback(object):
def __init__(self, pin_id):
pass
def cancel(self):
pass
def set_pull_up_down(self, feedback_pin_A, mode):
pass
|
PerroTron/HostilPlanet
|
lib/tiles.py
|
Python
|
gpl-2.0
| 5,826
| 0.002575
|
import pygame
import tiles_basic
from tile import *
# NOTE: If you add new tiles, use t_init for regular tiles.
# tl_init and tr_init are for tiles that take up only half of the
# 16x16 tile, on the left or right side respectively.
TILES = {
# general purpose tiles
0x00: [t_init, [], None, ],
0x01: [t_init, ['solid'], tiles_basic.hit_block, 1, 1, 1, 1, ],
0x02: [t_init, ['solid'], tiles_basic.hit_breakable, 1, 1, 1, 1, ],
0x03: [t_init, ['player'], tiles_basic.hit_fire, ],
0x04: [t_init, [], None, ], # black background tile
0x05: [t_init, [], None, ], # exit sign
0x10: [t_init, ['solid'], tiles_basic.hit_block, 1, 1, 1, 1, ],
0x11: [t_init, ['solid'], tiles_basic.hit_block, 1, 0, 0, 0, ],
0x12: [t_init, ['solid'], tiles_basic.hit_fally, 1, 1, 1, 1, ],
0x15: [t_init, ['solid'], tiles_basic.hit_block, 1, 1, 1, 1, ],
0x21: [t_init, ['solid'], tiles_basic.hit_block, 1, 0, 0, 0, ],
# powerups and bonus items ...
0x17: [t_init, ['player'], tiles_basic.hit_drone, 'guardian'], # drone guardian
0x27: [t_init, ['player'], tiles_basic.hit_drone, 'defender'], # drone defender
0x37: [t_init, ['player'], tiles_basic.hit_drone, 'killer'], # drone killer
0x26: [t_init, ['player'], tiles_basic.hit_jetpack, 'double_jump'], # double_jump
0x36: [t_init, ['player'], tiles_basic.hit_jetpack, 'fly'], # fly
0x08: [t_init, ['player'], tiles_basic.hit_power, 'cannon'], # cannon
0x18: [t_init, ['player'], tiles_basic.hit_power, 'laser'], # laser
0x28: [t_init, ['player'], tiles_basic.hit_power, 'shootgun'], # shootgun
0x38: [t_init, ['player'], tiles_basic.hit_power, 'granadelauncher'], # shootgun
0x0C: [t_init, ['player'], tiles_basic.hit_life, ], # extra-life
0x1C: [t_init, ['player'], tiles_basic.hit_def, ], # extra-def
0x2C: [t_init, ['player'], tiles_basic.hit_chip, 1, ], # chip
0x2D: [t_init, ['player'], tiles_basic.hit_chip, 2, ], # chip
0x2E: [t_init, ['player'], tiles_basic.hit_chip, 3, ], # chip
0x2F: [t_init, ['player'], tiles_basic.hit_chip, 4, ], # chip
# tile animations
0x4D: [t_init, [], None, ], # torch
0x4E: [t_init, [], None, ], # torch
0x4F: [t_init, [], None, ], # torch
0x9D: [t_init, [], None, ], # red light
0x9E: [t_init, [], None, ], # red light
0xAD: [t_init, [], None, ], # cable
0xAE: [t_init, [], None, ], # cable
0xAF: [t_init, [], None, ], # cable
0xBD: [t_init, [], None, ], # cable
0xBE: [t_init, [], None, ], # cable
0xBF: [t_init, [], None, ], # cable
0x7D: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # radial
0x7E: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # radial
0x7F: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # radial
0x5D: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # lava
0x5E: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # lava
0x5F: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # lava
0xCD: [t_init, [], None, ], # acid
0xCE: [t_init, [], None, ], # acid
0xCF: [t_init, [], None, ], # acid
0xDD: [t_init, [], None, ], # acid
0xDE: [t_init, [], None, ], # acid
0xDF: [t_init, [], None, ], # acid
0x6D: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ],
|
# electron
0x6E: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # electron
0x6F: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # electron
0x4A: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # pinchos
0x4B: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # pinchos
0x4C: [t_init, ['player'], tiles_basic.hit_dmg, 1, 1, 1, 1, ], # pinchos
}
TANIMATE = [
# (starting_tile,animated list of frames incs),
(0x0C, [int(v) for v in '000000
|
01112223330000000000000000']), # extra life
(0x1C, [int(v) for v in '00000001112223330000000000000000']), # def
(0x08, [int(v) for v in '00000000000000000000000111222333']), # cannon
(0x18, [int(v) for v in '00000000000000000000000111222333']), # laser
(0x28, [int(v) for v in '00000000000000000000000111222333']), # shootgun
(0x38, [int(v) for v in '00000000000000000000000111222333']), # granadelauncher
(0x30, [int(v) for v in '1111111111111111111111111111111111111111111111111111111111111111']), # door
(0x4D, [int(v) for v in '0000000000000000111111111111111122222222222222221111111111111111']), # torch
(0x9D, [int(v) for v in '1111111111111111111100011111111111111111111111111100000000000000']), # red led
(0xAD, [int(v) for v in '0000000000000000000000000000000000000000000000000112211001100220']), # cable
(0xBD, [int(v) for v in '0000000000000000000000000000000000000000000000000112211001100220']), # cable
(0xCD, [int(v) for v in '0000000000000000000001111111111111111111112222222222222222222222']), # acid
(0xDD, [int(v) for v in '0000000000000000000001111111111111111111112222222222222222222222']), # acid
(0x7D, [int(v) for v in '00112211001122110011221100112211']), # radial
(0x5D, [int(v) for v in '0000000000000000000001111111111111111111112222222222222222222222']), # lava
(0x6D, [int(v) for v in '00000000000111111111112222222222']), # electron
(0x4A, [int(v) for v in '0000000000000000000001111111111111111111112222222222222222222222']), # pinchos acido
]
TREPLACE = [
# (tile_to_replace,replace_with)
(0x10, 0x00),
(0x11, 0x00),
]
def t_put(g, pos, n):
x, y = pos
if n not in TILES:
# print 'undefined tile:',x,y,'0x%02x'%n
t_init(g, pygame.Rect(x * TW, y * TH, TW, TH), n, [], None)
return
v = TILES[n]
v[0](g, pygame.Rect(x * TW, y * TH, TW, TH), n, *v[1:])
|
vollov/django-template
|
django/esite/populate_auto.py
|
Python
|
mit
| 2,560
| 0.010938
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'esite.settings')
import django
django.setup()
from auto.models import Car
#
def add_car(make, model, km, year, color, eng, drive,trans, icolor):
c = Car.objects.get_or_create(make=make, model=model, kilometers=km, year=year, color=color, engine_size=
|
eng, drivetrain=drive, transmition=trans, interanl_color=icolor)
def populate():
# car = Car(make='Acura',model='TL', kilometers=74673, year=2012, color='White', engine_size=3.7, drivetrain='AWD', transmition='MA')
add_car('Acura', 'TL', 74673, 2012, 'White', 3.7, 'AWD','MA','White')
add_car('Volkswagen', 'Touareg', 5344, 2015, 'Silver', 3.6, 'AWD','AU',
|
'White')
if __name__ == '__main__':
print "Starting Car population script..."
populate()
# def populate():
# python_cat = add_cat('Python')
#
# add_page(cat=python_cat,
# title="Official Python Tutorial",
# url="http://docs.python.org/2/tutorial/")
#
# add_page(cat=python_cat,
# title="How to Think like a Computer Scientist",
# url="http://www.greenteapress.com/thinkpython/")
#
# add_page(cat=python_cat,
# title="Learn Python in 10 Minutes",
# url="http://www.korokithakis.net/tutorials/python/")
#
# django_cat = add_cat("Django")
#
# add_page(cat=django_cat,
# title="Official Django Tutorial",
# url="https://docs.djangoproject.com/en/1.5/intro/tutorial01/")
#
# add_page(cat=django_cat,
# title="Django Rocks",
# url="http://www.djangorocks.com/")
#
# add_page(cat=django_cat,
# title="How to Tango with Django",
# url="http://www.tangowithdjango.com/")
#
# frame_cat = add_cat("Other Frameworks")
#
# add_page(cat=frame_cat,
# title="Bottle",
# url="http://bottlepy.org/docs/dev/")
#
# add_page(cat=frame_cat,
# title="Flask",
# url="http://flask.pocoo.org")
#
# # Print out what we have added to the user.
# for c in Category.objects.all():
# for p in Page.objects.filter(category=c):
# print "- {0} - {1}".format(str(c), str(p))
#
# def add_page(cat, title, url, views=0):
# p = Page.objects.get_or_create(category=cat, title=title)[0]
# p.url=url
# p.views=views
# p.save()
# return p
#
# def add_cat(name):
# c = Category.objects.get_or_create(name=name)[0]
# return c
# Start execution here!
# if __name__ == '__main__':
# print "Starting Rango population script..."
# populate()
|
SelvorWhim/competitive
|
LeetCode/MaximumSubarray.py
|
Python
|
unlicense
| 588
| 0.003401
|
cl
|
ass Solution:
# returns sum of contiguous non-empty subarray with greatest sum
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
sumCurr = 0
sumMax = -math.inf
for i in range(len(nums)):
sumCurr += nums[i]
if sumCurr > sumMax: # this is checked before negativity in case entire array is negative - in that case th
|
e one with the least absolute value will be returned
sumMax = sumCurr
if sumCurr < 0:
sumCurr = 0
return sumMax
|
googleapis/python-websecurityscanner
|
samples/generated_samples/websecurityscanner_v1beta_generated_web_security_scanner_list_crawled_urls_async.py
|
Python
|
apache-2.0
| 1,618
| 0.001854
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version
|
2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS
|
IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListCrawledUrls
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-websecurityscanner
# [START websecurityscanner_v1beta_generated_WebSecurityScanner_ListCrawledUrls_async]
from google.cloud import websecurityscanner_v1beta
async def sample_list_crawled_urls():
# Create a client
client = websecurityscanner_v1beta.WebSecurityScannerAsyncClient()
# Initialize request argument(s)
request = websecurityscanner_v1beta.ListCrawledUrlsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_crawled_urls(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END websecurityscanner_v1beta_generated_WebSecurityScanner_ListCrawledUrls_async]
|
hzj123/56th
|
pombola/place_data/bin/import_place_data.py
|
Python
|
agpl-3.0
| 3,048
| 0.003609
|
#!/usr/bin/env python
import sys
import csv
import os
import decimal
os.environ['DJANGO_SETTINGS_MODULE'] = 'pombola.settings'
sys.path.append('../../../')
sys.path.append('../../')
from django.utils.text import slugify
from pombola.core.models import Place
from pombola.place_data.models import Entry
place_kind_slug = sys.argv[1]
filename = sys.argv[2]
csv_file = open(filename, 'rU')
csv_reader = csv.DictReader(csv_file, dialect='excel')
# Get rid of the padding around the fieldnames
csv_reader.fieldnames = [x.strip() for x in csv_reader.fieldnames]
for row in csv_reader:
try:
place_slug = row['slug'].strip()
except KeyError:
# If there's no slug column, try slugifying the name column
# This will currently only happen on the Counties - the constituency
# spreadsheet has slugs.
# If we needed this to work for constituencies, we'd have to not add
# -constituency on the end as they don't have that.
place_slug = slugify(row['name'].strip()) + '-' + place_kind_slug
# Check place with this slug exists and is of the right kind.
try:
place = Place.objects.get(slug=place_slug, kind__slug=place_kind_slug)
except Place.DoesNotExist:
print "Cannot find %s with slug %s, continuing with next place." % (place_k
|
ind_slug, place_slug)
continue
try:
data_row = Entry.objects.get(place=place)
except Entry.DoesNotExist:
data_row = Entry()
data_row.place = place
data_row.population_male = int(row['Male Population'])
data_row.population_female = int(row['Female Population'])
data_row.population_total = int(row['Total Population'])
data_row.population_rank = int(row['Population Rank 1=H
|
ighest'])
data_row.gender_index = decimal.Decimal(row['Gender Ration Women:Men'])
data_row.gender_index_rank = int(row['Women to Men Ratio Rank 1=Highest'])
data_row.households_total = int(row['Number of Households'])
data_row.average_household_size = decimal.Decimal(row['Average Houshold Size'])
data_row.household_size_rank = int(row['Household Size Rank 1=Highest'])
data_row.area = decimal.Decimal(row['Area in Sq. Km.'])
data_row.area_rank = int(row['Area Size Rank 1=Highest'])
data_row.population_density = decimal.Decimal(row['Density people per Sq. Km'])
data_row.population_density_rank = int(row['Population Density Rank 1=Highest'])
try:
data_row.registered_voters_total = int(row['Total Registered Voters'])
data_row.registered_voters_proportion = decimal.Decimal(row['Registered Voters as % of Population'])
data_row.registered_voters_proportion_rank = int(row['Registered Voters % Rank 1=Highest'])
data_row.youth_voters_proportion = decimal.Decimal(row['Youth Voters as a % of Total'])
data_row.youth_voters_proportion_rank = int(row['Youth Voters % Rank 1=Highest'])
except KeyError:
# One some kinds of place, such as Counties, these columns don't exist.
pass
data_row.save()
|
dropbox/changes
|
changes/api/serializer/models/cluster.py
|
Python
|
apache-2.0
| 336
| 0
|
from changes.api.serializer import Crumbler, register
from changes.models.node import Cluster
@register(
|
Cluster)
class ClusterCrumbler(Crumbler):
def crumble(self, instance, attrs):
return {
'id': instance.id.hex,
|
'name': instance.label,
'dateCreated': instance.date_created,
}
|
klynton/freight
|
freight/utils/workspace.py
|
Python
|
apache-2.0
| 2,839
| 0
|
from __future__ import absolute_import
import logging
import os
import shlex
import shutil
import sys
import traceback
from flask import current_app
from subprocess import PIPE, Popen, STDOUT
from uuid import uuid1
from freight.exceptions import CommandError
class Workspace(obje
|
ct):
log = logging.getLogger('workspace')
def __init__(self, path, log=None):
self.path = path
if log is not None:
self.log = log
def whereis(self, program, env):
for path in env.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and \
not os.path.isdir(os.path.join(pat
|
h, program)):
return os.path.join(path, program)
return None
def _get_writer(self, pipe):
if not isinstance(pipe, int):
pipe = pipe.fileno()
return os.fdopen(pipe, 'w')
    def _run_process(self, command, *args, **kwargs):
        """Launch *command* as a subprocess rooted at the workspace path.

        Returns the Popen object; raises CommandError if the process cannot
        be started (distinguishing "command not found" from other OS errors).
        Python 2 code: relies on ``basestring`` and ``dict.iteritems``.
        """
        # NOTE(review): stdout/stderr are read from kwargs but never used
        # here -- callers like capture() override them before delegating.
        stdout = kwargs.get('stdout', sys.stdout)
        stderr = kwargs.get('stderr', sys.stderr)
        kwargs.setdefault('cwd', self.path)
        # Accept either a shell-style string or an argv list.
        if isinstance(command, basestring):
            command = shlex.split(command)
        command = map(str, command)
        # Run on a copy of the environment; PYTHONUNBUFFERED makes child
        # Python processes flush output immediately.
        env = os.environ.copy()
        env['PYTHONUNBUFFERED'] = '1'
        # Caller-supplied env entries overlay (not replace) the inherited ones.
        if kwargs.get('env'):
            for key, value in kwargs['env'].iteritems():
                env[key] = value
        kwargs['env'] = env
        kwargs['bufsize'] = 0
        self.log.info('Running {}'.format(command))
        try:
            proc = Popen(command, *args, **kwargs)
        except OSError as exc:
            # If the executable is not on PATH, report that clearly;
            # otherwise include the full traceback in the error message.
            if not self.whereis(command[0], env):
                msg = 'ERROR: Command not found: {}'.format(command[0])
            else:
                msg = traceback.format_exc()
            raise CommandError(command, 1, stdout=None, stderr=msg)
        return proc
def capture(self, command, *args, **kwargs):
kwargs['stdout'] = PIPE
kwargs['stderr'] = STDOUT
proc = self._run_process(command, *args, **kwargs)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
raise CommandError(command, proc.returncode, stdout, stderr)
return stdout
def run(self, command, *args, **kwargs):
proc = self._run_process(command, *args, **kwargs)
proc.wait()
if proc.returncode != 0:
raise CommandError(command, proc.returncode)
def remove(self):
if os.path.exists(self.path):
shutil.rmtree(self.path)
class TemporaryWorkspace(Workspace):
    """Workspace rooted at a unique throwaway directory under WORKSPACE_ROOT."""
    def __init__(self, *args, **kwargs):
        # uuid1().hex makes each instance's directory name unique, so
        # concurrent tasks cannot collide on the same workspace path.
        path = os.path.join(
            current_app.config['WORKSPACE_ROOT'],
            'freight-workspace-{}'.format(uuid1().hex),
        )
        super(TemporaryWorkspace, self).__init__(path, *args, **kwargs)
|
datacode-taavi/python-spi
|
spi/ioctl_numbers.py
|
Python
|
mit
| 1,678
| 0.004768
|
"""
Source: http://code.activestate.com/recipes/578225-linux-ioctl-numbers-in-python/
Linux ioctl numbers made easy
size can be an integer or format string compatible with struct module
for example include/linux/watchdog.h:
#define WATCHDOG_IOCTL_BASE 'W'
struct watchdog_info {
__u32 options; /* Options the card/driver supports */
__u32 firmware_version; /* Firmware version of the card */
__u8 identity[32]; /* Identity of the board */
};
#define WDIOC_GETSUPPORT _IOR(WATCHDOG_IOCTL_BASE, 0, struct watchdog_info)
becomes:
WDIOC_GETSUPPORT = _IOR(ord('W'), 0, "=II32s")
"""
import struct
# constant for linux portability
_IOC_NRBITS = 8
_IOC_TYPEBITS = 8
# architecture specific
_IOC_SIZEBITS = 14
_IOC_DIRBITS = 2
_IOC_NRMASK = (1 << _IOC_NRBITS) - 1
_IOC_TYPEMASK = (1 << _IOC_TYPEBITS) - 1
_IOC_SIZEMASK = (1 << _IOC_SIZEBITS) - 1
_IOC_DIRMASK = (1 << _IOC_DIRBITS) - 1
_IOC_NRSHIFT = 0
_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
_IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS
_IOC_NONE = 0
_IOC_WRITE = 1
_IOC_READ = 2
def _IOC(dir, type, nr, size):
if isinstance(size, str) or isinstance(size, unicode):
size = struct.calcsize(size)
return dir << _IOC_DIRSHIFT | \
type << _IOC_TYPESHIFT |
|
\
nr << _IOC_NRSHIFT | \
size << _IOC_SIZESHIFT
def _IO(type, nr): return _IOC(_IOC_NONE, type, nr, 0)
def _IOR(type, nr, size): return _IOC(_
|
IOC_READ, type, nr, size)
def _IOW(type, nr, size): return _IOC(_IOC_WRITE, type, nr, size)
def _IOWR(type, nr, size): return _IOC(_IOC_READ | _IOC_WRITE, type, nr, size)
|
USGSDenverPychron/pychron
|
pychron/experiment/notifier/user_notifier.py
|
Python
|
apache-2.0
| 6,871
| 0.001455
|
# # ===============================================================================
# # Copyright 2014 Jake Ross
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ===============================================================================
#
# # ============= enthought library imports =======================
# from traits.api import Instance, Bool, List
# # ============= standard library imports ========================
# from datetime import datetime
# import socket
# # ============= local library imports ==========================
# from pychron.experiment.events import ExperimentEventAddition, START_QUEUE, END_QUEUE
# from pychron.social.email.emailer import Emailer
# from pychron.version import __version__
# from pychron.experiment.notifier.templates import email_template
# from pychron.loggable import Loggable
#
#
# # class Emailer(HasTraits):
# # _server = None
# #
# # server_username = Str
# # server_password = Str
# # server_host = Str
# # server_port = Int
# # include_log = Bool
# # sender = Str('pychron@gmail.com')
# #
# # def send(self, addr, sub, msg):
# # server = self.connect()
# # if server:
# # msg = self._message_factory(addr, sub, msg)
# # try:
# # server.sendmail(self.sender, [addr], msg.as_string())
# # server.close()
# # return True
# # except BaseException:
# # pass
# #
# # def _message_factory(self, addr, sub, txt):
# # msg = MIMEMultipart()
# # msg['From'] = self.sender # 'nmgrl@gmail.com'
# # msg['To'] = addr
# # msg['Subject'] = sub
# #
# # msg.attach(MIMEText(txt))
# # return msg
# #
# # def connect(self):
# # if self._server is None:
# # try:
# # server = smtplib.SMTP(self.server_host, self.server_port)
# # server.ehlo()
# # server.starttls()
# # server.ehlo()
# #
# # server.login(self.server_username, self.server_password)
# # self._server = server
# # except smtplib.SMTPServerDisconnected:
# # return
# # else:
# # self._server.connect(self.server_host, self.server_port)
# #
# # return self._server
#
#
# class UserNotifier(Loggable):
# emailer = Instance(Emailer)
# include_log = Bool
# # events = List(contributes_to='pychron.experiment.events')
#
# def notify(self, ctx, subject):
# mctx = self._assemble_ctx(
|
**ctx)
# message = email_template(**mctx)
# self._send(ctx.get('email'), subject, message)
#
# if ctx.get('use_group_email'):
# pairs = ctx.get('group_emails')
# if pairs:
# names, addrs = pairs
# self.info('Notifying user group names={}'.format(','.join(names)))
#
|
for n, a in pairs:
# self._send(a, subject, message)
#
# # def _events_default(self):
# # print 'EVENTS DEFAULT'
# # evts = [ExperimentEventAddition(id='pychorn.user_notifier.start_queue',
# # action=self._start_queue,
# # level=START_QUEUE),
# # ExperimentEventAddition(id='pychorn.user_notifier.end_queue',
# # action=self._end_queue,
# # level=END_QUEUE)]
# # return evts
#
# # def notify(self, exp, last_runid, err, subject=None):
# # address = exp.email
# # if address:
# # subject, msg = self._assemble_message(exp, last_runid, err, subject)
# # self._notify(address, subject, msg)
#
# def _send(self, address, subject, msg):
# # self.debug('Subject= {}'.format(subject))
# # self.debug('Body= {}'.format(msg))
# if self.emailer:
# if not self.emailer.send(address, subject, msg):
# self.warning('email server not available')
# return True
# else:
# self.unique_warning('email plugin not enabled')
# return True
#
# # def notify_group(self, exp, last_runid, err, addrs, subject=None):
# # subject, msg = self._assemble_message(exp, last_runid, err, subject=None)
# # failed = list(addrs[:])
# # for email in addrs:
# # if self._notify(email, subject, msg):
# # break
# # failed.remove(email)
# #
# # if failed:
# # self.warning('Failed sending notification to emails {}'.join(','.join(failed)))
# #
# # def _assemble_message(self, exp, last_runid, err, subject):
# # name = exp.name
# # if subject is None:
# # if err:
# # subject = '{} Canceled'.format(name)
# # else:
# # subject = '{} Completed Successfully'.format(name)
# #
# # ctx = self._assemble_ctx(exp, last_runid, err)
# # msg = email_template(**ctx)
# # return subject, msg
#
# def _assemble_ctx(self, **kw):
# log = ''
# if self.include_log:
# log = self._get_log(100)
#
# shorthost = socket.gethostname()
# ctx = {'timestamp': datetime.now(),
# 'log': log,
# 'host': socket.gethostbyname(shorthost),
# 'shorthost': shorthost,
# 'version': __version__}
#
# ctx.update(kw)
# return ctx
#
# def _get_log(self, n):
# from pychron.core.helpers.logger_setup import get_log_text
# return get_log_text(n) or 'No log available'
#
#
# if __name__ == '__main__':
# class Exp(object):
# name = 'Foo'
# username = 'root'
# mass_spectrometer = 'jan'
# extract_device = 'co2'
# email = 'jirhiker@gmail.com'
# execution_ratio = '4/5'
#
# e = Exp()
# a = UserNotifier()
# a.emailer.include_log = True
# sub, msg = a._assemble_message(e, 'adsfafd', 'this is an error\nmultiomasdf')
# for l in msg.split('\n'):
# print l
#
#
# # a.notify(e, 'adsfafd', 'this is an error\nmultiomasdf')
# # ============= EOF =============================================
#
|
sofianehaddad/ot-svn
|
python/test/t_Mixture_std.py
|
Python
|
mit
| 7,198
| 0.002779
|
#! /usr/bin/env python
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
# Instanciate one distribution object
dimension = 3
meanPoint = NumericalPoint(dimension, 1.0)
meanPoint[0] = 0.5
meanPoint[1] = -0.5
sigma = NumericalPoint(dimension, 1.0)
sigma[0] = 2.0
sigma[1] = 3.0
R = CorrelationMatrix(dimension)
for i in range(1, dimension):
R[i, i - 1] = 0.5
# Create a collection of distribution
aCollection = DistributionCollection()
aCollection.add(Normal(meanPoint, sigma, R))
meanPoint += NumericalPoint(meanPoint.getDimension(), 1.0)
aCollection.add(Normal(meanPoint, sigma, R))
meanPoint += NumericalPoint(meanPoint.getDimension(), 1.0)
aCollection.add(Normal(meanPoint, sigma, R))
# Instanciate one distribution object
distribution = Mixture(
aCollection, NumericalPoint(aCollection.getSize(), 2.0))
print "Distribution ", repr(distribution)
print "Weights = ", repr(distribution.getWeights())
weights = distribution.getWeights()
weights[0] = 2.0 * weights[0]
distribution.setWeights(weights)
print "After update, new weights = ", repr(distribution.getWeights())
distribution = Mixture(aCollection)
print "Distribution ", repr(distribution)
# Is this distribution elliptical ?
print "Elliptical = ", distribution.isElliptical()
# Is this distribution continuous ?
print "Continuous = ", distribution.isContinuous()
# Test for realization of distribution
oneRealization = distribution.getRealization()
print "oneRealization=", repr(oneRealization)
# Test for sampling
size = 1000
oneSample = distr
|
ibution.getSample(size)
print "oneSample first=", r
|
epr(oneSample[0]), " last=", repr(oneSample[size - 1])
print "mean=", repr(oneSample.computeMean())
print "covariance=", repr(oneSample.computeCovariance())
# Define a point
point = NumericalPoint(distribution.getDimension(), 1.0)
print "Point= ", repr(point)
# Show PDF and CDF of point
eps = 1e-5
# derivative of PDF with regards its arguments
DDF = distribution.computeDDF(point)
print "ddf =", repr(DDF)
# by the finite difference technique
ddfFD = NumericalPoint(dimension)
for i in range(dimension):
left = NumericalPoint(point)
left[i] += eps
right = NumericalPoint(point)
right[i] -= eps
ddfFD[i] = (distribution.computePDF(left) -
distribution.computePDF(right)) / (2.0 * eps)
print "ddf (FD)=", repr(ddfFD)
# PDF value
LPDF = distribution.computeLogPDF(point)
print "log pdf=%.6f" % LPDF
PDF = distribution.computePDF(point)
print "pdf =%.6f" % PDF
# by the finite difference technique from CDF
if (dimension == 1):
print "pdf (FD)=%.6f" % ((distribution.computeCDF(point + NumericalPoint(1, eps)) - distribution.computeCDF(point + NumericalPoint(1, -eps))) / (2.0 * eps))
# derivative of the PDF with regards the parameters of the distribution
CDF = distribution.computeCDF(point)
print "cdf=%.6f" % CDF
CCDF = distribution.computeComplementaryCDF(point)
print "ccdf=%.6f" % CCDF
# PDFgr = distribution.computePDFGradient( point )
# print "pdf gradient =" , repr(PDFgr)
# by the finite difference technique
# PDFgrFD = NumericalPoint(4)
# PDFgrFD[0] = (Mixture(distribution.getR() + eps, distribution.getT(), distribution.getA(), distribution.getB()).computePDF(point) -
# Mixture(distribution.getR() - eps, distribution.getT(), distribution.getA(), distribution.getB()).computePDF(point)) / (2.0 * eps)
# PDFgrFD[1] = (Mixture(distribution.getR(), distribution.getT() + eps, distribution.getA(), distribution.getB()).computePDF(point) -
# Mixture(distribution.getR(), distribution.getT() - eps, distribution.getA(), distribution.getB()).computePDF(point)) / (2.0 * eps)
# PDFgrFD[2] = (Mixture(distribution.getR(), distribution.getT(), distribution.getA() + eps, distribution.getB()).computePDF(point) -
# Mixture(distribution.getR(), distribution.getT(), distribution.getA() - eps, distribution.getB()).computePDF(point)) / (2.0 * eps)
# PDFgrFD[3] = (Mixture(distribution.getR(), distribution.getT(), distribution.getA(), distribution.getB() + eps).computePDF(point) -
# Mixture(distribution.getR(), distribution.getT(), distribution.getA(), distribution.getB() - eps).computePDF(point)) / (2.0 * eps)
# print "pdf gradient (FD)=" , repr(PDFgrFD)
# derivative of the PDF with regards the parameters of the distribution
# CDFgr = distribution.computeCDFGradient( point )
# print "cdf gradient =" , repr(CDFgr)
# CDFgrFD = NumericalPoint(4)
# CDFgrFD[0] = (Mixture(distribution.getR() + eps, distribution.getT(), distribution.getA(), distribution.getB()).computeCDF(point) -
# Mixture(distribution.getR() - eps, distribution.getT(), distribution.getA(), distribution.getB()).computeCDF(point)) / (2.0 * eps)
# CDFgrFD[1] = (Mixture(distribution.getR(), distribution.getT() + eps, distribution.getA(), distribution.getB()).computeCDF(point) -
# Mixture(distribution.getR(), distribution.getT() - eps, distribution.getA(), distribution.getB()).computeCDF(point)) / (2.0 * eps)
# CDFgrFD[2] = (Mixture(distribution.getR(), distribution.getT(), distribution.getA() + eps, distribution.getB()).computeCDF(point) -
# Mixture(distribution.getR(), distribution.getT(), distribution.getA() - eps, distribution.getB()).computeCDF(point)) / (2.0 * eps)
# CDFgrFD[3] = (Mixture(distribution.getR(), distribution.getT(), distribution.getA(), distribution.getB() + eps).computeCDF(point) -
# Mixture(distribution.getR(), distribution.getT(), distribution.getA(), distribution.getB() - eps).computeCDF(point)) / (2.0 * eps)
# print "cdf gradient (FD)=", repr(CDFgrFD)
# quantile
quantile = distribution.computeQuantile(0.95)
print "quantile=", repr(quantile)
print "cdf(quantile)=%.6f" % distribution.computeCDF(quantile)
mean = distribution.getMean()
print "mean=", repr(mean)
covariance = distribution.getCovariance()
print "covariance=", repr(covariance)
parameters = distribution.getParametersCollection()
print "parameters=", repr(parameters)
for i in range(6):
print "standard moment n=", i, " value=", distribution.getStandardMoment(i)
print "Standard representative=", distribution.getStandardRepresentative()
# Constructor with separate weights. Also check small weights removal
weights = [1.0e-20, 2.5, 32.0]
atoms = DistributionCollection(
[Normal(1.0, 1.0), Normal(2.0, 2.0), Normal(3.0, 3.0)])
newMixture = Mixture(atoms, weights)
print "newMixture pdf= %.12g" % newMixture.computePDF(2.5)
print "atoms kept in mixture=", newMixture.getDistributionCollection()
print "newMixture=", newMixture
except:
import sys
print "t_Mixture_std.py", sys.exc_type, sys.exc_value
|
seung-lab/euclidean-distance-transform-3d
|
python/setup.py
|
Python
|
gpl-3.0
| 779
| 0.014121
|
import setuptools
import sys
import numpy as np

# NOTE: If edt.cpp does not exist:
# cython -3 --fast-fail -v --cplus edt.pyx

# Per-platform C++ compiler flags for the extension build.
if sys.platform == 'win32':
    compile_flags = ['/std:c++11', '/O2']
else:
    compile_flags = ['-std=c++11', '-O3', '-ffast-math', '-pthread']
    # macOS additionally needs libc++ and a deployment-target floor.
    if sys.platform == 'darwin':
        compile_flags += ['-stdlib=libc++', '-mmacosx-version-min=10.9']

setuptools.setup(
    setup_requires=['pbr'],
    python_requires="~=3.6",  # >= 3.6 < 4.0
    ext_modules=[
        setuptools.Extension(
            'edt',
            sources=['edt.cpp'],
            language='c++',
            include_dirs=[np.get_include()],
            extra_compile_args=compile_flags,
        ),
    ],
    long_description_content_type='text/markdown',
    pbr=True,
)
|
abigailStev/stingray
|
stingray/largememory.py
|
Python
|
mit
| 34,861
| 0.000832
|
import os
import random
import string
import warnings
import numpy as np
from astropy import log
from astropy.io import fits
import stingray
from .events import EventList
from .gti import cross_two_gtis
from .io import high_precision_keyword_read
from .lightcurve import Lightcurve
from .utils import genDataPath
HAS_ZARR = False
try:
import zarr
HAS_ZARR = True
from numcodecs import Blosc
except ImportError:
zarr = None
pass
__all__ = ["createChunkedSpectra", "saveData", "retrieveData"]
def _saveChunkLC(lc, dir_name, chunks):
    """
    Save Lightcurve in chunks on disk.

    Parameters
    ----------
    lc: :class:`stingray.Lightcurve` object
        Lightcurve to be saved
    dir_name: string
        Top Level directory name where Lightcurve is to be saved
    chunks: int
        The number of elements per chunk
    """
    # Creating a Nested Store and multiple groups for temporary saving:
    # "main_data" holds the per-bin arrays, "meta_data" holds scalars.
    store = zarr.NestedDirectoryStore(dir_name)
    lc_data_group = zarr.group(store=store, overwrite=True)
    main_data_group = lc_data_group.create_group("main_data", overwrite=True)
    meta_data_group = lc_data_group.create_group("meta_data", overwrite=True)
    # lz4 at clevel=1: favours speed over compression ratio for temp data.
    compressor = Blosc(cname="lz4", clevel=1, shuffle=-1)  # Optimal
    main_data_group.create_dataset(
        name="times",
        data=lc.time,
        compressor=compressor,
        overwrite=True,
        chunks=(chunks,),
    )
    main_data_group.create_dataset(
        name="counts",
        data=lc.counts,
        compressor=compressor,
        overwrite=True,
        chunks=(chunks,),
    )
    # NOTE(review): reads the private _counts_err to test presence, but
    # stores the public counts_err property -- confirm this is intentional.
    if lc._counts_err is not None:
        main_data_group.create_dataset(
            name="count_err",
            data=lc.counts_err,
            compressor=compressor,
            overwrite=True,
            chunks=(chunks,),
        )
    # GTIs are stored flattened (start/stop pairs interleaved), uncompressed.
    main_data_group.create_dataset(
        name="gti", data=lc.gti.flatten(), overwrite=True
    )
    meta_data_group.create_dataset(
        name="dt", data=lc.dt, compressor=compressor, overwrite=True
    )
    meta_data_group.create_dataset(
        name="err_dist",
        data=lc.err_dist,
        compressor=compressor,
        overwrite=True,
    )
    meta_data_group.create_dataset(
        name="mjdref", data=lc.mjdref, compressor=compressor, overwrite=True
    )
def _saveChunkEV(ev, dir_name, chunks):
    """
    Save EventList in chunks on disk.

    Parameters
    ----------
    ev: :class:`stingray.events.EventList` object
        EventList to be saved
    dir_name: string
        Top Level directory name where EventList is to be saved
    chunks: int
        The number of elements per chunk

    Raises
    ------
    ValueError
        If there is no data being saved
    """
    # Creating a Nested Store and multiple groups for temporary saving
    store = zarr.NestedDirectoryStore(dir_name)
    ev_data_group = zarr.group(store=store, overwrite=True)
    main_data_group = ev_data_group.create_group("main_data", overwrite=True)
    meta_data_group = ev_data_group.create_group("meta_data", overwrite=True)
    compressor = Blosc(cname="lz4", clevel=1, shuffle=-1)
    # NOTE(review): the `.all() or .size != 0` guards below look suspect --
    # `.all()` tests element truthiness, not presence (an array of zeros is
    # falsy), though the `or size != 0` arm rescues it. Confirm intent.
    if ev.time is not None and (ev.time.all() or ev.time.size != 0):
        main_data_group.create_dataset(
            name="times",
            data=ev.time,
            compressor=compressor,
            overwrite=True,
            chunks=(chunks,),
        )
    if ev.energy is not None and (ev.energy.all() or ev.energy.size != 0):
        main_data_group.create_dataset(
            name="energy",
            data=ev.energy,
            compressor=compressor,
            overwrite=True,
            chunks=(chunks,),
        )
    if ev.pi is not None and (ev.pi.all() or ev.pi.size != 0):
        main_data_group.create_dataset(
            name="pi_channel",
            data=ev.pi,
            compressor=compressor,
            overwrite=True,
            chunks=(chunks,),
        )
    # GTIs stored flattened (start/stop pairs interleaved), uncompressed.
    if ev.gti is not None and (ev.gti.all() or ev.gti.shape[0] != 0):
        main_data_group.create_dataset(
            name="gti", data=ev.gti.flatten(), overwrite=True
        )
    # Scalar metadata is written only when present/non-default.
    if ev.dt != 0:
        meta_data_group.create_dataset(
            name="dt", data=ev.dt, compressor=compressor, overwrite=True
        )
    if ev.ncounts:
        meta_data_group.create_dataset(
            name="ncounts",
            data=ev.ncounts,
            compressor=compressor,
            overwrite=True,
        )
    if ev.notes:
        meta_data_group.create_dataset(
            name="notes", data=ev.notes, compressor=compressor, overwrite=True
        )
    meta_data_group.create_dataset(
        name="mjdref", data=ev.mjdref, compressor=compressor, overwrite=True
    )
def _saveFITSZarr(f_name, dir_name, chunks):
"""
Read a FITS file and save it for further processing.
Parameters
----------
f_name: string
The name of file with which object was saved
dir_name: string
The name of the top level directory where the file is to be stored
chunks: int
The number of elements per chunk
"""
compressor = Blosc(cname="lz4", clevel=1, shuffle=-1)
store = zarr.NestedDirectoryStore(dir_name)
fits_data_group = zarr.group(store=store, overwrite=True)
main_data_group = fits_data_group.create_group("main_data", overwrite=True)
meta_data_group = fits_data_group.create_group("meta_data", overwrite=True)
with fits.open(f_name, memmap=True) as fits_data:
for HDUList in fits_data:
if HDUList.name == "EVENTS":
times = HDUList.data["TIME"]
chunks = times.size if times.size < chunks else chunks
main_data_group.create_dataset(
name="times",
data=times,
compressor=compressor,
overwrite=True,
chunks=(chunks,),
)
for col in ["PI", "PHA"]:
if col in HDUList.data.columns.names:
main_data_group.create_dataset(
name=f"{col.lower()}_channel",
data=HDUList.data[col],
compressor=compressor,
overwrite=True,
chunks=(chunks,),
)
meta_data_group.create_dataset(
name="tstart",
data=HDUList.header["TSTART"],
compressor=compressor,
overwrite=True,
)
meta_data_group.create_dataset(
name="tstop",
data=HDUList.header["TSTOP"],
compressor=compressor,
overwrite=True,
)
meta_data_group.create_dataset(
name="mjdref",
data=high_precision_keyword_rea
|
d(HDUList.header, "MJDREF"),
compressor=compressor,
overwrite=True,
)
elif
|
HDUList.name == "GTI":
# TODO: Needs to be generalized
start, stop = HDUList.data["START"], HDUList.data["STOP"]
gti = np.array(list(zip(start, stop)))
main_data_group.create_dataset(
name="gti",
data=gti.flatten(),
compressor=compressor,
overwrite=True,
)
def saveData(data, persist=False, dir_name=None, chunks=None):
"""
Saves Lightcurve/EventList or any such data in chunks to disk.
Parameters
----------
data: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList` object or string
Data to be stored on the disk.
persist: bool
If the data is to be stored on the disk permanently.
dir_name: string, optional
Name of top level directory where data is to be stored, by default randomNameGenerate()
chunks: int, optional
Length of data chunks in number of bins. If None, it is calculated
based on the system reso
|
vmendez/DIRAC
|
Core/scripts/dirac-configure.py
|
Python
|
gpl-3.0
| 20,061
| 0.037236
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-configure
# Author : Ricardo Graciani
########################################################################
"""
Main script to write dirac.cfg for a new DIRAC installation and initial download of CAs and CRLs
if necessary.
To be used by VO specific scripts to configure new DIRAC installations
There are 2 mandatory arguments:
-S --Setup=<setup> To define the DIRAC setup for the current installation
-C --ConfigurationServer=<server>|-W --Gateway To define the reference Configuration Servers/Gateway for the current installation
others are optional
-I --IncludeAllServers To include all Configuration Servers (by default only those in -C option are included)
-n --SiteName=<sitename> To define the DIRAC Site Name for the installation
-N --CEName=<cename> To determine the DIRAC Site Name from the CE Name
-V --VO=<vo> To define the VO for the installation
-U --UseServerCertificate To use Server Certificate for all clients
-H --SkipCAChecks To skip check of CAs for all clients
-D --SkipCADownload To skip download of CAs
-M --SkipVOMSDownload To skip download of VOMS info
-v --UseVersionsDir Use versions directory (This option will properly define RootPath and InstancePath)
-A --Architecture=<architecture> To define /LocalSite/Architecture=<architecture>
-L --LocalSE=<localse> To define /LocalSite/LocalSE=<localse>
-F --ForceUpdate Forces the update of cfg file (i.e. dirac.cfg), even if it does already exists (use with care)
-O --Output define output configuration file
Other arguments will take proper defaults if not defined.
Additionally all options can all be passed inside a .cfg file passed as argument. The following options are recognized:
Setup
ConfigurationServer
IncludeAllServers
Gateway
SiteName
CEName
VirtualOrganization
UseServerCertificate
SkipCAChecks
SkipCADownload
UseVersionsDir
Architecture
LocalSE
LogLevel
As in any other script command line option take precedence over .cfg files passed as arguments.
The combination of both is written into the installed dirac.cfg.
Notice: It will not overwrite exiting info in current dirac.cfg if it exists.
Example: dirac-configure -d -S LHCb-Development -C 'dips://lhcbprod.pic.es:9135/Configuration/Server' -W 'dips://lhcbprod.pic.es:9135' --SkipCAChecks
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers import cfgInstallPath, cfgPath, Registry
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
import sys, os
logLevel = None
setup = None
configurationServer = None
includeAllServers = False
gatewayServer = None
siteName = None
useServerCert = False
skipCAChecks = False
skipCADownload = False
useVersionsDir = False
architecture = None
localSE = None
ceName = None
vo = None
update = False
outputFile = ''
skipVOMSDownload = False
def setGateway( optionValue ):
  """Option callback: record *optionValue* as the DIRAC gateway.

  The gateway also acts as the Configuration Server endpoint, so it is
  registered via setServer and persisted under the install path.
  """
  global gatewayServer
  gatewayServer = optionValue
  setServer( gatewayServer + '/Configuration/Server' )
  DIRAC.gConfig.setOptionValue( cfgInstallPath( 'Gateway' ), gatewayServer )
  return DIRAC.S_OK()
def setOutput( optionValue ):
global outputFile
outputFile = optionValue
return DIRAC.S_OK()
def setServer( optionValue ):
global configurationServer
configurationServer = optionValue
Script.localCfg.addDefaultEntry( '/DIRAC/Configuration/Servers', configurationServer )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'ConfigurationServer' ), configurationServer )
return DIRAC.S_OK()
def setAllServers( optionValue ):
global includeAllServers
includeAllServers = True
def setSetup( optionValue ):
global setup
setup = optionValue
DIRAC.gConfig.setOptionValue( '/DIRAC/Setup', setup )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'Setup' ), setup )
return DIRAC.S_OK()
def setSiteName( optionValue ):
global siteName
siteName = optionValue
Script.localCfg.addDefaultEntry( '/LocalSite/Site', siteName )
DIRAC.__siteName = False
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'SiteName' ), siteName )
return DIRAC.S_OK()
def setCEName( optionValue ):
global ceName
ceName = optionValue
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'CEName' ), ceName )
return DIRAC.S_OK()
def setServerCert( optionValue ):
global useServerCert
useServerCert = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'UseServerCertificate' ), useServerCert )
return DIRAC.S_OK()
def setSkipCAChecks( optionValue ):
global skipCAChecks
skipCAChecks = True
DIRAC.gConfi
|
g.setOptionValue( cfgInstallPath( 'SkipCAChecks' ), skipCAChecks )
return DIRAC.S_OK()
def setSkipCADownload( optionValue ):
|
global skipCADownload
skipCADownload = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'SkipCADownload' ), skipCADownload )
return DIRAC.S_OK()
def setSkipVOMSDownload( optionValue ):
global skipVOMSDownload
skipVOMSDownload = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'SkipVOMSDownload' ), skipVOMSDownload )
return DIRAC.S_OK()
def setUseVersionsDir( optionValue ):
global useVersionsDir
useVersionsDir = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'UseVersionsDir' ), useVersionsDir )
return DIRAC.S_OK()
def setArchitecture( optionValue ):
global architecture
architecture = optionValue
Script.localCfg.addDefaultEntry( '/LocalSite/Architecture', architecture )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'Architecture' ), architecture )
return DIRAC.S_OK()
def setLocalSE( optionValue ):
global localSE
localSE = optionValue
Script.localCfg.addDefaultEntry( '/LocalSite/LocalSE', localSE )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'LocalSE' ), localSE )
return DIRAC.S_OK()
def setVO( optionValue ):
global vo
vo = optionValue
Script.localCfg.addDefaultEntry( '/DIRAC/VirtualOrganization', vo )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'VirtualOrganization' ), vo )
return DIRAC.S_OK()
def forceUpdate( optionValue ):
global update
update = True
return DIRAC.S_OK()
Script.disableCS()
Script.registerSwitch( "S:", "Setup=", "Set <setup> as DIRAC setup", setSetup )
Script.registerSwitch( "C:", "ConfigurationServer=", "Set <server> as DIRAC configuration server", setServer )
Script.registerSwitch( "I", "IncludeAllServers", "include all Configuration Servers", setAllServers )
Script.registerSwitch( "n:", "SiteName=", "Set <sitename> as DIRAC Site Name", setSiteName )
Script.registerSwitch( "N:", "CEName=", "Determiner <sitename> from <cename>", setCEName )
Script.registerSwitch( "V:", "VO=", "Set the VO name", setVO )
Script.registerSwitch( "W:", "gateway=", "Configure <gateway> as DIRAC Gateway for the site", setGateway )
Script.registerSwitch( "U", "UseServerCertificate", "Configure to use Server Certificate", setServerCert )
Script.registerSwitch( "H", "SkipCAChecks", "Configure to skip check of CAs", setSkipCAChecks )
Script.registerSwitch( "D", "SkipCADownload", "Configure to skip download of CAs", setSkipCADownload )
Script.registerSwitch( "M", "SkipVOMSDownload", "Configure to skip download of VOMS info", setSkipVOMSDownload )
Script.registerSwitch( "v", "UseVersionsDir", "Use versions directory", setUseVersionsDir )
Script.registerSwitch( "A:", "Architecture=", "Configure /Architecture=<architecture>", setArchitecture )
Script.registerSwitch( "L:", "LocalSE=", "Configure LocalSite/LocalSE=<localse>", setLocalSE )
Script.registerSwitch( "F", "ForceUpdate", "Force Update of cfg file (i.e. dirac.cfg) (otherwise nothing happens if dirac.cfg already exists)", forceUpdate )
Script.registerSwitch ( "O:
|
rgayon/plaso
|
tests/output/json_out.py
|
Python
|
apache-2.0
| 4,842
| 0.002891
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the JSON output module."""
from __future__ import unicode_literals
import json
import os
import sys
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.formatters import manager as formatters_manager
from plaso.lib import definitions
from plaso.output import json_out
from tests import test_lib as shared_test_lib
from tests.cli import test_lib as cli_test_lib
from tests.containers import test_lib as containers_test_lib
from tests.formatters import test_lib as formatters_test_lib
from tests.output import test_lib
class JSONOutputTest(test_lib.OutputModuleTestCase):
"""Tests for the JSON output module."""
_OS_PATH_SPEC = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location='{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd')))
_TEST_EVENTS = [
{'data_type': 'test:event',
'display_name': 'OS: /var/log/syslog.1',
'hostname': 'ubuntu',
'inode': 12345678,
'path_spec': path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/var/log/syslog.1', parent=_OS_PATH_SPEC),
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root'}]
def setUp(self):
"""Makes preparations before running an individual test."""
output_mediator = self._CreateOutputMediator()
self._output_writer = cli_test_lib.TestOutputWriter()
self._output_module = json_out.JSONOutputModule(output_mediator)
self._output_module.SetOutputWriter(self._output_writer)
def testWriteHeader(self):
"""Tests the WriteHeader function."""
expected_header = '{'
self._output_module.WriteHeader()
header = self._output_writer.ReadOutput()
self.assertEqual(header, expected_header)
def testWriteFooter(self):
"""Tests the WriteFooter function."""
expected_footer = '}'
self._output_module.WriteFooter()
footer = self._output_writer.ReadOutput()
self.assertEqual(footer, expected_footer)
def testWriteEventBody(self):
"""Tests the WriteEventBody function."""
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
formatters_manager.FormattersManager.RegisterFormatter(
formatters_test_lib.TestEventFormatter)
try:
self._output_module.WriteEventBody(
event, event_data, event_data_stream, None)
finally:
formatters_manager.FormattersManager.DeregisterFormatter(
formatters_test_lib.TestEventFormatter)
expected_timestamp = shared_test_lib.CopyTimestampFromSring(
'2012-06-27 18:17:01')
if sys.platform.startswith('win'):
# The dict comparison is very picky on Windows hence we
# have to make sure the drive letter is in the same case.
expected_os_location = os.path.abspath('\\{0:s}'.format(
os.path.join('cases', 'image.dd')))
else:
expected_os_location = '{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd'))
expected_json_dict = {
'event_0': {
'__container_type__': 'event',
'__type__': 'AttributeContainer',
'data_type': 'test:event',
'display_name': 'OS: /var/log/syslog.1',
'hostname': 'ubuntu',
'inode': 12345678,
'message': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
'session closed for user root)'),
'pathspec': {
'__type__': 'PathSpec',
'type_indicator': 'TSK',
'location': '/var/log/syslog.1',
'inode': 15,
'parent': {
'__type__': 'PathSpec',
'type_indicator': 'OS',
'location': expected_os_location,
}
},
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
'session\n closed for user root)'),
'timestamp': expected_timestamp,
|
't
|
imestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root',
}
}
event_body = self._output_writer.ReadOutput()
# We need to compare dicts since we cannot determine the order
# of values in the string.
json_string = '{{ {0:s} }}'.format(event_body)
json_dict = json.loads(json_string)
self.assertEqual(json_dict, expected_json_dict)
if __name__ == '__main__':
unittest.main()
|
adviti/melange
|
thirdparty/google_appengine/lib/webapp2/tests/extras_jinja2_test.py
|
Python
|
apache-2.0
| 3,283
| 0.000305
|
# -*- coding: utf-8 -*-
import os
import webapp2
from webapp2_extras import jinja2
import test_base
current_dir = os.path.abspath(os.path.dirname(__file__))
template_path = os.path.join(current_dir, 'resources', 'jinja2_templates')
compiled_path = os.path.join(current_dir, 'resources',
'jinja2_templates_compiled')
class TestJinja2(test_base.BaseTestCase):
def test_render_template_with_i18n(self):
app = webapp2.WSGIApplication(config={
'webapp2_extras.jinja2': {
'template_path': template_path,
'environment_args': {
'autoescape': True,
'extensions': [
'jinja2.ext.autoescape',
'jinja2.ext.with_',
'jinja2.ext.i18n',
],
},
},
})
req = webapp2.Request.blank('/')
app.set_globals(app=app, request=req)
j = jinja2.Jinja2(app)
message = 'Hello, i18n World!'
res = j.render_template('template2.html', message=message)
self.assertEqual(res, message)
def test_render_template_globals_filters(self):
app = webapp2.WSGIApplication(config={
'webapp2_extras.jinja2': {
'template_path': template_path,
'globals': dict(foo='fooglobal'),
'filters': dict(foo=lambda x: x + '-foofilter'),
},
})
req = webapp2.Request.blank('/')
app.set_globals(app=app, request=req)
j = jinja2.Jinja2(app)
message = 'fooglobal-foofilter'
res = j.render_template('template3.html', message=message)
self.assertEqual(res, message)
def test_render_template_force_compiled(self):
app = webapp2.WSGIApplication(config={
'webapp2_extras.jinja2': {
'template_path': template_path,
'compiled_path': compiled_path,
'force_compiled': True,
}
})
req = webapp2.Request.blank('/')
app.set_globals(app=app, request=req)
j = jinja2.Jinja2(app)
message = 'Hello, World!'
res = j.render_template('template1.html', message=message)
self.assertEqual(res, message)
def test_get_template_attribute(self):
app = webapp2.WSGIApplication(config={
'webapp2_extras.jinja2': {
'template_path': template_path,
}
})
j = jinja2.Jinja2(app)
hello = j.get_template_attribute('hello.html', 'hello')
self.assertEqual(hello('World'), 'Hello, World!')
def test_set_jinja2(self):
app = webapp2.WSGIApplication()
self.assertEqual(len(app.registry), 0)
jinja2.set_jinja2(jinja2.Jinja2(app), app=app)
self.assertEqual(len(app.registry), 1)
|
j = jinja2.g
|
et_jinja2(app=app)
self.assertTrue(isinstance(j, jinja2.Jinja2))
def test_get_jinja2(self):
app = webapp2.WSGIApplication()
self.assertEqual(len(app.registry), 0)
j = jinja2.get_jinja2(app=app)
self.assertEqual(len(app.registry), 1)
self.assertTrue(isinstance(j, jinja2.Jinja2))
if __name__ == '__main__':
test_base.main()
|
open-craft/xblock-poll
|
tests/utils.py
|
Python
|
agpl-3.0
| 708
| 0.002825
|
# Test mocks and helpers
from __future__ import absolute_import
from webob import Request
f
|
rom xblock.runtime import DictKeyValueStore, KvsFieldData
from xblock.test.tools import TestRuntime
def make_request(body, method='POST'):
"""
Helper method to make request
"""
request = Request.blank('/')
request.body = body.encode('utf-8')
request.method = method
return request
# pylint: disable=abstract-method
class MockRuntime(TestRuntime):
"""
Provides a mock XBlock runtime object.
"""
def __init__(self, *
|
*kwargs):
field_data = kwargs.get('field_data', KvsFieldData(DictKeyValueStore()))
super(MockRuntime, self).__init__(field_data=field_data)
|
tsl143/zamboni
|
mkt/abuse/models.py
|
Python
|
bsd-3-clause
| 3,973
| 0
|
import logging
from django.conf import settings
from django.db import models
from mkt.site.mail import send_mail
from mkt.site.models import ModelBase
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
log = logging.getLogger('z.abuse')
class AbuseReport(ModelBase):
# NULL if the reporter is anonymous.
reporter = models.ForeignKey(UserProfile, null=True,
blank=True, related_name='abuse_reported')
ip_address = models.CharField(max_length=255, default='0.0.0.0')
# An abuse report can be for an addon, a user, or a website. Only one of
# these should be set.
addon = models.ForeignKey(Webapp, null=True, related_name='abuse_reports')
user = models.ForeignKey(UserProfile, null=True,
related_name='abuse_reports')
website = models.ForeignKey(Website, null=True,
related_name='abuse_reports')
message = models.TextField()
read = models.BooleanField(default=False)
class Meta:
db_table = 'abuse_reports'
@property
def object(self):
return self.addon or self.user or self.website
def send(self):
obj = self.object
if self.reporter:
user_name = '%s (%s)' % (self.reporter.name, self.reporter.email)
else:
user_name = 'An anonymous user'
if self.website:
# For Websites, it's not just abuse, the scope is broader, it could
# be any issue about the website listing itself, so use a different
# wording and recipient list.
type_ = u'Website'
subject = u'[%s] Issue Report for %s' % (type_, obj.name)
recipient_list = (settings.MKT_FEEDBACK_EMAIL,)
else:
if self.addon:
type_ = 'App'
elif self.user:
type_ = 'User'
subject = u'[%s] Abuse Report for %s' % (type_, obj.name)
recipient_list = (settings.ABUSE_EMAIL,)
msg = u'%s reported an issue for %s (%s%s).\n\n%s' % (
|
user_name, obj.name, settings.SITE_URL, obj.get_url_path(),
self.message)
send_mail(subject, msg, recipient_list=recipient_list)
@classmethod
def recent_high_abuse_reports(cls, threshold, period, addon_id=None):
"""
Returns AbuseReport objects for the given threshold over the given time
period (in days). Filters by addon_id if provided.
E.g. Greater than 5 abuse reports for all webapps in the past 7 days.
"""
|
abuse_sql = ['''
SELECT `abuse_reports`.*,
COUNT(`abuse_reports`.`addon_id`) AS `num_reports`
FROM `abuse_reports`
INNER JOIN `addons` ON (`abuse_reports`.`addon_id` = `addons`.`id`)
WHERE `abuse_reports`.`created` >= %s ''']
params = [period]
if addon_id:
abuse_sql.append('AND `addons`.`id` = %s ')
params.append(addon_id)
abuse_sql.append('GROUP BY addon_id HAVING num_reports > %s')
params.append(threshold)
return list(cls.objects.raw(''.join(abuse_sql), params))
# Add index on `created`.
AbuseReport._meta.get_field('created').db_index = True
def send_abuse_report(request, obj, message):
report = AbuseReport(ip_address=request.META.get('REMOTE_ADDR'),
message=message)
if request.user.is_authenticated():
report.reporter = request.user
if isinstance(obj, Webapp):
report.addon = obj
elif isinstance(obj, UserProfile):
report.user = obj
elif isinstance(obj, Website):
report.website = obj
report.save()
report.send()
# Trigger addon high abuse report detection task.
if isinstance(obj, Webapp):
from mkt.webapps.tasks import find_abuse_escalations
find_abuse_escalations.delay(obj.id)
|
CSC301H-Fall2013/ElectionSimulation
|
Code/Database/Build/build_lists.py
|
Python
|
mit
| 1,031
| 0.054316
|
if __name__ == '__main__':
expenses_list = []
expenses = open('DummyExpense.csv', 'r')
#Skip header
expenses.readline()
for line in expenses.readlines():
expenses_list.append([elem.rsplit("\r\n")[0] for elem in line.
|
split(',')])
expenses.close()
ctracts_list = []
ctracts = open('DummyCT.csv', 'r')
#Skip header
ctracts.readline()
tract = []
for line in ctracts.readlines():
tract = line.split(',')
tract = [tract[0]] + tract[4:-1]
ctracts_list.append(tract)
ctracts.close()
pstation_list = []
pstations = open('DummyPS.csv',
|
'r')
#Skip header
pstations.readline()
for line in pstations.readlines():
pstation_list.append([elem.rsplit("\r\n")[0] for elem in line.split(',')])
pstations.close()
links_list = []
links = open('DummyLink.csv', 'r')
#Skip header
links.readline()
for line in links.readlines():
links_list.append([elem.rsplit("\r\n")[0] for elem in line.split(',')])
links.close()
print(expenses_list)
print(ctracts_list)
print(pstation_list)
print(links_list)
|
rndusr/stig
|
stig/tui/scroll.py
|
Python
|
gpl-3.0
| 16,073
| 0.001058
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
import urwid
from urwid.widget import BOX, FIXED, FLOW
# Scroll actions
SCROLL_LINE_UP = 'line up'
SCROLL_LINE_DOWN = 'line down'
SCROLL_PAGE_UP = 'page up'
SCROLL_PAGE_DOWN = 'page down'
SCROLL_TO_TOP = 'to top'
SCROLL_TO_END = 'to end'
# Scrollbar positions
SCROLLBAR_LEFT = 'left'
SCROLLBAR_RIGHT = 'right'
class Scrollable(urwid.WidgetDecoration):
def sizing(self):
return frozenset([BOX,])
def selectable(self):
return True
def __init__(self, widget):
"""Box widget that makes a fixed or flow widget vertically scrollable
TODO: Focusable widgets are handled, including switching focus, but
possibly not intuitively, depending on the arrangement of widgets. When
switching focus to a widget that is ouside of the visible part of the
original widget, the canvas scrolls up/down to the focused widget. It
would be better to scroll until the next focusable widget is in sight
first. But for that to work we must somehow obtain a list of focusable
rows in the original canvas.
"""
if not any(s in widget.sizing() for s in (FIXED, FLOW)):
raise ValueError('Not a fixed or flow widget: %r' % widget)
self._trim_top = 0
self._scroll_action = None
self._forward_keypress = None
self._old_cursor_coords = None
self._rows_max_cached = 0
self.__super.__init__(widget)
def render(self, size, focus=False):
maxcol, maxrow = size
# Render complete original widget
ow
|
= self._original_widget
ow_size = self._get_original_widget_size(size)
canv_full = ow.render(ow_size, focus)
# Make full canvas editable
canv = urwid.CompositeCanvas(canv_full)
canv_cols, canv_rows = canv.cols(), canv.rows()
if canv_cols <= maxcol:
pad_width = maxcol - canv_cols
if pad_width > 0:
# Canvas is narrower than available horizontal space
canv.pad_trim_left_right(0, pad_width)
|
if canv_rows <= maxrow:
fill_height = maxrow - canv_rows
if fill_height > 0:
# Canvas is lower than available vertical space
canv.pad_trim_top_bottom(0, fill_height)
if canv_cols <= maxcol and canv_rows <= maxrow:
# Canvas is small enough to fit without trimming
return canv
self._adjust_trim_top(canv, size)
# Trim canvas if necessary
trim_top = self._trim_top
trim_end = canv_rows - maxrow - trim_top
trim_right = canv_cols - maxcol
if trim_top > 0:
canv.trim(trim_top)
if trim_end > 0:
canv.trim_end(trim_end)
if trim_right > 0:
canv.pad_trim_left_right(0, -trim_right)
# Disable cursor display if cursor is outside of visible canvas parts
if canv.cursor is not None:
curscol, cursrow = canv.cursor
if cursrow >= maxrow or cursrow < 0:
canv.cursor = None
# Figure out whether we should forward keypresses to original widget
if canv.cursor is not None:
# Trimmed canvas contains the cursor, e.g. in an Edit widget
self._forward_keypress = True
else:
if canv_full.cursor is not None:
# Full canvas contains the cursor, but scrolled out of view
self._forward_keypress = False
else:
# Original widget does not have a cursor, but may be selectable
# FIXME: Using ow.selectable() is bad because the original
# widget may be selectable because it's a container widget with
# a key-grabbing widget that is scrolled out of view.
# ow.selectable() returns True anyway because it doesn't know
# how we trimmed our canvas.
#
# To fix this, we need to resolve ow.focus and somehow
# ask canv whether it contains bits of the focused widget. I
# can't see a way to do that.
if ow.selectable():
self._forward_keypress = True
else:
self._forward_keypress = False
return canv
def keypress(self, size, key):
# Maybe offer key to original widget
if self._forward_keypress:
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
# Remember previous cursor position if possible
if hasattr(ow, 'get_cursor_coords'):
self._old_cursor_coords = ow.get_cursor_coords(ow_size)
key = ow.keypress(ow_size, key)
if key is None:
return None
# Handle up/down, page up/down, etc
command_map = self._command_map
if command_map[key] == urwid.CURSOR_UP:
self._scroll_action = SCROLL_LINE_UP
elif command_map[key] == urwid.CURSOR_DOWN:
self._scroll_action = SCROLL_LINE_DOWN
elif command_map[key] == urwid.CURSOR_PAGE_UP:
self._scroll_action = SCROLL_PAGE_UP
elif command_map[key] == urwid.CURSOR_PAGE_DOWN:
self._scroll_action = SCROLL_PAGE_DOWN
elif command_map[key] == urwid.CURSOR_MAX_LEFT: # 'home'
self._scroll_action = SCROLL_TO_TOP
elif command_map[key] == urwid.CURSOR_MAX_RIGHT: # 'end'
self._scroll_action = SCROLL_TO_END
else:
return key
self._invalidate()
def mouse_event(self, size, event, button, col, row, focus):
ow = self._original_widget
if hasattr(ow, 'mouse_event'):
ow_size = self._get_original_widget_size(size)
row += self._trim_top
return ow.mouse_event(ow_size, event, button, col, row, focus)
else:
return False
def _adjust_trim_top(self, canv, size):
"""Adjust self._trim_top according to self._scroll_action"""
action = self._scroll_action
self._scroll_action = None
maxcol, maxrow = size
trim_top = self._trim_top
canv_rows = canv.rows()
if trim_top < 0:
# Negative trim_top values use bottom of canvas as reference
trim_top = canv_rows - maxrow + trim_top + 1
if canv_rows <= maxrow:
self._trim_top = 0 # Reset scroll position
return
def ensure_bounds(new_trim_top):
return max(0, min(canv_rows - maxrow, new_trim_top))
if action == SCROLL_LINE_UP:
self._trim_top = ensure_bounds(trim_top - 1)
elif action == SCROLL_LINE_DOWN:
self._trim_top = ensure_bounds(trim_top + 1)
elif action == SCROLL_PAGE_UP:
self._trim_top = ensure_bounds(trim_top - maxrow + 1)
elif action == SCROLL_PAGE_DOWN:
self._trim_top = ensure_bounds(trim_top + maxrow - 1)
elif action == SCROLL_TO_TOP:
self._trim_top = 0
elif action == SCROLL_TO_END:
self._trim_top = canv_rows - maxrow
else:
self._trim_top = ensure_bounds(trim_top)
# If the cursor was moved by the most recent keypress, adjust trim_top
# so that the new cursor position is within the displayed canvas part.
# But don't do this if the cursor is at the top/bottom edge so we can still scroll out
if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
s
|
google/DAPLink-port
|
test/stress_tests/hid_usb_test.py
|
Python
|
apache-2.0
| 2,358
| 0.000424
|
#
# DAPLink Interface Firmware
# Copyright (c) 2016-2017, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mbed_lstools
import threading
import time
import pyocd
should_exit = False
exit_cond = threading.Condition()
print_mut = threading.RLock()
global_start_time = time.time()
def _get_time():
return time.time() - global_start_time
def sync_print(msg):
with print_mut:
|
print(msg)
def hid_main(thread_index, board_id):
global should_exit
count = 0
try:
device = pyocd.probe.pydapaccess.DAPAccess.get_device(board_id)
while not should_exit:
device.open()
info = device.vendor(0)
info = str(bytearray(info[1:1 + info[0]]))
assert info == board_id
device.close()
if count % 100 == 0:
sync_print("Thread %i on loop %10i at %.6f - %s - board
|
%s" %
(thread_index, count, _get_time(),
time.strftime("%H:%M:%S"), board_id))
count += 1
except:
sync_print("Thread %i exception board %s" % (thread_index, board_id))
with exit_cond:
should_exit = 1
exit_cond.notify_all()
raise
def main():
global should_exit
lstools = mbed_lstools.create()
mbed_list = lstools.list_mbeds()
for thread_index, mbed in enumerate(mbed_list):
msd_thread = threading.Thread(target=hid_main,
args=(thread_index, mbed['target_id']))
msd_thread.start()
try:
with exit_cond:
while not should_exit:
exit_cond.wait(1)
except KeyboardInterrupt:
pass
should_exit = True
sync_print("Exiting")
if __name__ == "__main__":
main()
|
kennedyshead/home-assistant
|
homeassistant/components/yi/camera.py
|
Python
|
apache-2.0
| 5,013
| 0.000798
|
"""Support for Xiaomi Cameras (HiSilicon Hi3518e V200)."""
import asyncio
import logging
from aioftp import Client, StatusCodeError
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PATH,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
_LOGGER = logging.getLogger(__name__)
DEFAULT_BRAND = "YI Home Camera"
DEFAULT_PASSWORD = ""
DEFAULT_PATH = "/tmp/sd/record" # nosec
DEFAULT_PORT = 21
DEFAULT_USERNAME = "root"
DEFAULT_ARGUMENTS = "-pred 1"
CONF_FFMPEG_ARGUMENTS = "ffmpeg_arguments"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_FFMPEG_ARGUMENTS, default=DEFAULT_ARGUMENTS): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Yi Camera."""
async_add_entities([YiCamera(hass, config)], True)
class YiCamera(Camera):
"""Define an implementation of a Yi Camera."""
def __init__(self, hass, config):
"""Initialize."""
super().__init__()
self._extra_arguments = config.get(CONF_FFMPEG_ARGUMENTS)
self._last_image = None
self._last_url = None
self._manager = hass.data[DATA_FFMPEG]
self._name = config[CONF_NAME]
self._is_on = True
self.host = config[CONF_HOST]
self.port = config[CONF_PORT]
self.path = config[CONF_PATH]
self.user = config[CONF_USERNAME]
self.passwd = config[CONF_PASSWORD]
@property
def brand(self):
"""Camera brand."""
return DEFAULT_BRAND
@property
def is_on(self):
"""Determine whether the camera is on."""
return self._is_on
@property
def name(self):
"""Return the name of this camera."""
return self._name
async def _get_latest_video_url(self):
"""Retrieve the latest video file from
|
the customized Yi FTP server."""
ftp = Client()
try:
await ftp.connect(self.host)
await ftp.login(self.user, self.passwd)
except (ConnectionRefusedError, StatusCodeError) as err:
raise PlatformNotReady(err) from err
try:
await ftp.change_directory(self.path)
dirs = []
for path, attrs in await ftp.list():
if attrs["type"] == "dir" and "." not in
|
str(path):
dirs.append(path)
latest_dir = dirs[-1]
await ftp.change_directory(latest_dir)
videos = []
for path, _ in await ftp.list():
videos.append(path)
if not videos:
_LOGGER.info('Video folder "%s" empty; delaying', latest_dir)
return None
await ftp.quit()
self._is_on = True
return (
f"ftp://{self.user}:{self.passwd}@{self.host}:"
f"{self.port}{self.path}/{latest_dir}/{videos[-1]}"
)
except (ConnectionRefusedError, StatusCodeError) as err:
_LOGGER.error("Error while fetching video: %s", err)
self._is_on = False
return None
async def async_camera_image(self):
"""Return a still image response from the camera."""
url = await self._get_latest_video_url()
if url and url != self._last_url:
ffmpeg = ImageFrame(self._manager.binary)
self._last_image = await asyncio.shield(
ffmpeg.get_image(
url, output_format=IMAGE_JPEG, extra_cmd=self._extra_arguments
),
)
self._last_url = url
return self._last_image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
if not self._is_on:
return
stream = CameraMjpeg(self._manager.binary)
await stream.open_camera(self._last_url, extra_cmd=self._extra_arguments)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass,
request,
stream_reader,
self._manager.ffmpeg_stream_content_type,
)
finally:
await stream.close()
|
openai/cleverhans
|
tests_tf/test_mnist_tutorial_keras.py
|
Python
|
mit
| 2,129
| 0.003758
|
# pylint: disable=missing-docstring
import unittest
import numpy as np
# pylint bug on next line
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
from cleverhans.devtools.checks import CleverHansTest
HAS_GPU = 'GPU' in {x.device_type for x in device_lib.list_local_
|
devices()}
class TestMNISTTutorialKeras(CleverHansTest):
def test_mnist_tutorial_keras(self):
import tensorflow as tf
from cleverhans_tutorials import mnist_tutorial_keras
# Run the MNIST tutorial on a dataset of reduced size
test_dataset_indice
|
s = {'train_start': 0,
'train_end': 5000,
'test_start': 0,
'test_end': 333,
'nb_epochs': 2,
'testing': True}
g = tf.Graph()
with g.as_default():
np.random.seed(42)
report = mnist_tutorial_keras.mnist_tutorial(**test_dataset_indices)
# Check accuracy values contained in the AccuracyReport object
self.assertTrue(report.train_clean_train_clean_eval > 0.90)
self.assertTrue(report.train_clean_train_adv_eval < 0.05)
self.assertTrue(report.train_adv_train_clean_eval > 0.90)
self.assertTrue(report.train_adv_train_adv_eval > 0.30)
atol_fac = 5e-2 if HAS_GPU else 1e-6
g = tf.Graph()
with g.as_default():
np.random.seed(42)
report_2 = mnist_tutorial_keras.mnist_tutorial(**test_dataset_indices)
self.assertClose(report.train_clean_train_clean_eval,
report_2.train_clean_train_clean_eval,
atol=atol_fac * 1)
self.assertClose(report.train_clean_train_adv_eval,
report_2.train_clean_train_adv_eval,
atol=atol_fac * 1)
self.assertClose(report.train_adv_train_clean_eval,
report_2.train_adv_train_clean_eval,
atol=atol_fac * 1)
self.assertClose(report.train_adv_train_adv_eval,
report_2.train_adv_train_adv_eval,
atol=atol_fac * 1)
if __name__ == '__main__':
unittest.main()
|
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/vm_snapshots/paths/xsky_path20.py
|
Python
|
apache-2.0
| 1,808
| 0.017699
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
	"""Return the scenario description executed by the woodpecker runner.

	initial_formation: environment template deployed before the steps run.
	checking_point: number of checker points evaluated along the path.
	path_list: ordered TestAction steps (VM/volume lifecycle, snapshots,
	migration, backup) that define this test path.
	"""
	return dict(initial_formation="template5", checking_point=8, path_list=[
		[TestAction.create_vm, 'vm1', ],
		[TestAction.create_volume, 'volume1', 'flag=scsi'],
		[TestAction.attach_volume, 'vm1', 'volume1'],
		[TestAction.create_volume, 'volume2', 'flag=scsi'],
		[TestAction.attach_volume, 'vm1', 'volume2'],
		[TestAction.create_volume, 'volume3', 'flag=scsi'],
		[TestAction.attach_volume, 'vm1', 'volume3'],
		[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
		[TestAction.clone_vm, 'vm1', 'vm2'],
		[TestAction.detach_volume, 'volume3'],
		[TestAction.create_vm_snapshot, 'vm2', 'vm2-snapshot5'],
		[TestAction.stop_vm, 'vm2'],
		[TestAction.ps_migrate_vm, 'vm2'],
		[TestAction.start_vm, 'vm2'],
		[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot6'],
		[TestAction.stop_vm, 'vm1'],
		[TestAction.ps_migrate_vm, 'vm1'],
		[TestAction.start_vm, 'vm1'],
		[TestAction.attach_volume, 'vm2', 'volume3'],
		[TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
		[TestAction.delete_vm_snapshot, 'vm2-snapshot5'],
	])
'''
The final status:
Running:['vm2', 'vm1']
Stopped:[]
Enadbled:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapsho
|
t1', 'vm1-snapshot6', 'volume1-snapshot6', 'volume2-snapshot6', 'vm1-backup1', 'volume1-backup1', 'volume2-backup1']
attached:['volume1', 'volume2', 'volu
|
me3']
Detached:[]
Deleted:['vm2-snapshot5']
Expunged:[]
Ha:[]
Group:
vm_snap3:['vm1-snapshot6', 'volume1-snapshot6', 'volume2-snapshot6']---vm1volume1_volume2
vm_snap1:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']---vm1volume1_volume2_volume3
vm_backup1:['vm1-backup1', 'volume1-backup1', 'volume2-backup1']---vm1_volume1_volume2
'''
|
stratosphereips/Manati
|
manati/share_modules/whois_distance.py
|
Python
|
agpl-3.0
| 12,100
| 0.007686
|
#!/usr/bin/env python
#
# Copyright (c) 2017 Stratosphere Laboratory.
#
# This file is part of ManaTI Project
# (see <https://stratosphereips.org>). It was created by 'Raul B. Netto <raulbeni@gmail.com>'
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. See the file 'docs/LICENSE' or see <http://www.gnu.org/licenses/>
# for copying permission.
#
import Levenshtein
import datetime
from tld import get_tld
import pprint as pp
import pythonwhois
from pythonwhois.shared import WhoisException
from contextlib import contextmanager
from collections import Iterable
from passivetotal.common.utilities import is_ip
import re
from passivetotal.libs.whois import *
import dateutil.parser
import config.settings as settings
from peewee import *
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import sys
import argparse
import os
import json
import time
import warnings
warnings.filterwarnings("ignore")
reload(sys)
sys.setdefaultencoding("utf-8")
from manati.analysis_sessions.models import WhoisConsult
KEY_DOMAIN_NAME = 'domain_name'
KEY_REGISTRAR = 'registrar'
KEY_NAME = 'name'
KEY_ORG = 'org'
KEY_ZIPCODE = 'zipcode'
KEY_CREATION_DATE = 'creation_date'
KEY_EXPIRATION_DATE = 'expiration_date'
KEY_EMAILS = 'emails'
KEY_NAME_SERVERS = 'name_servers'
RELATION_THRESHOLD = 75 #roc curve of the thesis
weights = [0,1,1,1,1,1,1,1]
def __levenshtein__(str1, str2):
    # Case-insensitive Levenshtein edit distance between two strings.
    # The explicit utf-8 encode is a Python 2 idiom: the C Levenshtein
    # extension there expects byte strings.
    str1 = str1.encode('utf-8')
    str2 = str2.encode('utf-8')
    return Levenshtein.distance(str1.lower(),str2.lower())
def __dist_domain__name__(domain_name_a, domain_name_b):
    """Case-insensitive edit distance between two domain names."""
    lowered_a = str(domain_name_a).lower()
    lowered_b = str(domain_name_b).lower()
    return __levenshtein__(lowered_a, lowered_b)
def __dist_registrar__(registrar_a, registrar_b):
    """Edit distance between two registrar fields.

    Each argument may be None, a string, or a list of strings; a missing
    registrar counts as the empty string and only the first list entry is
    compared.
    """
    def _first_encoded(value):
        # Normalize None -> '', pick the first entry of a list, and encode
        # to utf-8 bytes (Python 2 idiom required by the Levenshtein lib).
        if value is None:
            value = ''
        if isinstance(value, list):
            value = value[0]
        return value.encode('utf-8')

    encoded_a = _first_encoded(registrar_a)
    encoded_b = _first_encoded(registrar_b)
    return __levenshtein__(str(encoded_a).lower(), str(encoded_b).lower())
def __dist_name__(name_a, name_b):
    """Case-insensitive edit distance between two registrant names."""
    pair = (str(name_a).lower(), str(name_b).lower())
    return __levenshtein__(*pair)
def __dist_org_by_min_dist__(orgs_a=None, orgs_b=None):
    """Minimum pairwise edit distance between two organisation value sets.

    Each argument may be None/empty, a comma-separated string, or a list of
    strings.  Returns 0.0 when both sides are empty; otherwise the smallest
    Levenshtein distance over all cross pairs (an empty side is compared as
    the empty string).
    """
    # None replaces the original mutable [] defaults; behavior is identical.
    orgs_a = [] if orgs_a is None else orgs_a
    orgs_b = [] if orgs_b is None else orgs_b
    orgs_seed = orgs_a if isinstance(orgs_a, list) else orgs_a.split(',')
    orgs_file = orgs_b if isinstance(orgs_b, list) else orgs_b.split(',')
    if not orgs_seed and not orgs_file:
        return float(0)
    orgs_seed = orgs_seed or ['']
    orgs_file = orgs_file or ['']
    # BUG FIX: the original wrapped each distance in str() before min(),
    # taking the *lexicographic* minimum ("10" < "9"), not the numeric one.
    dist_org = None
    for org_s in orgs_seed:
        org_s = org_s.encode('utf-8')
        for org_f in orgs_file:
            org_f = org_f.encode('utf-8')
            candidate = __levenshtein__(str(org_s), str(org_f))
            if dist_org is None or candidate < dist_org:
                dist_org = candidate
    return float(dist_org)
def __dist_zipcode_by_min_dist__(zipcodes_a=None, zipcodes_b=None):
    """Minimum pairwise edit distance between two zipcode value sets.

    Accepts comma-separated strings or lists; 0.0 when both sides are empty,
    otherwise the smallest distance over all cross pairs.
    """
    # None replaces the original mutable [] defaults; behavior is identical.
    zipcodes_a = [] if zipcodes_a is None else zipcodes_a
    zipcodes_b = [] if zipcodes_b is None else zipcodes_b
    zipcodes_seed = zipcodes_a if isinstance(zipcodes_a, list) else zipcodes_a.split(',')
    zipcodes_file = zipcodes_b if isinstance(zipcodes_b, list) else zipcodes_b.split(',')
    if not zipcodes_seed and not zipcodes_file:
        return float(0)
    zipcodes_seed = zipcodes_seed or ['']
    zipcodes_file = zipcodes_file or ['']
    # BUG FIX: min() over str(distance) values was lexicographic, not numeric.
    return float(min(__levenshtein__(str(z_s), str(z_f))
                     for z_s in zipcodes_seed
                     for z_f in zipcodes_file))
def get_date_aux(date):
    """Parse *date* into a datetime.

    datetime instances pass through; strings are first tried against the
    'DD-MM-YYYY' whois format and then handed to dateutil's fuzzy parser.
    """
    try:
        return datetime.datetime.strptime(date, '%d-%m-%Y') \
            if not isinstance(date, datetime.datetime) else date
    except Exception as ex:
        # Fall back to dateutil for any other textual date format.
        return dateutil.parser.parse(date)
# ttl by proportion, more close tu cero, more close is the ttl
def get_diff_ttl(creation_date_a, creation_date_b, expiration_date_a, expiration_date_b):
    """Normalized difference between the two domains' time-to-live.

    Returns 0.0 when both sides carry the same (or equally missing) date
    information, 1.0 when only one side has usable dates, and otherwise
    1 - min(ttl_a, ttl_b) / max(ttl_a, ttl_b), so identical TTLs give 0.
    """
    # BUG FIX: the last clause of this guard tested expiration_date_a twice;
    # it must cover expiration_date_b as well.
    if not creation_date_a and not creation_date_b and not expiration_date_a and not expiration_date_b:
        return float(0)
    elif not creation_date_a and not creation_date_b and expiration_date_a and expiration_date_b:
        # Only expiration dates available: compare them directly.
        # BUG FIX: the original compared expiration_date_a with itself.
        if expiration_date_a == expiration_date_b:
            return float(0)
        else:
            return float(1)
    elif creation_date_a and creation_date_b and not expiration_date_a and not expiration_date_b:
        # Only creation dates available: compare them directly.
        # BUG FIX: the original compared creation_date_a with itself.
        if creation_date_a == creation_date_b:
            return float(0)
        else:
            return float(1)
    elif not creation_date_a or not creation_date_b or not expiration_date_a or not expiration_date_b:
        # Asymmetric information: treat as maximally different.
        return float(1)
    else:
        cd_a = get_date_aux(creation_date_a)
        ed_a = get_date_aux(expiration_date_a)
        cd_b = get_date_aux(creation_date_b)
        ed_b = get_date_aux(expiration_date_b)
        ttl_days_b = float(abs(cd_b - ed_b).days)  # time to live
        ttl_days_a = float(abs(cd_a - ed_a).days)
        if ttl_days_b == ttl_days_a:
            return float(0)
        else:
            return float(1) - ((ttl_days_b / ttl_days_a) if ttl_days_b <= ttl_days_a else (ttl_days_a / ttl_days_b))
# Method computing distance where emails are measured with "taking the minimun distance techniques "
def get_diff_emails_by_min_dist(emails_a=None, emails_b=None):
    """Minimum pairwise edit distance between the two email value sets.

    Accepts comma-separated strings or lists; 0.0 when both sides are empty,
    otherwise the smallest distance over all cross pairs.
    """
    # None replaces the original mutable [] defaults; behavior is identical.
    emails_a = [] if emails_a is None else emails_a
    emails_b = [] if emails_b is None else emails_b
    emails_seed = emails_a if isinstance(emails_a, list) else emails_a.split(',')
    emails_file = emails_b if isinstance(emails_b, list) else emails_b.split(',')
    if not emails_seed and not emails_file:
        return float(0)
    emails_seed = emails_seed or ['']
    emails_file = emails_file or ['']
    # BUG FIX: min() over str(distance) values was lexicographic, not numeric.
    return float(min(__levenshtein__(str(e_s), str(e_f))
                     for e_s in emails_seed
                     for e_f in emails_file))
# Method computing distance where name_servers are measured with "taking the minimun distance techniques "
def get_diff_name_servers_by_min_dist(name_servers_a=None, name_servers_b=None):
    """Minimum pairwise edit distance between the two name-server value sets.

    Accepts None, comma-separated strings, or lists; 0.0 when both sides are
    empty, otherwise the smallest distance over all cross pairs.
    """
    # None replaces the original mutable [] defaults (the original already
    # normalized None explicitly); behavior is identical.
    name_servers_a = [] if name_servers_a is None else name_servers_a
    name_servers_b = [] if name_servers_b is None else name_servers_b
    servers_seed = name_servers_a if isinstance(name_servers_a, list) else name_servers_a.split(',')
    servers_file = name_servers_b if isinstance(name_servers_b, list) else name_servers_b.split(',')
    if not servers_seed and not servers_file:
        return float(0)
    servers_seed = servers_seed or ['']
    servers_file = servers_file or ['']
    # BUG FIX: min() over str(distance) values was lexicographic, not numeric.
    return float(min(__levenshtein__(str(ns_s), str(ns_f))
                     for ns_s in servers_seed
                     for ns_f in servers_file))
def features_domains_attr(domain_name_a, registrar_a, name_a, orgs_a, zipcodes_a, creation_date_a,
expiration_date_a, emails_str_a, name_servers_str_a,
domain_name_b, registrar_b, name_b, orgs_b, zipcodes_b, creation_date_b,
expiration_date_b, emails_str_b, name_servers_str_b, ):
dist_domain_name = __dist_domain__name__(domain_name_a, domain_name_b)
dist_regist
|
tim-tang/arctic-bear
|
setup.py
|
Python
|
mit
| 1,115
| 0
|
# coding: utf-8
"""Packaging script for arctic.

(Rejoined: two statements below were split mid-token in the source dump.)
"""
import arctic
from email.utils import parseaddr
from setuptools import setup, find_packages

# Optional Babel integration: register the i18n catalog commands only when
# Babel is importable, so plain installs work without it.
kwargs = {}
try:
    from babel.messages import frontend as babel
    kwargs['cmdclass'] = {
        'extract_messages': babel.extract_messages,
        'update_catalog': babel.update_catalog,
        'compile_catalog': babel.compile_catalog,
        'init_catalog': babel.init_catalog,
    }
    kwargs['message_extractors'] = {
        'arctic': [
            ('**.py', 'python', None),
            ('**/templates/**.html', 'jinja2', {
                'extensions': (
                    'jinja2.ext.autoescape,'
                    'jinja2.ext.with_,'
                    'jinja2.ext.do,'
                )
            })
        ]
    }
except ImportError:
    pass

# arctic.__author__ is "Name <email>"; split it into its two parts.
author, author_email = parseaddr(arctic.__author__)

setup(
    name='arctic',
    version=arctic.__version__,
    author=author,
    author_email=author_email,
    url='http://timtang.me/',
    packages=find_packages(exclude=['tests', 'tests.*']),
    license='MIT',
    zip_safe=False,
    include_package_data=True,
    **kwargs
)
|
UTSA-ICS/keystone-kerberos
|
keystone/contrib/revoke/core.py
|
Python
|
apache-2.0
| 9,529
| 0.000105
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone.common import cache
from keystone.common import dependency
from keystone.common import extension
from keystone.common import manager
from keystone.contrib.revoke import model
from keystone import exception
from keystone.i18n import _
from keystone import notifications
from keystone.openstack.common import versionutils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
EXTENSION_DATA = {
'name': 'OpenStack Revoke API',
'namespace': 'http://docs.openstack.org/identity/api/ext/'
'OS-REVOKE/v1.0',
'alias': 'OS-REVOKE',
'updated': '2014-02-24T20:51:0-00:00',
'description': 'OpenStack revoked token reporting mechanism.',
'links': [
{
'rel': 'describedby',
'type': 'text/html',
'href': ('https://github.com/openstack/identity-api/blob/master/'
'openstack-identity-api/v3/src/markdown/'
'identity-api-v3-os-revoke-ext.md'),
}
]}
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
SHOULD_CACHE = cache.should_cache_fn('revoke')
# TODO(ayoung): migrate from the token section
REVOCATION_CACHE_EXPIRATION_TIME = lambda: CONF.token.revocation_cache_time
def revoked_before_cutoff_time():
    """Return the oldest timestamp for which revocation events still matter.

    Events older than (token expiration + configured buffer) can no longer
    affect any live token, so callers may prune anything before this time.
    """
    expire_delta = datetime.timedelta(
        seconds=CONF.token.expiration + CONF.revoke.expiration_buffer)
    oldest = timeutils.utcnow() - expire_delta
    return oldest
@dependency.provider('revoke_api')
class Manager(manager.Manager):
"""Revoke API Manager.
Performs common logic for recording revocations.
"""
    def __init__(self):
        # Load the configured revocation driver, subscribe to the identity
        # notifications that must trigger revocations, and expose the event
        # model for callers.
        super(Manager, self).__init__(CONF.revoke.driver)
        self._register_listeners()
        self.model = model
    def _user_callback(self, service, resource_type, operation,
                       payload):
        # A deleted/disabled/invalidated user revokes all that user's tokens.
        self.revoke_by_user(payload['resource_info'])
    def _role_callback(self, service, resource_type, operation,
                       payload):
        # Deleting a role revokes every token that carries it.
        self.revoke(
            model.RevokeEvent(role_id=payload['resource_info']))
    def _project_callback(self, service, resource_type, operation,
                          payload):
        # Deleting/disabling a project revokes tokens scoped to it.
        self.revoke(
            model.RevokeEvent(project_id=payload['resource_info']))
    def _domain_callback(self, service, resource_type, operation,
                         payload):
        # Disabling a domain revokes tokens scoped to it.
        self.revoke(
            model.RevokeEvent(domain_id=payload['resource_info']))
    def _trust_callback(self, service, resource_type, operation,
                        payload):
        # Deleting a trust revokes tokens issued through it.
        self.revoke(
            model.RevokeEvent(trust_id=payload['resource_info']))
    def _consumer_callback(self, service, resource_type, operation,
                           payload):
        # Deleting an OAuth consumer revokes tokens issued through it.
        self.revoke(
            model.RevokeEvent(consumer_id=payload['resource_info']))
    def _access_token_callback(self, service, resource_type, operation,
                               payload):
        # Deleting an OAuth access token revokes tokens issued through it.
        self.revoke(
            model.RevokeEvent(access_token_id=payload['resource_info']))
    def _group_callback(self, service, resource_type, operation, payload):
        # Deleting a group revokes the tokens of every user that was a member.
        user_ids = (u['id'] for u in self.identity_api.list_users_in_group(
            payload['resource_info']))
        for uid in user_ids:
            self.revoke(model.RevokeEvent(user_id=uid))
    def _register_listeners(self):
        # Map notification events to the callbacks that translate them into
        # revocation events: 'deleted'/'disabled' cover resource lifecycle,
        # the internal event handles explicit token-persistence invalidation.
        callbacks = {
            notifications.ACTIONS.deleted: [
                ['OS-TRUST:trust', self._trust_callback],
                ['OS-OAUTH1:consumer', self._consumer_callback],
                ['OS-OAUTH1:access_token', self._access_token_callback],
                ['role', self._role_callback],
                ['user', self._user_callback],
                ['project', self._project_callback],
            ],
            notifications.ACTIONS.disabled: [
                ['user', self._user_callback],
                ['project', self._project_callback],
                ['domain', self._domain_callback],
            ],
            notifications.ACTIONS.internal: [
                [notifications.INVALIDATE_USER_TOKEN_PERSISTENCE,
                 self._user_callback],
            ]
        }
        for event, cb_info in six.iteritems(callbacks):
            for resource_type, callback_fns in cb_info:
                notifications.register_event_callback(event, resource_type,
                                                      callback_fns)
    def revoke_by_user(self, user_id):
        # Revoke every token belonging to *user_id*.
        return self.revoke(model.RevokeEvent(user_id=user_id))
    def _assert_not_domain_and_project_scoped(self, domain_id=None,
                                              project_id=None):
        # A revocation event may be scoped to a domain or a project, never
        # both; both being set indicates a server-side bug.
        if domain_id is not None and project_id is not None:
            msg = _('The revoke call must not have both domain_id and '
                    'project_id. This is a bug in the Keystone server. The '
                    'current request is aborted.')
            raise exception.UnexpectedError(exception=msg)
    @versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
                             remove_in=0)
    def revoke_by_expiration(self, user_id, expires_at,
                             domain_id=None, project_id=None):
        # Deprecated since Juno (remove_in=0: kept for compatibility).
        # Revokes the token of *user_id* expiring at *expires_at*, optionally
        # scoped to a domain or project -- never both.
        self._assert_not_domain_and_project_scoped(domain_id=domain_id,
                                                   project_id=project_id)
        self.revoke(
            model.RevokeEvent(user_id=user_id,
                              expires_at=expires_at,
                              domain_id=domain_id,
                              project_id=project_id))
    def revoke_by_audit_id(self, audit_id):
        # Revoke the single token identified by its audit id.
        self.revoke(model.RevokeEvent(audit_id=audit_id))
    def revoke_by_audit_chain_id(self, audit_chain_id, project_id=None,
                                 domain_id=None):
        # Revoke an entire token chain (a token and everything re-scoped
        # from it), optionally limited to one domain or project.
        self._assert_not_domain_and_project_scoped(domain_id=domain_id,
                                                   project_id=project_id)
        self.revoke(model.RevokeEvent(audit_chain_id=audit_chain_id,
                                      domain_id=domain_id,
                                      project_id=project_id))
def revoke_by_grant(self, role_id, user_id=None,
domain_id=None, project
|
_id=None):
self.revoke(
model.RevokeEvent(user_id=user_id,
role_id=role_id,
domain_id=domain_id,
project_id=project_id))
    def revoke_by_user_and_project(self, user_id, project_id):
        # Revoke *user_id*'s tokens that are scoped to *project_id*.
        self.revoke(
            model.RevokeEvent(project_id=project_id, user_id=user_id))
def revoke_by_project_role_assignment(self, p
|
roject_id, role_id):
self.revoke(model.RevokeEvent(project_id=project_id, role_id=role_id))
    def revoke_by_domain_role_assignment(self, domain_id, role_id):
        # Revoke tokens scoped to *domain_id* that rely on *role_id*.
        self.revoke(model.RevokeEvent(domain_id=domain_id, role_id=role_id))
    @cache.on_arguments(should_cache_fn=SHOULD_CACHE,
                        expiration_time=REVOCATION_CACHE_EXPIRATION_TIME)
    def _get_revoke_tree(self):
        # Build (and cache) the in-memory search tree over all persisted
        # revocation events; token checks run against this tree.
        events = self.driver.get_events()
        revoke_tree = model.RevokeTree(revoke_events=events)
        return revoke_tree
def check_token(self, token_values):
"""Checks the values from a token against the revocation list
:param token_values: dictionary of values from a token,
normalized fo
|
tseaver/google-cloud-python
|
bigquery/samples/tests/test_table_exists.py
|
Python
|
apache-2.0
| 1,098
| 0
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# d
|
istributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import bigquery
from .. import table_exists
def test_table_exists(capsys, client, random_table_id):
    """The sample reports 'not found' before creation and 'exists' after."""
    # Before the table is created the sample should say it is missing.
    table_exists.table_exists(client, random_table_id)
    out, err = capsys.readouterr()
    assert "Table {} is not found.".format(random_table_id) in out
    # Create the table, then the sample should report it as existing.
    table = bigquery.Table(random_table_id)
    table = client.create_table(table)
    table_exists.table_exists(client, random_table_id)
    out, err = capsys.readouterr()
    assert "Table {} already exists.".format(random_table_id) in out
|
CompassionCH/compassion-switzerland
|
partner_communication_switzerland/wizards/child_order_picture.py
|
Python
|
agpl-3.0
| 4,752
| 0.00021
|
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
import base64
import tempfile
import os
from io import BytesIO
from zipfile import ZipFile
from odoo import api, models, fields
_logger = logging.getLogger(__name__)
# Limit number of photos to handle at a time to avoid memory issues
NUMBER_LIMIT = 80
try:
from pdf2image import convert_from_path
except ImportError:
_logger.debug("Can not `import pdf2image`.")
class CompassionHold(models.TransientModel):
    """Wizard producing framed child pictures as a ZIP download or PDF report.

    NOTE(review): the class name does not match the model
    'child.order.picture.wizard' -- presumably a copy/paste leftover.
    """
    _inherit = "mail.activity.mixin"
    _name = "child.order.picture.wizard"

    sponsorship_ids = fields.Many2many(
        "recurring.contract",
        string="New biennials",
        readonly=True,
        default=lambda s: s._get_sponsorships(),
    )
    filename = fields.Char(default="child_photos.zip")
    download_data = fields.Binary(readonly=True)

    @api.model
    def _get_sponsorships(self):
        """Default sponsorships: current selection, or pending orders from the menu."""
        model = "recurring.contract"
        if self.env.context.get("active_model") == model:
            ids = self.env.context.get("active_ids")
            if ids:
                return self.env[model].browse(ids)
        elif self.env.context.get("order_menu"):
            return self.env[model].search(self._needaction_domain_get())
        return False

    @api.multi
    def order_pictures(self):
        """Build the ZIP of pictures and reopen the wizard for download."""
        return self._get_pictures()

    @api.multi
    def print_pictures(self):
        """Generate the printable PDF report of the pictures."""
        return self._get_pictures(_print=True)

    @api.multi
    def _get_pictures(self, _print=False):
        """
        Generate child pictures with white frame and make a downloadable
        ZIP file or generate a report for printing.
        :param _print: Set to true for PDF generation instead of ZIP file.
        :return: Window Action
        """
        sponsorships = self.sponsorship_ids[:NUMBER_LIMIT]
        if _print:
            report = self.env.ref(
                "partner_communication_switzerland.report_child_picture")
            res = report.report_action(
                sponsorships.mapped("child_id.id"), config=False
            )
        else:
            self.download_data = self._make_zip()
            res = {
                "type": "ir.actions.act_window",
                "view_type": "form",
                "view_mode": "form",
                "res_id": self.id,
                "res_model": self._name,
                "context": self.env.context,
                "target": "new",
            }
        sponsorships.write({"order_photo": False})
        # Log a note to recover the sponsorships in case the ZIP is lost
        for s in sponsorships:
            s.message_post("Picture ordered.")
        return res

    @api.multi
    def _make_zip(self):
        """
        Create a zip file with all pictures.
        :return: base64-encoded content of the generated zip file
        """
        zip_buffer = BytesIO()
        children = self.mapped("sponsorship_ids.child_id")[:NUMBER_LIMIT]
        with ZipFile(zip_buffer, "w") as zip_data:
            report_ref = self.env.ref(
                "partner_communication_switzerland.report_child_picture"
            ).with_context(must_skip_send_to_printer=True)
            pdf_data = report_ref.render_qweb_pdf(
                children.ids,
                data={"doc_ids": children.ids}
            )[0]
            pdf_temp_file, pdf_temp_file_name = tempfile.mkstemp()
            try:
                os.write(pdf_temp_file, pdf_data)
            finally:
                # BUG FIX: mkstemp descriptors must be closed by the caller;
                # the original leaked one per call.
                os.close(pdf_temp_file)
            try:
                pages = convert_from_path(pdf_temp_file_name)
            finally:
                # BUG FIX: remove the temporary PDF instead of leaving it
                # behind in the temp directory.
                os.remove(pdf_temp_file_name)
            for page_id, page in enumerate(pages):
                child = self.env["compassion.child"].browse(
                    children.ids[page_id]
                )
                fname = str(child.sponsor_ref) + "_" + str(child.local_id) + ".jpg"
                page.save(os.path.join("/tmp/", fname), "JPEG")
                # BUG FIX: close the picture file handle via a context
                # manager instead of leaking it.
                with open(os.path.join("/tmp/", fname), "br") as picture:
                    zip_data.writestr(fname, picture.read())
        zip_buffer.seek(0)
        return base64.b64encode(zip_buffer.read())

    @api.model
    def _needaction_domain_get(self):
        # BUG FIX: the original compared state against a list containing one
        # *tuple* (("terminated", "cancelled")), which can never match a
        # state string, so terminated/cancelled contracts were included.
        return [
            ("order_photo", "=", True),
            ("state", "not in", ["terminated", "cancelled"]),
        ]

    @api.model
    def _needaction_count(self, domain=None):
        """ Get the number of actions uid has to perform. """
        return self.env["recurring.contract"].search_count(
            self._needaction_domain_get()
        )
|
psychopy/versions
|
psychopy/voicekey/signal.py
|
Python
|
gpl-3.0
| 1,069
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classes for signals to be sent upon voice-key trip events.
"""
from __future__ import absolute_import, print_function
import threading
class _BaseVoiceKeySignal(threading.Thread):
    """Class to support sending a signal upon detection of an event.

    Non-blocking unless you .join() the thread. An adjustable `delay` allows
    a deferred start.

    Subclass and override `signal`.

    (Rejoined: this docstring was split across lines in the source dump.)
    """
    def __init__(self, sec=0.010, delay=0, on=1, off=0):
        super(_BaseVoiceKeySignal, self).__init__(None, 'EventSignal', None)
        self.sec = sec      # duration of the signal, in seconds
        self.delay = delay  # seconds to wait before signaling
        self.on = on        # value emitted while active
        self.off = off      # value emitted when idle
        self.running = False
        # self.daemon = True
        self.id = None

    def __repr__(self):
        text = '<{0} instance, id={1}>'
        return text.format(self.__class__.__name__, self.id)

    def run(self):
        # Thread entry point: `running` is True only while signaling.
        self.running = True
        self.signal()
        self.running = False

    def signal(self):
        """Override in subclasses to emit the actual signal."""
        pass

    def stop(self):
        self.running = False
|
Farthen/OTFBot
|
otfbot/plugins/ircClient/karma.py
|
Python
|
gpl-2.0
| 7,588
| 0.002899
|
# This file is part of OtfBot.
#
# OtfBot is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OtfBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of
|
the GNU General Public License
# along with OtfBot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# (c) 2007 by Alexander Schier
#
"""
Track the karma of user supplied terms
"""
from otfbot.lib import chatMod
import pickle
import os
def sortedbyvalue(dict):
    """Return a [(value, key)] list from a dict, highest value first.

    BUG FIX: the original only reversed the dict's (arbitrary) iteration
    order and never sorted, so despite its name the result was not ordered
    by value.  Callers use this to show "top N" karma entries.
    """
    items = [(k, v) for (v, k) in dict.items()]
    items.sort()
    items.reverse()
    return items
class Plugin(chatMod.chatMod):
    """Track per-channel karma scores with who/why bookkeeping.

    Karma data is pickled to one file per channel path; channels resolving
    to the same file share a single in-memory dict.

    NOTE(review): the why-/who-karma branches in command() read self.karma,
    which is never assigned anywhere (only self.karmas exists) -- those
    branches look like they should use self.karmas[channel]; confirm.
    """
    def __init__(self, bot):
        self.bot = bot
        self.karmas = {} #channel -> (what -> karma-struct)
        self.karmapaths = {} #path -> (what -> channel) (pointer!)
        self.verbose = self.bot.config.getBool("karma.verbose", True)
        self.freestyle = self.bot.config.getBool("karma.freestyle", True)
    def loadKarma(self, channel):
        # NOTE(review): 'datadir' is not defined in this chunk -- presumably
        # a module-level/framework global; confirm before refactoring.
        if not os.path.exists(datadir):
            os.makedirs(datadir)
        karmapath = self.bot.config.getPath("file", datadir, "karma.dat", "karma", self.bot.network, channel)
        if not karmapath in self.karmapaths.keys():
            if os.path.exists(karmapath):
                #TODO: blocking
                karmafile = open(karmapath, "r")
                self.karmas[channel] = pickle.load(karmafile)
                self.karmapaths[karmapath] = channel
                karmafile.close()
            else:
                self.karmas[channel] = {} #what -> karma-struct
        else:
            # pointer(!) to shared karma
            self.karmas[channel] = self.karmas[self.karmapaths[karmapath]]
    def saveKarma(self, channel):
        #Attention: we write the files from karmapaths(which are unique), not
        # from the channels array!
        #TODO: blocking
        karmapath = self.bot.config.getPath("file", datadir, "karma.dat", "karma", self.bot.network, channel)
        karmafile = open(karmapath, "w")
        pickle.dump(self.karmas[channel], karmafile)
        karmafile.close()
    def joined(self, channel):
        # Load (or share) this channel's karma store on join.
        self.loadKarma(channel)
    def left(self, channel):
        # Persist the channel's karma store when leaving it.
        self.saveKarma(channel)
    def command(self, user, channel, command, options):
        up = False
        what = None
        reason = None
        #return on why/who karma up/down
        num_reasons = 5
        num_user = 5
        # A trailing '#text' attaches a free-form reason to the change.
        tmp = options.split("#", 1)
        options = tmp[0].strip()
        if len(tmp) == 2:
            reason = tmp[1]
        if command == "karma":
            if options == "":
                rmsg = "Nutzen: !karma name++ oder !karma name--"
                self.bot.sendmsg(channel, rmsg)
                return
            else:
                if options[-2:] == "++":
                    up = True
                    what = options[:-2]
                elif options[-2:] == "--":
                    up = False
                    what = options[:-2]
                else:
                    self.tell_karma(options, channel)
                    return
                self.do_karma(channel, what, up, reason, user)
                if self.verbose:
                    self.tell_karma(what, channel)
        elif command == "why-karmaup" or command == "wku":
            # NOTE(review): self.karma is undefined (see class docstring);
            # also, the while loop below indexes [-num] after decrementing,
            # so its last iteration reads index [-0] == [0] (the first
            # element) -- looks like an off-by-one; confirm intent.
            options.strip()
            reasons = ""
            if options in self.karma.keys():
                num = min(num_reasons, len(self.karma[options][3]))
                while num > 0:
                    num -= 1
                    reasons += " .. " + self.karma[options][3][-num]
                reasons = reasons[4:]
                self.bot.sendmsg(channel, reasons)
        elif command == "why-karmadown" or command == "wkd":
            options.strip()
            reasons = ""
            if options in self.karma.keys():
                num = min(num_reasons, len(self.karma[options][4]))
                while num > 0:
                    num -= 1
                    reasons += " .. " + self.karma[options][4][-num]
                reasons = reasons[4:]
                self.bot.sendmsg(channel, reasons)
        elif command == "who-karmaup":
            options.strip()
            people = ""
            if options in self.karma.keys():
                items = sortedbyvalue(self.karma[options][1])
                num = min(num_user, len(items))
                while num > 0:
                    num -= 1
                    people += " .. " + items[-num][1] + "=" + str(items[-num][0])
                people = people[4:]
                self.bot.sendmsg(channel, people)
        elif command == "who-karmadown":
            options.strip()
            people = ""
            if options in self.karma.keys():
                items = sortedbyvalue(self.karma[options][2])
                num = min(num_user, len(items))
                while num > 0:
                    num -= 1
                    people += " .. " + items[-num][1] + "=" + str(items[-num][0])
                people = people[4:]
                self.bot.sendmsg(channel, people)
        elif self.freestyle:
            # Freestyle mode: any '!word++' / '!word options--' also counts.
            if options[-2:] == "++":
                up = True
                what = command + " " + options[:-2]
            elif options[-2:] == "--":
                up = False
                what = command + " " + options[:-2]
            elif command[-2:] == "++":
                up = True
                what = command[:-2]
            elif command[-2:] == "--":
                up = False
                what = command[:-2]
            if what:
                self.do_karma(channel, what, up, reason, user)
                if self.verbose:
                    self.tell_karma(what, channel)
    def tell_karma(self, what, channel):
        # Announce the current score of *what* in the channel.
        self.bot.sendmsg(channel, "Karma: " + what + ": " + str(self.get_karma(channel, what)))
    def get_karma(self, channel, what):
        # Lazily create the karma struct for unknown terms.
        if not what in self.karmas[channel].keys():
            self.karmas[channel][what] = [0, {}, {}, [], []] #same as below!
        return self.karmas[channel][what][0]
    def do_karma(self, channel, what, up, reason, user):
        # Record a +1/-1 for *what*, plus who did it and (optionally) why.
        user = user.split("!")[0]
        karma = self.karmas[channel]
        if not what in karma.keys():
            # score, who-up, who-down, why-up, why-down
            karma[what] = [0, {}, {}, [], []]
        if up:
            karma[what][0] = int(karma[what][0]) + 1
            if not user in karma[what][1].keys():
                karma[what][1][user] = 1
            else:
                karma[what][1][user] += 1
            if reason:
                karma[what][3].append(str(reason))
        else:
            karma[what][0] = int(karma[what][0]) - 1
            if not user in karma[what][2].keys():
                karma[what][2][user] = 1
            else:
                karma[what][2][user] += 1
            if reason:
                karma[what][4].append(str(reason))
    def stop(self):
        # Persist every unique karma file on shutdown.
        for karmapath in self.karmapaths.keys():
            self.saveKarma(self.karmapaths[karmapath])
    def start(self):
        # Load karma for every channel the bot is already in.
        for c in self.bot.channels:
            self.joined(c)
|
yephper/django
|
tests/template_tests/filter_tests/test_make_list.py
|
Python
|
bsd-3-clause
| 1,654
| 0.004232
|
from django.template.defaultfilters import make_list
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils.safestring import mark_safe
from ..utils import setup
class MakeListTests(SimpleTestCase):
    """
    The make_list filter can destroy existing escaping, so the results are
    escaped.

    (Rejoined: two statements below were split mid-token in the source dump.)
    """
    @setup({'make_list01': '{% autoescape off %}{{ a|make_list }}{% endautoescape %}'})
    def test_make_list01(self):
        output = self.engine.render_to_string('make_list01', {"a": mark_safe("&")})
        self.assertEqual(output, str_prefix("[%(_)s'&']"))

    @setup({'make_list02': '{{ a|make_list }}'})
    def test_make_list02(self):
        # NOTE(review): given the class docstring, the autoescaped variant
        # would normally expect an entity-escaped list repr; the literal
        # below may have been entity-decoded upstream -- confirm against
        # the upstream Django test suite.
        output = self.engine.render_to_string('make_list02', {"a": mark_safe("&")})
        self.assertEqual(output, str_prefix("[%(_)s'&']"))

    @setup({'make_list03':
        '{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}'})
    def test_make_list03(self):
        output = self.engine.render_to_string('make_list03', {"a": mark_safe("&")})
        self.assertEqual(output, str_prefix("[%(_)s'&']"))

    @setup({'make_list04': '{{ a|make_list|stringformat:"s"|safe }}'})
    def test_make_list04(self):
        output = self.engine.render_to_string('make_list04', {"a": mark_safe("&")})
        self.assertEqual(output, str_prefix("[%(_)s'&']"))
class FunctionTests(SimpleTestCase):
    """Direct unit tests of the make_list function itself."""
    def test_string(self):
        # A string is split into its individual characters.
        self.assertEqual(make_list('abc'), ['a', 'b', 'c'])
    def test_integer(self):
        # Integers are stringified first, then split into digit characters.
        self.assertEqual(make_list(1234), ['1', '2', '3', '4'])
|
0ffkilter/StunfiskBot
|
StunfiskBot.py
|
Python
|
mit
| 15,210
| 0.007692
|
import praw, argparse, sys, json, re, os, time, traceback
from peewee import *
from logins import *
from var_dicts import *
user_agent = "StunfiskHelperBot v0.1.1 by /u/0ffkilter"
reddit = praw.Reddit(user_agent=user_agent)
reddit.login(bot_name, bot_password)
c_db = MySQLDatabase(database='stunbot', host='localhost', user='root', passwd=sql_password)
c_db.connect()
class Comment(Model):
    """Reddit comments the bot has already processed (prevents re-replies)."""
    # id of the handled reddit comment
    sub_id = CharField()
    class Meta:
        # peewee model binding to the module-level MySQL connection
        database = c_db
Comment.create_table(True)
def main():
    """Poll /r/stunfisk comments forever and answer '+stunfiskhelp' requests."""
    print('starting bot')
    while True:
        try:
            comments = praw.helpers.comment_stream(reddit, 'stunfisk', limit=None, verbosity=0)
            for comment in comments:
                print comment.id
                # Skip comments already answered (persisted via the Comment
                # model; comment_read is presumably defined elsewhere in
                # this module -- confirm).
                if not comment_read(comment.id):
                    Comment.create(sub_id=comment.id)
                    comment_string = base_string
                    parent = False
                    confirm = False
                    for line in comment.body.strip().split('\n'):
                        if '+stunfiskhelp' in line:
                            print('comment found! %s' %(comment.id))
                            # '-parent' asks the bot to reply to the parent;
                            # '-confirm' additionally gates that behavior.
                            parent = '-parent' in line
                            confirm = '-confirm' in line
                            line = line.replace('-parent', '')
                            line = line.replace('-confirm', '')
                            comment_string = comment_string + process_comment(line.replace('+stunfiskhelp', '').lower(), comment) + '\n\n***\n\n'
                    # NOTE(review): identity check ('is not') relies on
                    # comment_string still being the exact base_string object
                    # when no request line matched -- fragile but intentional?
                    if comment_string is not base_string:
                        comment_string = comment_string + suffix
                        reply(comment_string, comment, parent, parent and confirm)
        except KeyboardInterrupt:
            sys.exit(0)
        except Exception as e:
            # Log any other failure and keep the bot alive.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            print(traceback.format_exc())
def get_learn(pokemon, move):
    """Return how *pokemon* learns *move* (its learnset entry, [] if it can't).

    Mega formes are mapped back to their base forme, and pre-evolutions are
    searched recursively when the move is missing from the own learnset.
    """
    move = move.replace(' ', '')
    # Strip the 'mega' suffix, except for names that contain it naturally.
    if 'mega' in pokemon and (not 'yanmega' in pokemon and not 'meganium' in pokemon):
        pokemon = pokemon[:pokemon.index('mega')]
    print('%s -> %s' %(pokemon, move))
    if move in learnsets[pokemon]['learnset']:
        return learnsets[pokemon]['learnset'][move]
    else:
        if 'prevo' in pokedex[pokemon]:
            return get_learn(pokedex[pokemon]['prevo'], move)
        else:
            return []
def can_learn(pokemon, move):
    """True if *pokemon* (or a pre-evolution) can learn *move*."""
    move = move.replace(' ', '')
    # Reduce a mega form to its base species, sparing the two species whose
    # names genuinely contain 'mega'.
    if 'mega' in pokemon and not ('yanmega' in pokemon or 'meganium' in pokemon):
        pokemon = pokemon[:pokemon.index('mega')]
    if move in learnsets[pokemon]['learnset']:
        return True
    dex_entry = pokedex[pokemon]
    if 'prevo' in dex_entry:
        return can_learn(dex_entry['prevo'].lower(), move)
    return False
def set_learns(pokemon, moves):
    """True when the pokemon can learn at least one option from every
    '/'-separated move group in *moves* (vacuously true for [])."""
    if moves == []:
        return True
    for move_group in moves:
        options = move_group.split('/')
        if not any(can_learn(pokemon, option) for option in options):
            return False
    return True
def set_abilities(pokemon, abilities):
    """True when the pokemon has at least one ability from every
    '/'-separated ability group (vacuously true for [])."""
    if abilities == []:
        return True
    known = pokedex[pokemon]['abilities'].values()
    for ability_group in abilities:
        options = ability_group.split('/')
        if not any(option.lower().title() in known for option in options):
            return False
    return True
def set_types(pokemon, poke_types):
    """True when the pokemon matches at least one type from every
    '/'-separated type group (vacuously true for [])."""
    if poke_types == []:
        return True
    known = pokedex[pokemon]['types']
    for type_group in poke_types:
        options = type_group.split('/')
        if not any(option.lower().title() in known for option in options):
            return False
    return True
def set_gens(pokemon, gens):
    """True when the pokemon's generation is in the '/'-separated *gens*
    string (vacuously true when gens is the empty list)."""
    if gens == []:
        return True
    allowed = [int(gen) for gen in gens.split('/')]
    return get_gen(pokemon) in allowed
def get_prevo(pokemon):
    """Return the pre-evolution's name, or the string 'None'."""
    entry = pokedex[pokemon]
    if 'prevo' in entry:
        return str(entry['prevo'])
    return 'None'
def get_evo(pokemon):
    """Return the first listed evolution's name, or the string 'None'."""
    entry = pokedex[pokemon]
    if 'evos' in entry:
        # NOTE(review): assumes 'evos', when present, is non-empty -- verify.
        return str(entry['evos'][0])
    return 'None'
def get_gen(pokemon):
    """Return the 1-based generation containing the pokemon's dex number,
    or 0 if it falls in no known range."""
    dex_num = pokedex[pokemon]['num']
    for gen_number, gen_range in enumerate(gens, start=1):
        if dex_num in gen_range:
            return gen_number
    return 0
def keys_to_string(keys):
    """Format a sequence of learnset keys as markdown bullet lines.

    Returns 'No Results Found\\n\\n' when *keys* is empty. (Reconstructed:
    the original span was corrupted mid-'return result'.)
    """
    if not keys:
        return 'No Results Found\n\n'
    return ''.join(gen_string(key) + '\n\n' for key in keys)
def gen_string(key):
    """Format one learnset key: '<gen digit><method code>[level]' into a
    markdown bullet line."""
    parts = ['* Generation ', key[0], ' through ', learn_types[key[1]]]
    if key[1] == 'l':
        # Level-up entries carry the level after the method code.
        parts.append(' at Level ')
        parts.append(key[2:])
    return ''.join(parts)
def stats_to_string(pokemon):
    """Format the pokemon's base stats as markdown quote lines.

    (Reconstructed: the original span was corrupted mid-statement.)
    """
    base_stats = pokedex[pokemon]['baseStats']
    return ''.join('>>' + stat + ': ' + str(base_stats[stat]) + '\n\n'
                   for stat in stats)
def get_last_set(sections):
    """Return the index of the first section that begins with 'Nature'
    (the stats section terminating the named sets), defaulting to 3.

    Replaces the original try/index/except dance with startswith -- same
    semantics ('Nature' at position 0), no exception control flow.
    """
    for i, section in enumerate(sections):
        if section.startswith('Nature'):
            return i
    return 3
def get_set_names(pokemon):
    """Fetch the subreddit wiki page for *pokemon* and return the names of
    its movesets, or [] for an incorrectly formatted page."""
    page = reddit.get_wiki_page('Stunfisk', pokemon)
    sections = page.content_md.split('##')
    if not is_format_correct(page):
        print('Incorrectly formatted page for: %s' %pokemon)
        return []
    names = sections[3:get_last_set(sections)]
    for index, name in enumerate(names):
        if '#' in name:
            # BUG FIX: the slice index must be computed AFTER stripping '#'
            # characters -- the original sliced with an index taken from the
            # unstripped string, overshooting when '#'s preceded the newline.
            name = name.replace('#', '')
            # NOTE(review): assumes a '\n' is present after the set name;
            # the original had the same assumption.
            names[index] = name[:name.index('\n')]
    # BUG FIX: '%' formatting, not a second print argument (the original
    # printed a tuple).
    print('Sets found -> %s' % names)
    return names
def is_format_correct(wiki_page):
    """True when the wiki page's fourth '##' section is NOT the 'Nature'
    stats section, i.e. named sets precede the stats.

    BUG FIX: the original compared sections[3][:7] == 'Nature' -- a 7-char
    slice against a 6-char literal, which could only ever match when the
    section was exactly 'Nature'. Use startswith, matching get_last_set().
    """
    sections = wiki_page.content_md.split('##')
    return not sections[3].startswith('Nature')
def format_poke(pokemon):
    """Collapse a hyphenated form name into its flat dex key, e.g. by
    mapping the suffix through rotom_forms or dex_suffixes."""
    if '-' not in pokemon:
        return pokemon
    base, _, form = pokemon.partition('-')
    suffix_map = rotom_forms if 'rotom' in pokemon else dex_suffixes
    return base + suffix_map[form]
def format_poke_set(pokemon):
    """Like format_poke, but keeps the hyphen between base name and the
    mapped form suffix (the wiki's set-page naming)."""
    if '-' not in pokemon:
        return pokemon
    base, _, form = pokemon.partition('-')
    if 'rotom' in pokemon:
        pokemon = base + '-' + rotom_forms[form]
        print('rotom form! -> %s' %pokemon)
    else:
        pokemon = base + '-' + dex_suffixes[form]
    return pokemon
def sort_by_bst(pokemon):
    """Return the names sorted by base stat total, highest first."""
    def base_stat_total(poke):
        return sum(pokedex[poke]['baseStats'].values())
    return sorted(pokemon, key=base_stat_total, reverse=True)
def process_comment(line, comment):
if 'tell me a joke' in line:
return 'Your Life'
parent = '-parent' in line
line = line.replace('-parent', '')
confirm = '-confirm' in line and parent
line = line.replace('-confirm', '')
if 'moveset' in line:
number = 30
numbers = [int(s) for s in line.split() if s.isdigit()]
if numbers == []:
number = 30
else:
if (number > 100):
number = 100
else:
number = numbers[0]
for number in numbers:
line = line.replace(str(number), '')
line = line.replace('moveset', '')
moves = line.replace(' ', '').split(',')
return moveset_comment(moves, number)
elif 'search' in line:
line = line.replace('search', '')
moves = []
types = []
abilities = []
gens = []
sections = line.split('|')
for section in sections:
if 'move:' in section or 'moves:' in section:
moves = section[section.index(':')+1:].replace(' ', '').split(',')
elif 'ability:' in section or 'abilities:' in section:
abilities = section[section.index(':')+1:].strip().split(',')
elif 'type:' in section or 'types:' in section:
types = section[section.index(':')+1:].strip().split(',')
elif 'gen:' in section or 'gens:' in section:
gens = section[section.index(':')+ 1:].strip()
return search_comment(moves, abilities, types, gens)
else:
line.strip()
sections = line.strip().split(' ')
pokemon = sections[0]
mode = sections[1]
args = ''.join(sections[2:]).split(',')
comment_string = ''
print('Pokemon: %s Mode: %s
|
cloudkick/libcloud
|
libcloud/storage/drivers/dummy.py
|
Python
|
apache-2.0
| 14,148
| 0.00417
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import random
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ObjectDoesNotExistError
class DummyFileObject(file):
    """In-memory file-like object yielding pseudo-random data.

    NOTE(review): subclasses the Python 2 ``file`` builtin, so this class is
    Python 2 only.
    """

    def __init__(self, yield_count=5, chunk_len=10):
        # yield_count chunks of chunk_len characters each (see __len__).
        self._yield_count = yield_count
        self._chunk_len = chunk_len

    def read(self, size):
        # Generator; `size` is accepted for interface compatibility only.
        # (No trailing `raise StopIteration`: simply returning ends the
        # generator -- the explicit raise is an error under PEP 479.)
        i = 0
        while i < self._yield_count:
            yield self._get_chunk(self._chunk_len)
            i += 1

    def _get_chunk(self, chunk_len):
        # BUG FIX: the original iterated over random.randint(97, 120) -- a
        # single int, which raises TypeError. Build chunk_len random
        # characters (codes 97-120) so chunk sizes match __len__.
        chunk = ''.join(chr(random.randint(97, 120)) for _ in range(chunk_len))
        return chunk

    def __len__(self):
        return self._yield_count * self._chunk_len
class DummyIterator(object):
    """Minimal iterator over an optional list of items."""

    def __init__(self, data=None):
        self._data = data or []
        self._current_item = 0

    def __iter__(self):
        # BUG FIX: the iterator protocol requires __iter__ on the iterator
        # itself; without it `for x in DummyIterator(...)` raises TypeError.
        return self

    def next(self):
        if self._current_item == len(self._data):
            raise StopIteration
        value = self._data[self._current_item]
        self._current_item += 1
        return value

    # Python 3 compatibility alias for the Python 2 `next` spelling.
    __next__ = next
class DummyStorageDriver(StorageDriver):
"""
Dummy Storage driver.
>>> from libcloud.storage.drivers.dummy import DummyStorageDriver
>>> driver = DummyStorageDriver('key', 'secret')
>>> container = driver.create_container(container_name='test container')
>>> container
<Container: name=test container, provider=Dummy Storage Provider>
>>> container.name
'test container'
>>> container.extra['object_count']
0
"""
name = 'Dummy Storage Provider'
    def __init__(self, api_key, api_secret):
        # Credentials are accepted for interface compatibility but ignored;
        # all driver state lives in this in-memory container map:
        # {container_name: {'container': Container, 'objects': {name: Object}}}
        self._containers = {}
def get_meta_data(self):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> driver.get_meta_data()
{'object_count': 0, 'container_count': 0, 'bytes_used': 0}
>>> container = driver.create_container(container_name='test container 1')
>>> container = driver.create_container(container_name='test container 2')
>>> obj = container.upload_object_via_stream(object_name='test object', iterator=DummyFileObject(5, 10), extra={})
>>> driver.get_meta_data()
{'object_count': 1, 'container_count': 2, 'bytes_used': 50}
"""
container_count = len(self._containers)
object_count = sum([ len(self._containers[container]['objects']) for
container in self._containers ])
|
bytes_used = 0
for container in self._containers:
objects = self._containers[container]['objects']
for _, obj in objects.iteritems():
bytes_used += obj.size
return { 'container_count': int(container_count),
'object_count': int(object_count),
'bytes_used':
|
int(bytes_used) }
def list_containers(self):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> driver.list_containers()
[]
>>> container = driver.create_container(container_name='test container 1')
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> container.name
'test container 1'
>>> container = driver.create_container(container_name='test container 2')
>>> container
<Container: name=test container 2, provider=Dummy Storage Provider>
>>> container = driver.create_container(container_name='test container 2') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerAlreadyExistsError:
>>> container_list=driver.list_containers()
>>> sorted([container.name for container in container_list])
['test container 1', 'test container 2']
"""
return [container['container'] for container in
self._containers.values()]
def list_container_objects(self, container):
container = self.get_container(container.name)
return container.objects
def get_container(self, container_name):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerDoesNotExistError:
>>> container = driver.create_container(container_name='test container 1')
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> container.name
'test container 1'
>>> driver.get_container('test container 1')
<Container: name=test container 1, provider=Dummy Storage Provider>
"""
if container_name not in self._containers:
raise ContainerDoesNotExistError(driver=self, value=None,
container_name=container_name)
return self._containers[container_name]['container']
def get_object(self, container_name, object_name):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> driver.get_object('unknown', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerDoesNotExistError:
>>> container = driver.create_container(container_name='test container 1')
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> driver.get_object('test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ObjectDoesNotExistError:
>>> obj = container.upload_object_via_stream(object_name='test object', iterator=DummyFileObject(5, 10), extra={})
>>> obj
<Object: name=test object, size=50, hash=None, provider=Dummy Storage Provider ...>
"""
container = self.get_container(container_name)
container_objects = self._containers[container_name]['objects']
if object_name not in container_objects:
raise ObjectDoesNotExistError(object_name=object_name, value=None,
driver=self)
return container_objects[object_name]
def create_container(self, container_name):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> container = driver.create_container(container_name='test container 1')
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> container = driver.create_container(container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerAlreadyExistsError:
"""
if container_name in self._containers:
raise ContainerAlreadyExistsError(container_name=container_name,
value=None, driver=self)
extra = { 'object_count': 0 }
container = Container(name=container_name, extra=extra, driver=self)
self._containers[container_name] = { 'container': container,
'objects': {}
}
return container
def delete_container(self, container):
"""
>>> dri
|
helenwarren/pied-wagtail
|
wagtail/wagtaildocs/views/documents.py
|
Python
|
bsd-3-clause
| 5,054
| 0.001979
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import permission_required
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtaildocs.models import Document
from wagtail.wagtaildocs.forms import DocumentForm
@permission_required('wagtaildocs.add_document')
@vary_on_headers('X-Requested-With')
def index(request):
    """List documents with optional ordering, search and pagination.

    Renders a results partial for AJAX requests and the full index page
    otherwise.
    """
    # Get documents
    documents = Document.objects.all()

    # Ordering: only the two whitelisted orderings are honoured.
    if 'ordering' in request.GET and request.GET['ordering'] in ['title', '-created_at']:
        ordering = request.GET['ordering']
    else:
        ordering = '-created_at'
    documents = documents.order_by(ordering)

    # Permissions
    if not request.user.has_perm('wagtaildocs.change_document'):
        # restrict to the user's own documents
        documents = documents.filter(uploaded_by_user=request.user)

    # Search: a valid ?q= replaces the queryset with search results,
    # re-applying the same ownership restriction via a search filter.
    query_string = None
    if 'q' in request.GET:
        form = SearchForm(request.GET, placeholder=_("Search documents"))
        if form.is_valid():
            query_string = form.cleaned_data['q']
            if not request.user.has_perm('wagtaildocs.change_document'):
                # restrict to the user's own documents
                documents = Document.search(query_string, filters={'uploaded_by_user_id': request.user.id})
            else:
                documents = Document.search(query_string)
    else:
        form = SearchForm(placeholder=_("Search documents"))

    # Pagination: bad page numbers fall back to the first/last page.
    p = request.GET.get('p', 1)
    paginator = Paginator(documents, 20)
    try:
        documents = paginator.page(p)
    except PageNotAnInteger:
        documents = paginator.page(1)
    except EmptyPage:
        documents = paginator.page(paginator.num_pages)

    # Create response
    if request.is_ajax():
        return render(request, 'wagtaildocs/documents/results.html', {
            'ordering': ordering,
            'documents': documents,
            'query_string': query_string,
            'is_searching': bool(query_string),
        })
    else:
        return render(request, 'wagtaildocs/documents/index.html', {
            'ordering': ordering,
            'documents': documents,
            'query_string': query_string,
            'is_searching': bool(query_string),
            'search_form': form,
            'popular_tags': Document.popular_tags(),
        })
@permission_required('wagtaildocs.add_document')
def add(request):
    """Upload a new document owned by the current user."""
    # BUG FIX: test request.method rather than the truthiness of
    # request.POST -- a POST whose body carries only file data leaves
    # request.POST empty and previously fell through to a blank form.
    if request.method == 'POST':
        doc = Document(uploaded_by_user=request.user)
        form = DocumentForm(request.POST, request.FILES, instance=doc)
        if form.is_valid():
            form.save()
            messages.success(request, _("Document '{0}' added.").format(doc.title))
            return redirect('wagtaildocs_index')
        else:
            messages.error(request, _("The document could not be saved due to errors."))
    else:
        form = DocumentForm()

    return render(request, "wagtaildocs/documents/add.html", {
        'form': form,
    })
@permission_required('wagtailadmin.access_admin')  # more specific permission tests are applied within the view
def edit(request, document_id):
    """Edit an existing document, replacing its file if a new one is given."""
    doc = get_object_or_404(Document, id=document_id)

    if not doc.is_editable_by_user(request.user):
        raise PermissionDenied

    # BUG FIX: test request.method rather than the truthiness of
    # request.POST (file-only POST bodies leave request.POST empty).
    if request.method == 'POST':
        original_file = doc.file
        form = DocumentForm(request.POST, request.FILES, instance=doc)
        if form.is_valid():
            if 'file' in form.changed_data:
                # if providing a new document file, delete the old one.
                # NB Doing this via original_file.delete() clears the file field,
                # which definitely isn't what we want...
                original_file.storage.delete(original_file.name)
            doc = form.save()
            messages.success(request, _("Document '{0}' updated").format(doc.title))
            return redirect('wagtaildocs_index')
        else:
            messages.error(request, _("The document could not be saved due to errors."))
    else:
        form = DocumentForm(instance=doc)

    return render(request, "wagtaildocs/documents/edit.html", {
        'document': doc,
        'form': form,
    })
@permission_required('wagtailadmin.access_admin')  # more specific permission tests are applied within the view
def delete(request, document_id):
    """Confirm (GET) and perform (POST) deletion of a document.

    (Reconstructed: the original span was corrupted in two places.)
    """
    doc = get_object_or_404(Document, id=document_id)

    if not doc.is_editable_by_user(request.user):
        raise PermissionDenied

    # BUG FIX: only a real POST performs the destructive action; testing
    # request.POST truthiness is the wrong check for the request method.
    if request.method == 'POST':
        doc.delete()
        messages.success(request, _("Document '{0}' deleted.").format(doc.title))
        return redirect('wagtaildocs_index')

    return render(request, "wagtaildocs/documents/confirm_delete.html", {
        'document': doc,
    })
|
jsilhan/dnf-plugins-extras
|
tests/test_repoclosure.py
|
Python
|
gpl-2.0
| 3,893
| 0.002312
|
# Copyright (C) 2015 Igor Gnatenko
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from tests.support import mock
import dnf.pycomp
import os
import repoclosure
import tests.support as support
import unittest
class TestRepoClosureFunctions(support.TestCase):
    """Unit tests for the repoclosure plugin command.

    (Reconstructed: one `support.command_run(self.cmd, args)` call in
    test_check_option was corrupted in the source span.)
    """

    def setUp(self):
        self.cmd = repoclosure.RepoClosureCommand(
            support.CliStub(support.BaseStub()))
        # Fixture RPMs live under tests/resources/repoclosure/.
        self.path = os.path.join(os.path.dirname(__file__), "resources/repoclosure/")

    def test_repoid_option(self):
        # --repo restricts the enabled repositories to the named one.
        args = ["--repo", "main"]
        self.cmd.base.repos.add(support.RepoStub("main"))
        self.cmd.base.repos.add(support.RepoStub("main_fail"))
        support.command_configure(self.cmd, args)
        repos = [repo.id for repo in self.cmd.base.repos.iter_enabled()]
        self.assertEqual(["main"], repos)

    def test_check_option(self):
        # --check limits which repos are checked for broken dependencies.
        args = ["--check", "@commandline"]
        self.cmd.base.repos.add(support.RepoStub("main"))
        self.cmd.base.add_remote_rpms([os.path.join(self.path,
                                                    "noarch/foo-4-6.noarch.rpm")])
        with mock.patch("sys.stdout", new_callable=dnf.pycomp.StringIO) as stdout:
            support.command_run(self.cmd, args)
        expected_out = ["package: foo-4-6.noarch from @commandline",
                        " unresolved deps:",
                        " bar = 4-6"]
        self.assertEqual(stdout.getvalue()[:-1], "\n".join(expected_out))
        # Checking a repo the broken package is not in reports nothing.
        args = ["--check", "main"]
        with mock.patch("sys.stdout", new_callable=dnf.pycomp.StringIO) as stdout:
            support.command_run(self.cmd, args)
        self.assertEmpty(stdout.getvalue())

    def test_pkg_option(self):
        # --pkg limits the closure check to the named package.
        args = ["--pkg", "foo"]
        self.cmd.base.add_remote_rpms([os.path.join(self.path,
                                                    "noarch/foo-4-6.noarch.rpm")])
        with mock.patch("sys.stdout", new_callable=dnf.pycomp.StringIO) as stdout:
            support.command_run(self.cmd, args)
        expected_out = ["package: foo-4-6.noarch from @commandline",
                        " unresolved deps:",
                        " bar = 4-6"]
        self.assertEqual(stdout.getvalue()[:-1], "\n".join(expected_out))
        # A --pkg that matches nothing broken reports nothing.
        args = ["--pkg", "bar"]
        with mock.patch("sys.stdout", new_callable=dnf.pycomp.StringIO) as stdout:
            support.command_run(self.cmd, args)
        self.assertEmpty(stdout.getvalue())

    def test_base(self):
        # With no options, all broken packages are reported.
        args = []
        self.cmd.base.add_remote_rpms([os.path.join(self.path,
                                                    "noarch/foo-4-6.noarch.rpm")])
        with mock.patch("sys.stdout", new_callable=dnf.pycomp.StringIO) as stdout:
            support.command_run(self.cmd, args)
        expected_out = ["package: foo-4-6.noarch from @commandline",
                        " unresolved deps:",
                        " bar = 4-6"]
        self.assertEqual(stdout.getvalue()[:-1], "\n".join(expected_out))
|
prospwro/odoo
|
addons/irsid_edu/models/module_work.py
|
Python
|
agpl-3.0
| 13,057
| 0.013965
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution Addon
# Copyright (C) 2009-2013 IRSID (<http://irsid.ru>),
# Paul Korotkov (korotkov.paul@gmail.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from addons.irsid_base.models.doc import DOC_STATES
class edu_module_work(models.Model):
_name = 'edu.module.work'
_description = 'Module Work'
_order = 'module, time, type'
# _rec_name = 'code'
# _track = {
# 'state': {
# 'irsid_edu.mt_module_work_updated': lambda self, cr, uid, obj, ctx=None: True,
# },
# }
# # Naming Functions
    @api.depends('time.code','module.code','type.code')
    def _compute_code(self):
        # Code is the slash-joined codes of the related records:
        # <module>/<time>/<type>.
        # NOTE(review): the commented-out legacy version below used the order
        # <time>/<module>/<type>; confirm the current order is intentional.
        self.code = self.module.code + '/' + self.time.code + '/' + self.type.code
# result = {}
# for work in self.browse(cr, uid, ids, context=context):
# result[work.id] = work.time.code + '/' + work.module.code + '/' + work.type.code
# return result
# # Update Functions
# def _update_list_by_time(self, cr, uid, ids, context=None):
# return self.pool.get('edu.module.work').search(cr, uid, [('time', 'in', ids)], context=context)
#
# def _update_list_by_module(self, cr, uid, ids, context=None):
# return self.pool.get('edu.module.work').search(cr, uid, [('module', 'in', ids)], context=context)
#
# def _update_list_by_type(self, cr, uid, ids, context=None):
# return self.pool.get('edu.module.work').search(cr, uid, [('type', 'in', ids)], context=context)
# # Onchange Functions
# def onchange_module(self, cr, uid, ids, module, context=None):
# if module:
# module = self.pool.get('edu.module').browse(cr, uid, module, context=context)
# return {'value': {
# 'program': module.program.id,
# 'location_id': module.location_id.id,
# 'employee': module.employee.id
# }}
# return {'value': {}}
#
# def onchange_time(self, cr, uid, ids, time, context=None):
# if time:
# time = self.pool.get('edu.time').browse(cr, uid, time, context=context)
# return {'value': {
# 'period': time.period.id,
# 'stage_id': time.stage_id.id,
# }}
# return {'value': {}}
#
# def onchange_type(self, cr, uid, ids, type, context=None):
# if type:
# type = self.pool.get('edu.work.type').browse(cr, uid, type, context=context)
# return {'value': {
# 'scale': type.scale.id,
# 'st_hours': type.st_hours,
# 'seance_hours': type.seance_hours,
# 'emp_hours': type.emp_hours,
# 'ind_work': type.ind_work,
# }}
# return {'value': {}}
#
# def onchange_seance_hours(self, cr, uid, ids, ind_work, seance_hours, context=None):
# if not ind_work:
# return {'value': {
# 'emp_hours': seance_hours,
# }}
# return {'value': {}}
# # Other Functions
# def _hours_get(self, cr, uid, ids, field_names, args, context=None):
# res = {}
# cr.execute("""
# SELECT
# work,
# SUM(st_hours),
# SUM(seance_hours),
# SUM(emp_hours)
# FROM
# edu_module_seance
# WHERE
# work IN %s
# GROUP BY
# work
# """,(tuple(ids),))
# hours = dict(map(lambda x: (x[0], (x[1],x[2],x[3])), cr.fetchall()))
# for work in self.browse(cr, uid, ids, context=context):
# res[work.id] = dict(zip(('eff_st_hours','eff_seance_hours','eff_emp_hours'),hours.get(work.id, (0.0,0.0,0.0))))
# return res
# # OpenChatter functions
# def _needaction_domain_get(self, cr, uid, context=None):
# if self.pool.get('res.users').has_group(cr, uid, 'irsid_edu.group_edu_prorector'):
# dom = [('state', '=', 'validated')]
# return dom
# if self.pool.get('res.users').has_group(cr, uid, 'irsid_edu.group_edu_manager'):
# dom = [('state', '=', 'confirmed')]
# return dom
# if self.pool.get('res.users').has_group(cr, uid, 'irsid_edu.group_edu_teacher'):
# dom = [('state', 'in', ['draft'])]
# return dom
# return False
@api.depends('seances.st_hours','seances.seance_hours','seances.emp_hours')
def _compute_hours(self):
self.eff_st_hours = sum(seance.st_hours for seance in self.seances)
self.eff_seance_hours = sum(seance.seance_hours for seance in self.seances)
self.eff_emp_hours = sum(seance.emp_hours for seance in self.seances)
# Fields
module = fields.Many2one(
comodel_name = 'edu.module',
string = 'Module',
required = True,
ondelete = 'cascade',
readonly = True,
states = {'draft': [('readonly', False)]},
)
program = fields.Many2one(
related = 'module.program',
string = 'Program',
readonly = True,
store = True,
)
time = fields.Many2one(
comodel_name = 'edu.time',
string = 'Time',
readonly = True,
states = {'draft': [('readonly', False)]},
)
stage = fields.Many2one(
|
related = 'time.stage',
string = 'Stage',
reado
|
nly = True,
store = True,
)
section = fields.Many2one(
related = 'time.section',
string = 'Section',
readonly = True,
store = True,
)
subsection = fields.Many2one(
related = 'time.subsection',
string = 'Subsection',
readonly = True,
store = True,
)
type = fields.Many2one(
comodel_name = 'edu.work.type',
string = 'Type',
required = True,
readonly = True,
states = {'draft': [('readonly', False)]},
)
scale = fields.Many2one(
comodel_name = 'edu.scale',
string = 'Scale',
readonly = True,
states = {'draft': [('readonly', False)]},
)
ind_work = fields.Boolean(
string = 'Individual Work',
readonly = True,
states = {'draft': [('readonly', False)]},
)
location = fields.Many2one(
comodel_name = 'stock.location',
string = 'Location',
readonly = True,
states = {'draft': [('readonly', False)]},
)
employee = fields.Many2one(
comodel_name = 'hr.employee',
string = 'Employee',
readonly = True,
states = {'draft': [('readonly', False)]},
)
seances = fields.One2many(
comodel_name = 'edu.module.seance',
inverse_name = 'work',
string = 'Seances',
readonly = True,
states = {'draft': [('readonly', False)]},
)
st_hours = fields.Float(
string = 'Student Hours',
required=True,
readonly = True,
states = {'draft': [('readonly', False)]},
)
seance_hours = fields.Float(
string = 'Seance Hours',
required=True,
readonly = True,
states = {'draft': [('readonly', False)]},
)
emp_hours = fields.Float(
string = 'Employee Hours',
required=True,
readonly = True,
st
|
google/pigweed
|
pw_build_mcuxpresso/py/pw_build_mcuxpresso/__main__.py
|
Python
|
apache-2.0
| 1,865
| 0
|
#!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Command line interface for mcuxpresso_builder."""
import argparse
import pathlib
import sys
from pw_build_mcuxpresso import components
def _parse_args() -> argparse.Namespace:
    """Setup argparse and parse command line args.

    (Reconstructed: the original span was corrupted in two places.)
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='command',
                                       metavar='<command>',
                                       required=True)

    project_parser = subparsers.add_parser(
        'project', help='output components of an MCUXpresso project')
    project_parser.add_argument('manifest_filename', type=pathlib.Path)
    project_parser.add_argument('--include', type=str, action='append')
    project_parser.add_argument('--exclude', type=str, action='append')
    project_parser.add_argument('--prefix', dest='path_prefix', type=str)

    return parser.parse_args()
def main():
    """Command line entry point: dispatch to the selected subcommand."""
    args = _parse_args()
    if args.command == 'project':
        project_kwargs = {
            'include': args.include,
            'exclude': args.exclude,
            'path_prefix': args.path_prefix,
        }
        components.project(args.manifest_filename, **project_kwargs)
    sys.exit(0)


if __name__ == '__main__':
    main()
|
IronSlayer/bladeRF-Rpi2
|
bladeRF_transceiver1.py
|
Python
|
gpl-3.0
| 12,438
| 0.004181
|
#!/usr/bin/env python2
##################################################
# GNU Radio Python Flow Graph
# Title: bladeRF_transceiver
# Author: Renzo Chan Rios
# Generated: Sun Mar 13 00:27:54 2016
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import cc1111
import math
import osmosdr
import time
class bladeRF_transceiver(gr.top_block):
    def __init__(self):
        """Build the transceiver flow graph.

        TX chain: CC1111 packet encoder -> GMSK modulator -> osmosdr sink.
        RX chain: osmosdr source -> frequency-translating filter -> throttle
        -> low-pass xlating filter -> rational resampler -> quadrature demod
        -> clock recovery -> binary slicer -> access-code correlator ->
        CC1111 packet decoder -> null sink (payload is read via
        msg_sink_msgq_out).
        """
        gr.top_block.__init__(self, "bladeRF_transceiver")

        ##################################################
        # Variables
        ##################################################
        self.symbole_rate = symbole_rate = 10e3
        self.samp_rate = samp_rate = 1e6
        self.rat_interop = rat_interop = 8
        self.rat_decim = rat_decim = 5
        self.firdes_transition_width = firdes_transition_width = 15000
        self.firdes_decim = firdes_decim = 4
        self.firdes_cuttoff = firdes_cuttoff = 21e3
        self.tx_rf_gain = tx_rf_gain = 10
        self.tx_bb_gain = tx_bb_gain = -20
        # Samples per symbol as seen after the RX decimate/resample chain.
        self.samp_per_sym_source = samp_per_sym_source = ((samp_rate/2/firdes_decim)*rat_interop/rat_decim) / symbole_rate
        self.samp_per_sym = samp_per_sym = int(samp_rate / symbole_rate)
        self.rx_rf_gain = rx_rf_gain = 3
        self.rx_bb_gain = rx_bb_gain = 20
        self.preamble = preamble = '0101010101010101'
        # Message queues: packets to send are pushed into msg_source_msgq_in;
        # decoded packets appear on msg_sink_msgq_out.
        self.msg_source_msgq_in = msg_source_msgq_in = gr.msg_queue(2)
        self.msg_sink_msgq_out = msg_sink_msgq_out = gr.msg_queue(2)
        self.frequency_tx = frequency_tx = 450e6
        # RX is tuned frequency_shift below the target and shifted back
        # digitally by the first xlating filter.
        self.frequency_shift = frequency_shift = 520000
        self.frequency_rx = frequency_rx = 450.0e6
        self.firdes_filter = firdes_filter = firdes.low_pass(1,samp_rate/2, firdes_cuttoff, firdes_transition_width)
        self.bit_per_sym = bit_per_sym = 1
        self.bandwith = bandwith = 6e6
        self.access_code = access_code = '11010011100100011101001110010001'

        ##################################################
        # Blocks
        ##################################################
        self.xlating_fir_filter_1 = filter.freq_xlating_fir_filter_ccc(2, (1, ), frequency_shift, samp_rate)
        self.xlating_fir_filter_0 = filter.freq_xlating_fir_filter_ccc(firdes_decim, (firdes_filter), 0, samp_rate/2)
        self.throttle = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate/2,True)
        self.rational_resampler = filter.rational_resampler_ccc(
                interpolation=rat_interop,
                decimation=rat_decim,
                taps=None,
                fractional_bw=None,
        )
        self.quadrature_demod = analog.quadrature_demod_cf(2)
        self.osmosdr_source = osmosdr.source( args="numchan=" + str(1) + " " + "bladerf=1" )
        self.osmosdr_source.set_time_now(osmosdr.time_spec_t(time.time()), osmosdr.ALL_MBOARDS)
        self.osmosdr_source.set_sample_rate(samp_rate)
        self.osmosdr_source.set_center_freq(frequency_rx-frequency_shift, 0)
        self.osmosdr_source.set_freq_corr(0, 0)
        self.osmosdr_source.set_dc_offset_mode(0, 0)
        self.osmosdr_source.set_iq_balance_mode(2, 0)
        self.osmosdr_source.set_gain_mode(False, 0)
        self.osmosdr_source.set_gain(rx_rf_gain, 0)
        self.osmosdr_source.set_if_gain(0, 0)
        self.osmosdr_source.set_bb_gain(rx_bb_gain, 0)
        self.osmosdr_source.set_antenna("", 0)
        self.osmosdr_source.set_bandwidth(bandwith, 0)
        self.osmosdr_sink = osmosdr.sink( args="numchan=" + str(1) + " " + "bladerf=1" )
        self.osmosdr_sink.set_sample_rate(samp_rate)
        self.osmosdr_sink.set_center_freq(frequency_tx, 0)
        self.osmosdr_sink.set_freq_corr(0, 0)
        self.osmosdr_sink.set_gain(tx_rf_gain, 0)
        self.osmosdr_sink.set_if_gain(0, 0)
        self.osmosdr_sink.set_bb_gain(tx_bb_gain, 0)
        self.osmosdr_sink.set_antenna("", 0)
        self.osmosdr_sink.set_bandwidth(bandwith, 0)
        self.gmsk_mod = digital.gmsk_mod(
                samples_per_symbol=int(samp_per_sym),
                bt=0.5,
                verbose=False,
                log=False,
        )
        self.correlate_access_code = digital.correlate_access_code_bb(access_code, 4)
        self.clock_recovery = digital.clock_recovery_mm_ff(samp_per_sym_source*(1+0.0), 0.25*0.175*0.175, 0.5, 0.175, 0.005)
        self.cc1111_packet_encoder = cc1111.cc1111_packet_mod_base(cc1111.cc1111_packet_encoder(
                        samples_per_symbol=samp_per_sym,
                        bits_per_symbol=bit_per_sym,
                        preamble=preamble,
                        access_code=access_code,
                        pad_for_usrp=True,
                        do_whitening=True,
                        add_crc=True
                ),
                source_queue=msg_source_msgq_in
        )
        self.cc1111_packet_decoder = cc1111.cc1111_packet_decoder(msg_sink_msgq_out,True, True, False, True)
        self.blocks_null_sink_0 = blocks.null_sink(gr.sizeof_char*1)
        self.binary_slicer = digital.binary_slicer_fb()

        ##################################################
        # Connections
        ##################################################
        self.connect((self.binary_slicer, 0), (self.correlate_access_code, 0))
        self.connect((self.cc1111_packet_decoder, 0), (self.blocks_null_sink_0, 0))
        self.connect((self.cc1111_packet_encoder, 0), (self.gmsk_mod, 0))
        self.connect((self.clock_recovery, 0), (self.binary_slicer, 0))
        self.connect((self.correlate_access_code, 0), (self.cc1111_packet_decoder, 0))
        self.connect((self.gmsk_mod, 0), (self.osmosdr_sink, 0))
        self.connect((self.osmosdr_source, 0), (self.xlating_fir_filter_1, 0))
        self.connect((self.quadrature_demod, 0), (self.clock_recovery, 0))
        self.connect((self.rational_resampler, 0), (self.quadrature_demod, 0))
        self.connect((self.throttle, 0), (self.xlating_fir_filter_0, 0))
        self.connect((self.xlating_fir_filter_0, 0), (self.rational_resampler, 0))
        self.connect((self.xlating_fir_filter_1, 0), (self.throttle, 0))
def get_symbole_rate(self):
    """Return the configured symbol rate (symbols/s)."""
    return self.symbole_rate

def set_symbole_rate(self, symbole_rate):
    """Set the symbol rate and recompute both samples-per-symbol values."""
    self.symbole_rate = symbole_rate
    # TX path: integer samples per symbol at the full sample rate.
    self.set_samp_per_sym(int(self.samp_rate / self.symbole_rate))
    # RX path: fractional samples per symbol after filtering/resampling.
    self.set_samp_per_sym_source(((self.samp_rate / 2 / self.firdes_decim) * self.rat_interop / self.rat_decim) / self.symbole_rate)
def get_samp_rate(self):
    """Return the hardware sample rate (samples/s)."""
    return self.samp_rate

def set_samp_rate(self, samp_rate):
    """Set the sample rate and propagate it to all dependent blocks."""
    self.samp_rate = samp_rate
    # Rebuild the low-pass channel filter taps for the new rate.
    self.set_firdes_filter(firdes.low_pass(1, self.samp_rate / 2, self.firdes_cuttoff, self.firdes_transition_width))
    self.set_samp_per_sym(int(self.samp_rate / self.symbole_rate))
    self.set_samp_per_sym_source(((self.samp_rate / 2 / self.firdes_decim) * self.rat_interop / self.rat_decim) / self.symbole_rate)
    # Push the new rate into the SDR hardware blocks and the throttle.
    self.osmosdr_sink.set_sample_rate(self.samp_rate)
    self.osmosdr_source.set_sample_rate(self.samp_rate)
    self.throttle.set_sample_rate(self.samp_rate / 2)
def get_rat_interop(self):
    """Return the rational resampler interpolation factor."""
    return self.rat_interop

def set_rat_interop(self, rat_interop):
    """Set the interpolation factor and recompute the RX samples-per-symbol."""
    self.rat_interop = rat_interop
    self.set_samp_per_sym_source(((self.samp_rate / 2 / self.firdes_decim) * self.rat_interop / self.rat_decim) / self.symbole_rate)

def get_rat_decim(self):
    """Return the rational resampler decimation factor."""
    return self.rat_decim

def set_rat_decim(self, rat_decim):
    """Set the decimation factor and recompute the RX samples-per-symbol."""
    self.rat_decim = rat_decim
    self.set_samp_per_sym_source(((self.samp_rate / 2 / self.firdes_decim) * self.rat_interop / self.rat_decim) / self.symbole_rate)
def get_firdes_transition_width(self):
    """Return the low-pass filter transition width (Hz)."""
    return self.firdes_transition_width
def set_firdes_transition_width(self, firdes_transition_width):
self.firdes_transition_width = firdes_transition_width
self.set_fir
|
WebArchivCZ/Seeder
|
Seeder/www/urls.py
|
Python
|
mit
| 3,183
| 0.002199
|
from django.urls import path, re_path, include
from django.utils.translation import ugettext_lazy as _
from . import views as www
# Public www URL patterns.  Translatable patterns are wrapped in
# ugettext_lazy so the matched URL can be localised per language.  All pattern
# literals are raw strings: escapes such as \w in non-raw strings raise
# DeprecationWarning ("invalid escape sequence") on Python 3.6+.
urlpatterns = [
    re_path(r'^$', www.Index.as_view(), name='index'),

    re_path(_(r'^topic_collections_url$'), www.TopicCollections.as_view(),
            name='topic_collections'),
    re_path(_(r'^topic_collections_url/(?P<slug>[\w-]+)$'),
            www.CollectionDetail.as_view(),
            name='collection_detail'),

    re_path(_(r'^about_url$'), www.About.as_view(), name='about'),
    re_path(_(r'^more_about_url$'), www.MoreAbout.as_view(), name='more_about'),
    re_path(_(r'^about_harvests_url$'),
            www.AboutHarvest.as_view(), name='about_harvests'),
    re_path(_(r'^about_terminology_url$'),
            www.AboutTerminology.as_view(), name='about_terminology'),
    re_path(_(r'^about_documents_url$'),
            www.AboutDocuments.as_view(), name='about_documents'),
    re_path(_(r'^about_graphics_url$'),
            www.AboutGraphics.as_view(), name='about_graphics'),
    re_path(_(r'^about_contact_url$'),
            www.AboutContact.as_view(), name='about_contact'),
    re_path(_(r'^about_faq_url$'), www.AboutFAQ.as_view(), name='about_faq'),

    re_path(_(r'^categories_url$'), www.Categories.as_view(), name='categories'),
    re_path(_(r'^categories_url/(?P<slug>[\w-]+)$'),
            www.CategoryDetail.as_view(),
            name='category_detail'),
    re_path(_(r'^categories_url/(?P<category_slug>[\w-]+)/(?P<slug>[\w-]+)$'),
            www.SubCategoryDetail.as_view(),
            name='sub_category_detail'),

    re_path(_(r'^change_list_view/(?P<list_type>visual|text)$'),
            www.ChangeListView.as_view(),
            name='change_list_view'),
    re_path(_(r'^keyword_url/(?P<slug>[\w-]+)$'),
            www.KeywordViews.as_view(),
            name='keyword'),
    re_path(_(r'^search_url$'), www.SearchRedirectView.as_view(),
            name='search_redirect'),
    re_path(_(r'^search_url/(?P<query>.*)'), www.SearchView.as_view(),
            name='search'),

    re_path(_(r'^www_source_url/(?P<slug>[\w-]+)$'),
            www.SourceDetail.as_view(),
            name='source_detail'),
    re_path(_(r'^www_nominate_url$'), www.Nominate.as_view(), name='nominate'),
    re_path(_(r'^www_nominate_success_url$'), www.NominateSuccess.as_view(),
            name='nominate_success'),
    re_path(_(r'^www_nominate_url/contract_url$'),
            www.NominateContractView.as_view(),
            name='nominate_contract'),
    re_path(_(r'^www_nominate_url/cooperation_url$'),
            www.NominateCooperationView.as_view(),
            name='nominate_cooperation'),
    re_path(_(r'^www_nominate_url/creative_commons_url$'),
            www.NominateCreativeCommonsView.as_view(),
            name='nominate_creative_commons'),
    re_path(_(r'^www_nominate_url/source_selection_url$'),
            www.NominateSourceSelectionView.as_view(),
            name='nominate_source_selection'),
    re_path(_(r'^disclaimer_url$'),
            www.DisclaimerView.as_view(),
            name='disclaimer'),
    re_path(_(r'^embed_url$'),
            www.EmbedView.as_view(),
            name='embed'),
]
|
dardevelin/rhythmbox-gnome-fork
|
plugins/lyrics/WinampcnParser.py
|
Python
|
gpl-2.0
| 3,373
| 0.024311
|
# -
|
*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2007 Austin <austiny@sohu.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either v
|
ersion 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import urllib
import re
import rb
from xml.dom import minidom
def detect_charset(s):
    """Best-effort re-decode of *s* across the site's mixed charsets.

    Tries each candidate charset in order and returns the first successful
    round-trip; returns *s* unchanged when every attempt fails.
    Python 2 only (uses the ``unicode`` builtin).
    NOTE(review): the utf-8 -> charset -> gbk round-trip looks odd but appears
    deliberate for winampcn.com's mixed encodings -- confirm before changing.
    """
    charsets = ('iso-8859-1', 'gbk', 'utf-8')
    for charset in charsets:
        try:
            return unicode(unicode(s, 'utf-8').encode(charset), 'gbk')
        except:
            continue
    return s
class WinampcnParser(object):
    """Lyrics fetcher for winampcn.com (Rhythmbox lyrics plugin, Python 2).

    Flow: search() builds the GBK-encoded query URL -> got_lyrics() parses
    the XML reply and extracts the lyric-file URL -> parse_lyrics() strips
    LRC timestamps and re-encodes to UTF-8 before invoking the callback.
    """

    def __init__(self, artist, title):
        self.artist = artist
        self.title = title

    def search(self, callback, *data):
        """Query winampcn.com asynchronously; *callback* receives the lyrics."""
        # encode search string
        # The site expects GBK-encoded, space-stripped query parameters.
        title_encode = urllib.quote(detect_charset(self.title).encode('gbk').replace(' ', ''))
        artist_encode = urllib.quote(detect_charset(self.artist).encode('gbk').replace(' ',''))
        url = 'http://www.winampcn.com/lyrictransfer/get.aspx?song=%s&artist=%s&lsong=%s&Datetime=20060601' % (title_encode, artist_encode, title_encode)
        loader = rb.Loader()
        loader.get_url (url, self.got_lyrics, callback, *data)

    def got_lyrics(self, xmltext, callback, *data):
        """Parse the search-response XML and fetch the lyric file it names."""
        # retrieve xml content
        if xmltext is None:
            callback (None, *data)
            return
        try:
            # Server declares gb2312; transcode so minidom sees real UTF-8.
            xmltext = xmltext.decode('gbk').encode('UTF-8')
            xmltext = xmltext.replace('encoding="gb2312"', 'encoding="UTF-8"')
            xmldoc = minidom.parseString(xmltext)
            root = xmldoc.documentElement
            lrcurl = root.getElementsByTagName('LyricUrl')[0].childNodes[0].data
            if lrcurl is None:
                callback (xmltext, *data)
                return
            # download the lyrics file
            lrcurl_encode = urllib.quote(detect_charset(lrcurl).encode('gbk'))
            lrcurl_encode = lrcurl_encode.replace('%3A', ':');
            loader = rb.Loader()
            loader.get_url (lrcurl_encode, self.parse_lyrics, callback, *data)
        except:
            callback (None, *data)

    def parse_lyrics(self, lyrics, callback, *data):
        """Strip LRC [mm:ss] tags, convert to UTF-8, and invoke *callback*."""
        if lyrics is None:
            callback (None, *data)
            return
        # transform it into plain text
        lrcplaintext = lyrics
        try:
            lrcplaintext = re.sub('\[.*?\]', '', lrcplaintext)
            lrcplaintext = lrcplaintext.decode('gbk').encode('UTF-8')
        except:
            callback (lrcplaintext, *data)
            return
        # callback and show
        lrcplaintext += "\n\nLyrics provided by winampcn.com"
        callback(lrcplaintext, *data)
|
modulo-/knoydart
|
api/api_0/apiRequest/Welcome.py
|
Python
|
apache-2.0
| 153
| 0
|
from
|
flask.ext import restful
from . import api
class Welcome(restful.Resource):
    """Root API resource: serves the static landing page."""

    def get(self):
        """Return the bundled index.html via the API's static-file handler."""
        return api.send_static_file('index.html')
|
YilunZhou/Klampt
|
Python/klampt/src/rootfind.py
|
Python
|
bsd-3-clause
| 3,866
| 0.016296
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
Python interface to KrisLibrary nonlinear, multidimensional root finding routines
"""
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_rootfind', [dirname(__file__)])
except ImportError:
import _rootfind
return _rootfind
if fp is not None:
try:
_mod = imp.load_module('_rootfind', fp, pathname, description)
finally:
fp.close()
return _mod
_rootfind = swig_import_helper()
del swig_import_helper
else:
import _rootfind
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    """SWIG helper: route attribute writes through the generated setter table.

    With static=1 (the default), writing an attribute that has no generated
    setter raises AttributeError instead of being added dynamically,
    mimicking the fixed layout of the wrapped C++ object.
    """
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    """SWIG helper: attribute setter that allows dynamic attributes (static=0)."""
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    """SWIG helper: read attributes through the generated getter table."""
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    """SWIG helper: repr showing the proxied C++ object, tolerant of failure."""
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def setFTolerance(*args):
"""
setFTolerance(double tolf)
Sets the termination threshold for the change in f.
"""
return _rootfind.setFTolerance(*args)
def setXTolerance(*args):
"""
setXTolerance(double tolx)
Sets the termination threshold for the change in x.
"""
return _rootfind.setXTolerance(*args)
def setVectorField(*args):
"""
setVectorField(PyObject * pVFObj) -> int
Sets the vector field object, returns 0 if pVFObj = NULL, 1 otherwise.
See vectorfield.py for an abstract base class that can be overridden
to produce one of these objects.
"""
return _rootfind.setVectorField(*args)
def findRoots(*args):
"""
findRoots(PyObject * startVals, int iter) -> PyObject *
Performs unconstrained root finding for up to iter iterations Return
values is a tuple indicating: (0,x,n) : convergence reached in x
(1,x,n) : convergence reached in f
(2,x,n) : divergence
(3,x,n) : degeneration of gradient (local extremum or saddle point)
(4,x,n) : maximum iterations reached
(5,x,n) : numerical error occurred where x is the final point and n is
the number of iterations used
"""
return _rootfind.findRoots(*args)
def findRootsBounded(*args):
"""
findRootsBounded(PyObject * startVals, PyObject * boundVals, int iter) -> PyObject *
|
Same as findRoots, but with given bounds (xmin,xmax)
"""
return _rootfind.findRootsBounded(*args)
def destroy():
"""
destroy()
destroys internal data structures
destroys internal
|
data structures
"""
return _rootfind.destroy()
# This file is compatible with both classic and new-style classes.
|
sander76/home-assistant
|
homeassistant/components/dte_energy_bridge/sensor.py
|
Python
|
apache-2.0
| 3,635
| 0.00055
|
"""Support for monitoring energy usage using the DTE energy bridge."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import CONF_NAME, HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_IP_ADDRESS = "ip"
CONF_VERSION = "version"
DEFAULT_NAME = "Current Energy Usage"
DEFAULT_VERSION = 1
ICON = "mdi:flash"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.All(
vol.Coerce(int), vol.Any(1, 2)
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the DTE energy bridge sensor from a validated config entry."""
    ip_address = config[CONF_IP_ADDRESS]
    sensor = DteEnergyBridgeSensor(ip_address, config[CONF_NAME], config[CONF_VERSION])
    # True: poll update() immediately so the entity starts with a state.
    add_entities([sensor], True)
class DteEnergyBridgeSensor(SensorEntity):
    """Implementation of the DTE Energy Bridge sensors."""

    _attr_state_class = STATE_CLASS_MEASUREMENT

    def __init__(self, ip_address, name, version):
        """Initialize the sensor."""
        self._version = version

        # v1 and v2 bridges expose the reading on different ports/paths;
        # the config schema restricts version to 1 or 2.
        if self._version == 1:
            self._url = f"http://{ip_address}/instantaneousdemand"
        elif self._version == 2:
            self._url = f"http://{ip_address}:8888/zigbee/se/instantaneousdemand"

        self._name = name
        self._unit_of_measurement = "kW"
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def native_unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    def update(self):
        """Get the energy usage data from the DTE energy bridge."""
        try:
            response = requests.get(self._url, timeout=5)
        except (requests.exceptions.RequestException, ValueError):
            _LOGGER.warning(
                "Could not update status for DTE Energy Bridge (%s)", self._name
            )
            return

        if response.status_code != HTTP_OK:
            _LOGGER.warning(
                "Invalid status_code from DTE Energy Bridge: %s (%s)",
                response.status_code,
                self._name,
            )
            return

        # Expected payload: "<value> <unit>" -- exactly two tokens.
        response_split = response.text.split()

        if len(response_split) != 2:
            _LOGGER.warning(
                'Invalid response from DTE Energy Bridge: "%s" (%s)',
                response.text,
                self._name,
            )
            return

        val = float(response_split[0])

        # A workaround for a bug in the DTE energy bridge.
        # The returned value can randomly be in W or kW. Checking for a
        # a decimal seems to be a reliable way to determine the units.
        # Limiting to version 1 because version 2 apparently always returns
        # values in the format 000000.000 kW, but the scaling is Watts
        # NOT kWatts
        if self._version == 1 and "." in response_split[0]:
            self._state = val
        else:
            self._state = val / 1000
|
himanshuo/osf.io
|
website/project/views/comment.py
|
Python
|
apache-2.0
| 8,683
| 0.000461
|
# -*- coding: utf-8 -*-
import collections
import httplib as http
import pytz
from flask import request
from modularodm import Q
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from framework.auth.utils import privacy_info_handle
from framework.forms.utils import sanitize
from website import settings
from website.notifications.emails import notify
from website.filters import gravatar
from website.models import Guid, Comment
from website.project.decorators import must_be_contributor_or_public
from datetime import datetime
from website.project.model import has_anonymous_link
def resolve_target(node, guid):
    """Resolve a comment target: the GUID's referent, or the node itself.

    Raises HTTPError(400) when the GUID is present but does not resolve.
    """
    if not guid:
        return node
    loaded = Guid.load(guid)
    if loaded is None:
        raise HTTPError(http.BAD_REQUEST)
    return loaded.referent
def collect_discussion(target, users=None):
    """Recursively collect non-deleted comments under *target*, by author.

    Returns a defaultdict mapping user -> list of that user's comments in the
    thread rooted at *target*.  A caller-supplied *users* mapping is filled in
    place.  (The previous ``users = users or defaultdict(list)`` silently
    replaced a caller-supplied *empty* mapping because of the truthiness test,
    so the caller's dict was never populated.)
    """
    if users is None:
        users = collections.defaultdict(list)
    for comment in getattr(target, 'commented', []):
        if not comment.is_deleted:
            users[comment.user].append(comment)
            collect_discussion(comment, users=users)
    return users
@must_be_contributor_or_public
def comment_discussion(**kwargs):
    """Return the node's discussion participants, ordered by comment count.

    Author ids/urls/names are scrubbed via privacy_info_handle when the
    viewer arrived through an anonymous view-only link.
    """
    node = kwargs['node'] or kwargs['project']
    auth = kwargs['auth']
    users = collect_discussion(node)
    anonymous = has_anonymous_link(node, auth)

    # Sort users by comment frequency
    # TODO: Allow sorting by recency, combination of frequency and recency
    sorted_users = sorted(
        users.keys(),
        key=lambda item: len(users[item]),
        reverse=True,
    )

    return {
        'discussion': [
            {
                'id': privacy_info_handle(user._id, anonymous),
                'url': privacy_info_handle(user.url, anonymous),
                'fullname': privacy_info_handle(user.fullname, anonymous, name=True),
                'isContributor': node.is_contributor(user),
                'gravatarUrl': privacy_info_handle(
                    gravatar(
                        user, use_ssl=True, size=settings.GRAVATAR_SIZE_DISCUSSION,
                    ),
                    anonymous
                ),
            }
            for user in sorted_users
        ]
    }
def serialize_comment(comment, auth, anonymous=False):
    """Serialize one comment for the client.

    Author identity fields are scrubbed via privacy_info_handle when
    *anonymous* is True (viewer came through an anonymous view-only link).
    """
    return {
        'id': comment._id,
        'author': {
            'id': privacy_info_handle(comment.user._id, anonymous),
            'url': privacy_info_handle(comment.user.url, anonymous),
            'name': privacy_info_handle(
                comment.user.fullname, anonymous, name=True
            ),
            'gravatarUrl': privacy_info_handle(
                gravatar(
                    comment.user, use_ssl=True,
                    size=settings.GRAVATAR_SIZE_DISCUSSION
                ),
                anonymous
            ),
        },
        'dateCreated': comment.date_created.isoformat(),
        'dateModified': comment.date_modified.isoformat(),
        'content': comment.content,
        'hasChildren': bool(getattr(comment, 'commented', [])),
        'canEdit': comment.user == auth.user,
        'modified': comment.modified,
        'isDeleted': comment.is_deleted,
        'isAbuse': auth.user and auth.user._id in comment.reports,
    }
def serialize_comments(record, auth, anonymous=False):
    """Serialize every comment attached directly to *record*."""
    comments = getattr(record, 'commented', [])
    return [serialize_comment(each, auth, anonymous) for each in comments]
def kwargs_to_comment(kwargs, owner=False):
    """Load the comment identified by kwargs['cid'].

    Raises 400 when the id is missing/unknown; with owner=True additionally
    raises 403 unless the requesting user authored the comment.
    """
    comment = Comment.load(kwargs.get('cid'))
    if comment is None:
        raise HTTPError(http.BAD_REQUEST)
    if owner:
        auth = kwargs['auth']
        if auth.user != comment.user:
            raise HTTPError(http.FORBIDDEN)
    return comment
@must_be_logged_in
@must_be_contributor_or_public
def add_comment(**kwargs):
    """Create a comment on a node, or a reply to an existing comment.

    Validates that commenting is enabled and permitted, sanitizes and
    length-checks the content, then notifies subscribers (plus the parent
    comment's author for replies).  Returns the serialized comment, 201.
    """
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']

    if not node.comment_level:
        raise HTTPError(http.BAD_REQUEST)
    if not node.can_comment(auth):
        raise HTTPError(http.FORBIDDEN)

    guid = request.json.get('target')
    target = resolve_target(node, guid)

    content = request.json.get('content').strip()
    content = sanitize(content)
    if not content:
        raise HTTPError(http.BAD_REQUEST)
    if len(content) > settings.COMMENT_MAXLENGTH:
        raise HTTPError(http.BAD_REQUEST)

    comment = Comment.create(
        auth=auth,
        node=node,
        target=target,
        user=auth.user,
        content=content,
    )
    comment.save()

    context = dict(
        node_type=node.project_or_component,
        timestamp=datetime.utcnow().replace(tzinfo=pytz.utc),
        commenter=auth.user,
        gravatar_url=auth.user.gravatar_url,
        content=content,
        target_user=target.user if is_reply(target) else None,
        parent_comment=target.content if is_reply(target) else "",
        title=node.title,
        node_id=node._id,
        url=node.absolute_url
    )
    sent_subscribers = notify(uid=node._id, event="comments", **context)

    # Reply authors get a direct notification unless the node-level
    # notification above already reached them.
    if is_reply(target):
        if target.user and target.user not in sent_subscribers:
            notify(uid=target.user._id, event='comment_replies', **context)

    return {
        'comment': serialize_comment(comment, auth)
    }, http.CREATED
def is_reply(target):
    """True when the comment target is itself a Comment (i.e. a reply)."""
    return isinstance(target, Comment)
@must_be_contributor_or_public
def list_comments(auth, **kwargs):
    """List serialized comments on a target plus the viewer's unread count.

    The unread count is only computed for contributors; anonymous-link
    viewers receive scrubbed author info via the *anonymous* flag.
    """
    node = kwargs['node'] or kwargs['project']
    anonymous = has_anonymous_link(node, auth)
    guid = request.args.get('target')
    target = resolve_target(node, guid)
    serialized_comments = serialize_comments(target, auth, anonymous)
    n_unread = 0
    if node.is_contributor(auth.user):
        if auth.user.comments_viewed_timestamp is None:
            # Lazily initialize the per-user "last viewed" bookkeeping.
            auth.user.comments_viewed_timestamp = {}
            auth.user.save()
        n_unread = n_unread_comments(target, auth.user)
    return {
        'comments': serialized_comments,
        'nUnread': n_unread
    }
def n_unread_comments(node, user):
    """Return the number of unread comments on a node for a user."""
    # Users who never viewed the page fall back to this early default,
    # making every comment count as unread.
    default_timestamp = datetime(1970, 1, 1, 12, 0, 0)
    view_timestamp = user.comments_viewed_timestamp.get(node._id, default_timestamp)
    return Comment.find(Q('node', 'eq', node) &
                        Q('user', 'ne', user) &
                        Q('date_created', 'gt', view_timestamp) &
                        Q('date_modified', 'gt', view_timestamp)).count()
@must_be_logged_in
@must_be_contributor_or_public
def edit_comment(**kwargs):
    """Replace a comment's content; only the author may edit (owner=True)."""
    auth = kwargs['auth']
    comment = kwargs_to_comment(kwargs, owner=True)
    content = request.json.get('content').strip()
    content = sanitize(content)
    if not content:
        raise HTTPError(http.BAD_REQUEST)
    if len(content) > settings.COMMENT_MAXLENGTH:
        raise HTTPError(http.BAD_REQUEST)
    comment.edit(
        content=content,
        auth=auth,
        save=True
    )
    return serialize_comment(comment, auth)
@must_be_logged_in
@must_be_contributor_or_public
def delete_comment(**kwargs):
    """Soft-delete a comment; only its author may delete (owner=True)."""
    auth = kwargs['auth']
    comment = kwargs_to_comment(kwargs, owner=True)
    comment.delete(auth=auth, save=True)
    return {}
@must_be_logged_in
@must_be_contributor_or_public
def undelete_comment(**kwargs):
    """Restore a soft-deleted comment; only its author may restore."""
    auth = kwargs['auth']
    comment = kwargs_to_comment(kwargs, owner=True)
    comment.undelete(auth=auth, save=True)
    return {}
@must_be_logged_in
@must_be_contributor_or_public
def update_comments_timestamp(auth, **kwargs):
    """Mark all comments on the node as read for the current contributor."""
    node = kwargs['node'] or kwargs['project']
    if node.is_contributor(auth.user):
        auth.user.comments_viewed_timestamp[node._id] = datetime.utcnow()
        auth.user.save()
        # NOTE(review): called for its side effects only; the return value is
        # discarded -- confirm this refresh is still required.
        list_comments(**kwargs)
        return {node._id: auth.user.comments_viewed_timestamp[node._id].isoformat()}
    else:
        return {}
@must_be_logged_in
@must_be_contributor_or_public
def report_abuse(**kwargs):
auth = kwargs['auth']
user = auth.user
comment = kwargs_to_comment(kwargs)
category = request.json.get('category')
text = request.json.get('text', '')
if not category:
raise
|
alexandrucoman/vbox-nova-driver
|
nova/utils.py
|
Python
|
apache-2.0
| 42,459
| 0.000377
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import functools
import hashlib
import hmac
import inspect
import os
import pyclbr
import random
import re
import shutil
import socket
import struct
import sys
import tempfile
from xml.sax import saxutils
import eventlet
import netaddr
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_context import context as common_context
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from nova import exception
from nova.i18n import _, _LE, _LW
notify_decorator = 'nova.notifications.notify_decorator'
monkey_patch_opts = [
cfg.BoolOpt('monkey_patch',
default=False,
help='Whether to log monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[
'nova.api.ec2.cloud:%s' % (notify_decorator),
'nova.compute.api:%s' % (notify_decorator)
],
help='List of modules/decorators to monkey patch'),
]
utils_opts = [
cfg.IntOpt('password_length',
default=12,
help='Length of generated ins
|
tance admin passwords'),
cfg.StrOpt('instance_usage_audit_period',
default='month',
help='Time period to generate instance usages for. '
'Time period must be hour, day, month or year'),
cfg.StrOpt('rootwrap_config',
default="/etc/nova/rootwrap.conf",
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
cfg.StrO
|
pt('tempdir',
help='Explicitly specify the temporary working directory'),
]
""" This group is for very specific reasons.
If you're:
- Working around an issue in a system tool (e.g. libvirt or qemu) where the fix
is in flight/discussed in that community.
- The tool can be/is fixed in some distributions and rather than patch the code
those distributions can trivially set a config option to get the "correct"
behavior.
This is a good place for your workaround.
Please use with care!
Document the BugID that your workaround is paired with."""
workarounds_opts = [
cfg.BoolOpt('disable_rootwrap',
default=False,
help='This option allows a fallback to sudo for performance '
'reasons. For example see '
'https://bugs.launchpad.net/nova/+bug/1415106'),
cfg.BoolOpt('disable_libvirt_livesnapshot',
default=True,
help='When using libvirt 1.2.2 fails live snapshots '
'intermittently under load. This config option provides '
'mechanism to disable livesnapshot while this is '
'resolved. See '
'https://bugs.launchpad.net/nova/+bug/1334398'),
cfg.BoolOpt('destroy_after_evacuate',
default=True,
help='Whether to destroy instances on startup when we suspect '
'they have previously been evacuated. This can result in '
'data loss if undesired. See '
'https://launchpad.net/bugs/1419785'),
]
CONF = cfg.CONF
CONF.register_opts(monkey_patch_opts)
CONF.register_opts(utils_opts)
CONF.import_opt('network_api_class', 'nova.network')
CONF.register_opts(workarounds_opts, group='workarounds')
LOG = logging.getLogger(__name__)
# used in limits
TIME_UNITS = {
'SECOND': 1,
'MINUTE': 60,
'HOUR': 3600,
'DAY': 86400
}
_IS_NEUTRON = None
synchronized = lockutils.synchronized_with_prefix('nova-')
SM_IMAGE_PROP_PREFIX = "image_"
SM_INHERITABLE_KEYS = (
'min_ram', 'min_disk', 'disk_format', 'container_format',
)
def vpn_ping(address, port, timeout=0.05, session_id=None):
    """Sends a vpn negotiation packet and returns the server session.

    Returns Boolean indicating whether the vpn_server is listening.
    Basic packet structure is below.

    Client packet (14 bytes)::

         0 1      8 9  13
        +-+--------+-----+
        |x| cli_id |?????|
        +-+--------+-----+
        x = packet identifier 0x38
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding

    Server packet (26 bytes)::

         0 1      8 9  13 14    21 2225
        +-+--------+-----+--------+----+
        |x| srv_id |?????| cli_id |????|
        +-+--------+-----+--------+----+
        x = packet identifier 0x40
        cli_id = 64 bit identifier
        ? = unknown, probably flags/padding
        bit 9 was 1 and the rest were 0 in testing
    """
    # NOTE(tonyb) session_id isn't used for a real VPN connection so using a
    # cryptographically weak value is fine.
    if session_id is None:
        session_id = random.randint(0, 0xffffffffffffffff)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data = struct.pack('!BQxxxxx', 0x38, session_id)
    sock.sendto(data, (address, port))
    sock.settimeout(timeout)
    try:
        received = sock.recv(2048)
    except socket.timeout:
        # No reply within *timeout*: nothing is listening.
        return False
    finally:
        sock.close()
    fmt = '!BQxxxxxQxxxx'
    if len(received) != struct.calcsize(fmt):
        LOG.warning(_LW('Expected to receive %(exp)s bytes, '
                        'but actually %(act)s'),
                    dict(exp=struct.calcsize(fmt), act=len(received)))
        return False
    (identifier, server_sess, client_sess) = struct.unpack(fmt, received)
    # Valid reply: server packet id 0x40 echoing our session id.
    return (identifier == 0x40 and client_sess == session_id)
def _get_root_helper():
    """Return the root-helper command, honoring the disable_rootwrap workaround."""
    if CONF.workarounds.disable_rootwrap:
        # See https://bugs.launchpad.net/nova/+bug/1415106 for why plain sudo
        # may be preferred over rootwrap.
        return 'sudo'
    return 'sudo nova-rootwrap %s' % CONF.rootwrap_config
def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method.

    Fills in the configured root helper when run_as_root is requested but no
    root_helper was supplied.
    """
    if 'run_as_root' in kwargs:
        kwargs.setdefault('root_helper', _get_root_helper())
    return processutils.execute(*cmd, **kwargs)
def trycmd(*args, **kwargs):
    """Convenience wrapper around oslo's trycmd() method.

    Fills in the configured root helper when run_as_root is requested but no
    root_helper was supplied.
    """
    if 'run_as_root' in kwargs:
        kwargs.setdefault('root_helper', _get_root_helper())
    return processutils.trycmd(*args, **kwargs)
def novadir():
    """Return the absolute path of the directory containing the nova package."""
    # Imported lazily so merely loading this utils module stays cheap.
    import nova
    return os.path.abspath(nova.__file__).split('nova/__init__.py')[0]
def generate_uid(topic, size=8):
    """Return a random identifier of the form '<topic>-<size random chars>'.

    Characters are drawn uniformly from digits and lowercase letters.  The
    previous character set ('01234567890...') contained '0' twice, which
    skewed the distribution toward '0'; and range() replaces the Python-2-only
    xrange() while behaving identically here.
    """
    characters = '0123456789abcdefghijklmnopqrstuvwxyz'
    choices = [random.choice(characters) for _ in range(size)]
    return '%s-%s' % (topic, ''.join(choices))
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def last_completed_audit_period(unit=None, before=None):
"""This method gives you the most recently *completed* audit period.
arguments:
units: string, one of 'hour', 'day', 'mont
|
cipriancraciun/extremely-simple-cluster-platform
|
components/py-messaging/sources/escp/messaging/rpc_testing.py
|
Python
|
gpl-3.0
| 2,789
| 0.062029
|
import Queue as queue
import escp.messaging.rpc as rpc
import escp.messaging.wrappers as wrappers
import escp.tools.testing as testing
from escp.messaging.wrappers_testing import _construct_wrapper
@testing.fixture
def needs_rpc_client (_test, member = 'client', session = 'session', callback_agent = None, callback_port = None, coder = 'coder', auto_initialize = True, auto_finalize = None) :
    """Fixture: attach an rpc.Client context member named *member* to *_test*."""
    return _needs_client (_test, rpc.Client, member, session, callback_agent, callback_port, coder, auto_initialize, auto_finalize)
def _needs_client (_test, _class, _member, _session, _callback_agent, _callback_port, _coder, _auto_initialize, _auto_finalize) :
    """Register construct/destroy hooks managing an RPC client test member.

    When _auto_finalize is unspecified it defaults to _auto_initialize.
    (The original line assigned between two undefined ``*_register`` names --
    an apparent copy-paste from a register/unregister fixture -- and raised
    NameError whenever it executed.)
    """
    if _auto_finalize is None : _auto_finalize = _auto_initialize
    def _constructor (__context = None, * __metadata) :
        _client = _construct_wrapper (_class, __context, _session, _callback_agent, _callback_port, _coder)
        _initialize_rpc (_client, _auto_initialize)
        return _client
    def _destructor (_client, __context = None, * __metadata) :
        _finalize_rpc (_client, _auto_finalize)
        return
    return testing.append_context_member_hook (_test, _member, _constructor, _destructor, _accepts_metadata = True)
@testing.fixture
def needs_target_echo_rpc_server(_test, member='server', session='session', agent='agent', target='target', coder='coder', auto_initialize=True, auto_finalize=None):
    """Fixture: attach a target-addressed echo rpc.TargetServer member to *_test*."""
    return _needs_echo_server(_test, rpc.TargetServer, member, session, agent, target, coder, auto_initialize, auto_finalize)
@testing.fixture
def needs_port_echo_rpc_server (_test, member = 'server', session = 'session', agent = 'agent', port = 'port', coder = 'coder', auto_initialize = True, auto_finalize = None) :
    """Fixture: attach a port-addressed echo rpc.PortServer member to *_test*."""
    return _needs_echo_server (_test, rpc.PortServer, member, session, agent, port, coder, auto_initialize, auto_finalize)
def _needs_echo_server (_test, _class, _member, _session, _agent, _target_or_port, _coder, _auto_initialize, _auto_finalize) :
    """Register construct/destroy hooks managing an echo RPC server member.

    The server's handler echoes every request unchanged.  When _auto_finalize
    is unspecified it defaults to _auto_initialize.  (The original line
    assigned between two undefined ``*_register`` names and raised NameError
    whenever it executed.)
    """
    if _auto_finalize is None : _auto_finalize = _auto_initialize
    _callable = lambda _request : _request
    def _constructor (__context = None, * __metadata) :
        _server = _construct_wrapper (_class, __context, _session, _agent, _target_or_port, _coder, _callable)
        _initialize_rpc (_server, _auto_initialize)
        return _server
    def _destructor (_server, __context = None, * __metadata) :
        _finalize_rpc (_server, _auto_finalize)
        return
    return testing.append_context_member_hook (_test, _member, _constructor, _destructor, _accepts_metadata = True)
def _initialize_rpc(_server, _auto):
    """Synchronously initialize *_server* when _auto is truthy; assert success."""
    if not _auto:
        return
    assert _server.initialize_sync(1.0, Exception())
def _finalize_rpc(_server, _auto):
    """Synchronously finalize *_server* when _auto is truthy; assert success."""
    if not _auto:
        return
    assert _server.finalize_sync(1.0, Exception())
|
MakarenaLabs/Orator-Google-App-Engine
|
orator/query/processors/sqlite_processor.py
|
Python
|
mit
| 424
| 0
|
# -*- coding: utf-8 -*-
from .processor import QueryProcessor
class SQLiteQueryProcessor(QueryProcessor):

    def process_column_listing(self, results):
        """
        Process the results of a column listing query.

        :param results: The query results (one dict per column row)
        :type results: list

        :return: The listed column names
        :rtype: list
        """
        # A list comprehension keeps the Python 2 behavior (map() returned a
        # list there) while staying an eager, subscriptable list on Python 3,
        # where map() would return a lazy iterator.  The old docstring also
        # wrongly claimed a dict return.
        return [row['column_name'] for row in results]
|
|
car3oon/saleor
|
saleor/site/models.py
|
Python
|
bsd-3-clause
| 811
| 0
|
from django.contrib.sites.models import _simple_domain_name_validator
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SiteSettings(models.Model):
    """Editable per-site settings: domain, display name, header, description."""

    domain = models.CharField(
        pgettext_lazy('Site field', 'domain'),
        max_length=100,
        unique=True,
        validators=[_simple_domain_name_validator])
    name = models.CharField(pgettext_lazy('Site field', 'name'), max_length=50)
    header_text = models.CharField(
        pgettext_lazy('Site field', 'header text'), max_length=200, blank=True)
    description = models.CharField(
        pgettext_lazy('Site field', 'site description'),
        max_length=500,
        blank=True)

    def __str__(self):
        return self.name
Frostman/eho-horizon
|
openstack_dashboard/dashboards/project/routers/urls.py
|
Python
|
apache-2.0
| 1,319
| 0.000758
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import patterns, url

from .views import (IndexView, CreateView, DetailView)
from .ports.views import (AddInterfaceView, SetGatewayView)


# URL routes for the routers dashboard panel; router_id is any non-slash run.
urlpatterns = patterns('horizon.dashboards.project.routers.views',
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'^create/$', CreateView.as_view(), name='create'),
    url(r'^(?P<router_id>[^/]+)/$',
        DetailView.as_view(),
        name='detail'),
    url(r'^(?P<router_id>[^/]+)/addinterface', AddInterfaceView.as_view(),
        name='addinterface'),
    url(r'^(?P<router_id>[^/]+)/setgateway',
        SetGatewayView.as_view(),
        name='setgateway'),
)
|
aaschaer/globus-sdk-python
|
globus_sdk/transfer/response/base.py
|
Python
|
apache-2.0
| 407
| 0
|
import json
from globus_sdk.response import GlobusHTTPResponse
class TransferResponse(GlobusHTTPResponse):
    """
    Base class for :class:`TransferClient <globus_sdk.TransferClient>`
    responses.
    """

    def __str__(self):
        # Make printing responses more convenient. Relies on the
        # fact that Transfer API responses are always JSON.
        return json.dumps(self.data, indent=2)
|
Guitar-Machine-Learning-Group/guitar-transcriber
|
dataset.py
|
Python
|
mit
| 2,586
| 0.001547
|
import os
import numpy as np
class Dataset(object):
    """A collection of SongData objects plus dataset-level convenience accessors."""

    def __init__(self, songs_data=None):
        # Fresh list per instance: never share a mutable default.
        self.songs_data = [] if songs_data is None else songs_data

    def add_song(self, song_data):
        """Append one SongData to the dataset."""
        self.songs_data.append(song_data)

    def songs(self):
        """Yield the contained SongData objects in insertion order."""
        for song in self.songs_data:
            yield song

    @property
    def num_features(self):
        """Feature count of the first song, or None when the dataset is empty."""
        if self.songs_data:
            return self.songs_data[0].X.shape[1]

    @property
    def size(self):
        """Number of songs currently held."""
        return len(self.songs_data)

    def __repr__(self):
        return ', '.join(song.name for song in self.songs())
class SongData(object):
    """
    Holds raw samples (x), features (X), labels (Y) and metadata for a song.

    X and Y setters cross-check that the number of frames agrees whenever the
    other matrix has already been assigned.
    """

    def __init__(self, audio_path, label_path):
        """Validate that the given paths exist; `label_path` may be falsy (no labels)."""
        if not os.path.isfile(audio_path):
            raise IOError("Audio file at %s does not exist" % audio_path)
        if label_path and not os.path.isfile(label_path):
            raise IOError("MIDI file at %s does not exist" % label_path)
        self.audio_path = audio_path
        self.label_path = label_path

    # x [num_samples,]: the raw samples of the song.
    @property
    def x(self):
        return self.__x

    @x.setter
    def x(self, x):
        self.__x = x

    # X [num_frames x num_features]: the feature matrix for the song.
    @property
    def X(self):
        return self.__X

    @X.setter
    def X(self, X):
        if hasattr(self, 'Y') and self.Y.shape[0] != X.shape[0]:
            raise ValueError("Number of feature frames must equal number of label frames")
        self.__X = X

    # Y [num_frames x num_pitches]: the label matrix for the song.
    @property
    def Y(self):
        return self.__Y

    @Y.setter
    def Y(self, Y):
        if hasattr(self, 'X') and self.X.shape[0] != Y.shape[0]:
            raise ValueError("Number of label frames must equal number of feature frames")
        self.__Y = Y

    @property
    def num_pitches(self):
        # 0 (not None) when no labels assigned — callers rely on the int.
        if hasattr(self, 'Y'):
            return np.shape(self.Y)[1]
        return 0

    @property
    def num_features(self):
        if hasattr(self, 'X'):
            return self.X.shape[1]

    @property
    def num_frames(self):
        if hasattr(self, 'X'):
            return self.X.shape[0]

    @property
    def name(self):
        """Audio file name without directory or extension."""
        return os.path.splitext(os.path.split(self.audio_path)[-1])[0]
|
snava10/sqlRunner
|
websqlrunner/websqlrunner/wsgi.py
|
Python
|
apache-2.0
| 402
| 0
|
"""
WSGI config for websqlrunner project.
It exposes the WSGI callable as a module-le
|
vel variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi
|
_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "websqlrunner.settings")
application = get_wsgi_application()
|
Gentux/etalage
|
etalage/__init__.py
|
Python
|
agpl-3.0
| 990
| 0
|
# -*- coding: utf-8 -*-
# Etalage -- Open Data POIs portal
# By: Emmanuel Raviart <eraviart@easter-eggs.com>
#
# Copyright (C) 2011, 2012 Easter-eggs
# http://gitorious.org/infos-pratiques/etalage
#
# This file is part of Etalage.
#
# Etalage is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""Web services for territories"""
conf = {} # Dictionary updated by con
|
fig.environment.load_environment
|
richardliaw/ray
|
python/ray/tests/test_reference_counting_2.py
|
Python
|
apache-2.0
| 11,196
| 0
|
# coding: utf-8
import logging
import os
import signal
import sys
import numpy as np
import pytest
import ray
import ray.cluster_utils
from ray.test_utils import SignalActor, put_object, wait_for_condition
SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM
logger = logging.getLogger(__name__)
@pytest.fixture
def one_worker_100MiB(request):
    # Ray cluster fixture: one CPU worker and a 100 MiB object store, with
    # retry/timeout knobs tightened so eviction happens quickly in the tests.
    config = {
        "object_store_full_max_retries": 2,
        "task_retry_delay_ms": 0,
        "object_timeout_milliseconds": 1000,
    }
    yield ray.init(
        num_cpus=1,
        object_store_memory=100 * 1024 * 1024,
        _system_config=config)
    # Teardown: stop the local Ray instance after the test body finishes.
    ray.shutdown()
def _fill_object_store_and_get(obj, succeed=True, object_MiB=40,
                               num_objects=5):
    """Put `num_objects` arrays of `object_MiB` MiB to pressure the object
    store, then wait for `obj` to be (if `succeed`) or not be (otherwise)
    present in the local store.

    `obj` may be an ObjectRef or its raw binary ID.
    """
    for _ in range(num_objects):
        ray.put(np.zeros(object_MiB * 1024 * 1024, dtype=np.uint8))

    if type(obj) is bytes:
        obj = ray.ObjectRef(obj)

    if succeed:
        wait_for_condition(
            lambda: ray.worker.global_worker.core_worker.object_exists(obj))
    else:
        wait_for_condition(
            lambda: not ray.worker.global_worker.core_worker.object_exists(obj)
        )
# Test that an object containing object refs within it pins the inner IDs
# recursively and for submitted tasks.
@pytest.mark.parametrize("use_ray_put,failure", [(False, False), (False, True),
                                                 (True, False), (True, True)])
def test_recursively_nest_ids(one_worker_100MiB, use_ray_put, failure):
    """Nest an array ref `max_depth` levels deep, pass it through a recursive
    task chain, and check the innermost array stays pinned until the chain
    finishes (or its owner dies when `failure` is set)."""
    @ray.remote(max_retries=1)
    def recursive(ref, signal, max_depth, depth=0):
        unwrapped = ray.get(ref[0])
        if depth == max_depth:
            ray.get(signal.wait.remote())
            if failure:
                os._exit(0)
            return
        else:
            return recursive.remote(unwrapped, signal, max_depth, depth + 1)

    signal = SignalActor.remote()
    max_depth = 5
    array_oid = put_object(
        np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)
    nested_oid = array_oid
    for _ in range(max_depth):
        nested_oid = ray.put([nested_oid])
    head_oid = recursive.remote([nested_oid], signal, max_depth)

    # Remove the local reference.
    array_oid_bytes = array_oid.binary()
    del array_oid, nested_oid

    tail_oid = head_oid
    for _ in range(max_depth):
        tail_oid = ray.get(tail_oid)

    # Check that the remote reference pins the object.
    _fill_object_store_and_get(array_oid_bytes)

    # Fulfill the dependency, causing the tail task to finish.
    ray.get(signal.send.remote())

    try:
        ray.get(tail_oid)
        assert not failure
    # TODO(edoakes): this should raise WorkerError.
    except ray.exceptions.ObjectLostError:
        assert failure

    # Reference should be gone, check that array gets evicted.
    _fill_object_store_and_get(array_oid_bytes, succeed=False)
# Test that serialized ObjectRefs returned from remote tasks are pinned until
# they go out of scope on the caller side.
@pytest.mark.parametrize("use_ray_put,failure", [(False, False), (False, True),
                                                 (True, False), (True, True)])
def test_return_object_ref(one_worker_100MiB, use_ray_put, failure):
    """An ObjectRef returned (inside a list) from a task stays pinned while
    either the outer ref or a deserialized copy of the inner ref is alive."""
    @ray.remote
    def return_an_id():
        return [
            put_object(
                np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)
        ]

    # NOTE: shadows the builtin `exit`; harmless inside this test scope.
    @ray.remote(max_retries=1)
    def exit():
        os._exit(0)

    outer_oid = return_an_id.remote()
    inner_oid_binary = ray.get(outer_oid)[0].binary()

    # Check that the inner ID is pinned by the outer ID.
    _fill_object_store_and_get(inner_oid_binary)

    # Check that taking a reference to the inner ID and removing the outer ID
    # doesn't unpin the object.
    inner_oid = ray.get(outer_oid)[0]  # noqa: F841
    del outer_oid
    _fill_object_store_and_get(inner_oid_binary)

    if failure:
        # Check that the owner dying unpins the object. This should execute on
        # the same worker because there is only one started and the other tasks
        # have finished.
        with pytest.raises(ray.exceptions.WorkerCrashedError):
            ray.get(exit.remote())
    else:
        # Check that removing the inner ID unpins the object.
        del inner_oid
    _fill_object_store_and_get(inner_oid_binary, succeed=False)
# Test that serialized ObjectRefs returned from remote tasks are pinned if
# passed into another remote task by the caller.
@pytest.mark.parametrize("use_ray_put,failure", [(False, False), (False, True),
                                                 (True, False), (True, True)])
def test_pass_returned_object_ref(one_worker_100MiB, use_ray_put, failure):
    """A serialized ObjectRef returned from one task and passed into another
    stays pinned until the consuming task finishes (or crashes)."""
    @ray.remote
    def return_an_id():
        return [
            put_object(
                np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)
        ]

    # TODO(edoakes): this fails with an ActorError with max_retries=1.
    @ray.remote(max_retries=0)
    def pending(ref, signal):
        ray.get(signal.wait.remote())
        ray.get(ref[0])
        if failure:
            os._exit(0)

    signal = SignalActor.remote()
    outer_oid = return_an_id.remote()
    inner_oid_binary = ray.get(outer_oid)[0].binary()
    pending_oid = pending.remote([outer_oid], signal)

    # Remove the local reference to the returned ID.
    del outer_oid

    # Check that the inner ID is pinned by the remote task ID and finishing
    # the task unpins the object.
    ray.get(signal.send.remote())
    try:
        # Should succeed because inner_oid is pinned if no failure.
        ray.get(pending_oid)
        assert not failure
    except ray.exceptions.WorkerCrashedError:
        assert failure

    def ref_not_exists():
        worker = ray.worker.global_worker
        inner_oid = ray.ObjectRef(inner_oid_binary)
        return not worker.core_worker.object_exists(inner_oid)

    wait_for_condition(ref_not_exists)
# Call a recursive chain of tasks that pass a serialized reference that was
# returned by another task to the end of the chain. The reference should still
# exist while the final task in the chain is running and should be removed once
# it finishes.
@pytest.mark.parametrize("use_ray_put,failure", [(False, False), (False, True),
                                                 (True, False), (True, True)])
def test_recursively_pass_returned_object_ref(one_worker_100MiB, use_ray_put,
                                              failure):
    """A returned ObjectRef forwarded down a recursive task chain stays alive
    while the tail task runs and is evicted once all references are dropped."""
    @ray.remote
    def return_an_id():
        return put_object(
            np.zeros(40 * 1024 * 1024, dtype=np.uint8), use_ray_put)

    @ray.remote(max_retries=1)
    def recursive(ref, signal, max_depth, depth=0):
        inner_id = ray.get(ref[0])
        if depth == max_depth:
            ray.get(signal.wait.remote())
            if failure:
                os._exit(0)
            return inner_id
        else:
            return inner_id, recursive.remote(ref, signal, max_depth,
                                              depth + 1)

    max_depth = 5
    outer_oid = return_an_id.remote()
    signal = SignalActor.remote()
    head_oid = recursive.remote([outer_oid], signal, max_depth)

    # Remove the local reference.
    inner_oid = None
    outer_oid = head_oid
    for i in range(max_depth):
        inner_oid, outer_oid = ray.get(outer_oid)

    # Check that the remote reference pins the object.
    _fill_object_store_and_get(outer_oid, succeed=False)

    # Fulfill the dependency, causing the tail task to finish.
    ray.get(signal.send.remote())

    try:
        # Check that the remote reference pins the object.
        ray.get(outer_oid)
        _fill_object_store_and_get(inner_oid)
        assert not failure
    # TODO(edoakes): this should raise WorkerError.
    except ray.exceptions.ObjectLostError:
        assert failure

    inner_oid_bytes = inner_oid.binary()
    del inner_oid
    del head_oid
    del outer_oid

    # Reference should be gone, check that returned ID gets evicted.
    _fill_object_store_and_get(inner_oid_bytes, succeed=False)
# Call a recursive chain of tasks. The final task in the chain returns an
# ObjectRef
|
sdague/home-assistant
|
homeassistant/components/environment_canada/weather.py
|
Python
|
apache-2.0
| 8,104
| 0.000494
|
"""Platform for retrieving meteorological data from Environment Canada."""
import datetime
import re
from env_canada import ECData # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt
CONF_FORECAST = "forecast"
CONF_ATTRIBUTION = "Data provided by Environment Canada"
CONF_STATION = "station"
def validate_station(station):
    """Return `station` when it is a well-formed EC station ID (or None); raise otherwise."""
    if station is None:
        return None
    if re.fullmatch(r"[A-Z]{2}/s0000\d{3}", station) is None:
        raise vol.error.Invalid('Station ID must be of the form "XX/s0000###"')
    return station
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
vol.Optional(CONF_FORECAST, default="daily"): vol.In(["daily", "hourly"]),
}
)
# Icon codes from http://dd.weatheroffice.ec.gc.ca/citypage_weather/
# docs/current_conditions_icon_code_descriptions_e.csv
ICON_CONDITION_MAP = {
"sunny": [0, 1],
"clear-night": [30, 31],
"partlycloudy": [2, 3, 4, 5, 22, 32, 33, 34, 35],
"cloudy": [10],
"rainy": [6, 9, 11, 12, 28, 36],
"lightning-rainy": [19, 39, 46, 47],
"pouring": [13],
"snowy-rainy": [7, 14, 15, 27, 37],
"snowy": [8, 16, 17, 18, 25, 26, 38, 40],
"windy": [43],
"fog": [20, 21, 23, 24, 44],
"hail": [26, 27],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Environment Canada weather."""
    station = config.get(CONF_STATION)
    if station:
        ec_data = ECData(station_id=station)
    else:
        # Fall back to the Home Assistant instance coordinates.
        latitude = config.get(CONF_LATITUDE, hass.config.latitude)
        longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
        ec_data = ECData(coordinates=(latitude, longitude))
    add_devices([ECWeather(ec_data, config)])
class ECWeather(WeatherEntity):
    """Representation of a weather condition.

    Current conditions come from ``ec_data.conditions``; when a reading is
    missing, temperature/condition fall back to the first hourly forecast.
    """

    def __init__(self, ec_data, config):
        """Initialize Environment Canada weather."""
        self.ec_data = ec_data
        self.platform_name = config.get(CONF_NAME)
        self.forecast_type = config[CONF_FORECAST]

    @property
    def attribution(self):
        """Return the attribution."""
        return CONF_ATTRIBUTION

    @property
    def name(self):
        """Return the name of the weather entity."""
        if self.platform_name:
            return self.platform_name
        return self.ec_data.metadata.get("location")

    @property
    def temperature(self):
        """Return the temperature."""
        if self.ec_data.conditions.get("temperature", {}).get("value"):
            return float(self.ec_data.conditions["temperature"]["value"])
        if self.ec_data.hourly_forecasts[0].get("temperature"):
            return float(self.ec_data.hourly_forecasts[0]["temperature"])
        return None

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def humidity(self):
        """Return the humidity."""
        if self.ec_data.conditions.get("humidity", {}).get("value"):
            return float(self.ec_data.conditions["humidity"]["value"])
        return None

    @property
    def wind_speed(self):
        """Return the wind speed."""
        if self.ec_data.conditions.get("wind_speed", {}).get("value"):
            return float(self.ec_data.conditions["wind_speed"]["value"])
        return None

    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        if self.ec_data.conditions.get("wind_bearing", {}).get("value"):
            return float(self.ec_data.conditions["wind_bearing"]["value"])
        return None

    @property
    def pressure(self):
        """Return the pressure."""
        if self.ec_data.conditions.get("pressure", {}).get("value"):
            # Source value is multiplied by 10 — presumably kPa -> hPa; verify.
            return 10 * float(self.ec_data.conditions["pressure"]["value"])
        return None

    @property
    def visibility(self):
        """Return the visibility."""
        if self.ec_data.conditions.get("visibility", {}).get("value"):
            return float(self.ec_data.conditions["visibility"]["value"])
        return None

    @property
    def condition(self):
        """Return the weather condition."""
        icon_code = None
        if self.ec_data.conditions.get("icon_code", {}).get("value"):
            icon_code = self.ec_data.conditions["icon_code"]["value"]
        elif self.ec_data.hourly_forecasts[0].get("icon_code"):
            icon_code = self.ec_data.hourly_forecasts[0]["icon_code"]
        if icon_code:
            return icon_code_to_condition(int(icon_code))
        return ""

    @property
    def forecast(self):
        """Return the forecast array."""
        return get_forecast(self.ec_data, self.forecast_type)

    def update(self):
        """Get the latest data from Environment Canada."""
        self.ec_data.update()
def get_forecast(ec_data, forecast_type):
    """Build the forecast array.

    Daily forecasts arrive as alternating high/low half-days; hourly
    forecasts as 24 hourly entries.  Returns a list of dicts keyed by the
    ATTR_FORECAST_* constants.
    """
    forecast_array = []
    if forecast_type == "daily":
        half_days = ec_data.daily_forecasts
        # If today still has a "high" half-day, emit today's entry from the
        # first (high) and second (low) half-days and consume both; otherwise
        # only the leftover low half-day is dropped.
        if half_days[0]["temperature_class"] == "high":
            forecast_array.append(
                {
                    ATTR_FORECAST_TIME: dt.now().isoformat(),
                    ATTR_FORECAST_TEMP: int(half_days[0]["temperature"]),
                    ATTR_FORECAST_TEMP_LOW: int(half_days[1]["temperature"]),
                    ATTR_FORECAST_CONDITION: icon_code_to_condition(
                        int(half_days[0]["icon_code"])
                    ),
                    ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                        half_days[0]["precip_probability"]
                    ),
                }
            )
            half_days = half_days[2:]
        else:
            half_days = half_days[1:]
        # Five further days: high half-days at even indices, lows at odd.
        for day, high, low in zip(range(1, 6), range(0, 9, 2), range(1, 10, 2)):
            forecast_array.append(
                {
                    ATTR_FORECAST_TIME: (
                        dt.now() + datetime.timedelta(days=day)
                    ).isoformat(),
                    ATTR_FORECAST_TEMP: int(half_days[high]["temperature"]),
                    ATTR_FORECAST_TEMP_LOW: int(half_days[low]["temperature"]),
                    ATTR_FORECAST_CONDITION: icon_code_to_condition(
                        int(half_days[high]["icon_code"])
                    ),
                    ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                        half_days[high]["precip_probability"]
                    ),
                }
            )
    elif forecast_type == "hourly":
        hours = ec_data.hourly_forecasts
        # Emit the next 24 hourly entries; "period" looks like YYYYMMDDHHMM.
        for hour in range(0, 24):
            forecast_array.append(
                {
                    ATTR_FORECAST_TIME: dt.as_local(
                        datetime.datetime.strptime(hours[hour]["period"], "%Y%m%d%H%M")
                    ).isoformat(),
                    ATTR_FORECAST_TEMP: int(hours[hour]["temperature"]),
                    ATTR_FORECAST_CONDITION: icon_code_to_condition(
                        int(hours[hour]["icon_code"])
                    ),
                    ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
                        hours[hour]["precip_probability"]
                    ),
                }
            )
    return forecast_array
def icon_code_to_condition(icon_code):
    """Map an Environment Canada icon code to its condition name (None if unknown)."""
    return next(
        (condition
         for condition, codes in ICON_CONDITION_MAP.items()
         if icon_code in codes),
        None,
    )
|
pyspace/test
|
pySPACE/missions/nodes/splitter/all_train_splitter.py
|
Python
|
gpl-3.0
| 2,977
| 0.011085
|
""" Use all available data for training """
import itertools
import logging

from pySPACE.missions.nodes.base_node import BaseNode
class AllTrainSplitterNode(BaseNode):
    """ Use all available data for training

    This node allows subsequent nodes to use all available labeled
    data for training. Accordingly, no data for testing is provided.

    **Parameters**

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : All_Train_Splitter

    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: 2009/01/07
    """
    def __init__(self, non_persistent=False, use_test_data=True,
                 *args, **kwargs):
        super(AllTrainSplitterNode, self).__init__(*args, **kwargs)
        self.set_permanent_attributes(non_persistent=non_persistent)
        self.set_permanent_attributes(use_test_data=use_test_data)

    def is_split_node(self):
        """ Returns whether this is a split node. """
        return True

    def use_next_split(self):
        """ Use the next split of the data into training and test data.

        Returns True if more splits are available, otherwise False.
        This method is useful for benchmarking.
        """
        # This source node provides only one single split of the data,
        # namely using all data as training data.
        return False

    def train_sweep(self, use_test_data):
        """ Performs the actual training of the node.

        .. note:: Split nodes cannot be trained
        """
        raise Exception("Split nodes cannot be trained")

    def request_data_for_training(self, use_test_data):
        """ Returns *all* labeled data for training of subsequent nodes. """
        self._log("Data for training is requested.", level=logging.DEBUG)
        # This splitter forwards every data point for training.  The original
        # wrapped the stream in an identity `itertools.imap` using a
        # Python-2-only tuple-unpacking lambda, which added nothing; the
        # stream is now returned directly.
        train_data = self.input_node.request_data_for_training(
            use_test_data=self.use_test_data)
        self._log("Data for training finished", level=logging.DEBUG)
        return train_data

    def request_data_for_testing(self):
        """ Returns an empty iterator: no data is held back for testing. """
        self._log("Data for testing is requested.", level=logging.DEBUG)
        self._log("Returning iterator over empty sequence.", level=logging.DEBUG)
        return iter([])


_NODE_MAPPING = {"All_Train_Splitter": AllTrainSplitterNode}
|
sio2project/oioioi
|
oioioi/exportszu/utils.py
|
Python
|
gpl-3.0
| 6,333
| 0.000474
|
import csv
import os
import shutil
import tarfile
import tempfile
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.utils.encoding import force_text
from oioioi.filetracker.client import get_client
from oioioi.filetracker.utils import django_to_filetracker_path
from oioioi.participants.models import Participant
from oioioi.programs.models import ProgramSubmission
from oioioi.programs.utils import get_extension
class SubmissionData(object):
    # Plain value object describing one submission plus its author's data.
    # All fields default to None; the collector fills in whatever it can —
    # city/school/school_city stay None when no registration data exists.
    submission_id = None        # ProgramSubmission id
    user_id = None
    username = None
    first_name = None
    last_name = None
    city = None                 # optional, from participant registration
    school = None               # optional, from participant registration
    school_city = None          # optional, from participant registration
    problem_short_name = None
    score = None
    solution_language = None    # extension of the source file, e.g. 'cpp'
    source_file = None          # the submitted source file object
class SubmissionsWithUserDataCollector(object):
    """
    Collects submissions with some associated data in specific contest with
    some filtering.

    We want the user of collector objects to know nothing (or very little)
    about the database, controller logic etc. It is responsibility of
    the collector to provide access to fully prepared data.
    """

    def __init__(
        self, contest, round=None, problem_instance=None, language=None, only_final=True
    ):
        self.contest = contest
        self.round = round
        self.problem_instance = problem_instance
        if language:
            exts = getattr(settings, 'SUBMITTABLE_EXTENSIONS', {})
            if language not in exts:
                # Bug fix: the original raised `InvalidValue`, a name neither
                # defined nor imported in this module, so an unknown language
                # crashed with NameError instead of a clean error.
                raise ValueError("Invalid programming language")
            self.lang_exts = exts[language]
        else:
            self.lang_exts = None
        self.only_final = only_final
        self.filetracker = get_client()

    def get_contest_id(self):
        return self.contest.id

    def collect_list(self):
        """Return a list of SubmissionData matching the configured filters."""
        # Build the filter incrementally: only user submissions, restricted
        # to the round / problem instance / languages / final-only as asked.
        q_expressions = Q(user__isnull=False)
        if self.round:
            q_expressions &= Q(problem_instance__round=self.round)
        else:
            q_expressions &= Q(problem_instance__contest=self.contest)
        if self.problem_instance:
            q_expressions &= Q(problem_instance=self.problem_instance)
        if self.lang_exts:
            q_expr_langs = Q()
            for ext in self.lang_exts:
                q_expr_langs |= Q(source_file__contains='.%s@' % ext)
            q_expressions &= q_expr_langs
        if self.only_final:
            q_expressions &= Q(submissionreport__userresultforproblem__isnull=False)

        submissions_list = []
        psubmissions = ProgramSubmission.objects.filter(q_expressions).select_related()
        for s in psubmissions:
            data = SubmissionData()
            data.submission_id = s.id
            data.user_id = s.user_id
            data.username = s.user.username
            data.first_name = s.user.first_name
            data.last_name = s.user.last_name
            data.problem_short_name = s.problem_instance.short_name
            data.score = s.score
            data.solution_language = get_extension(s.source_file.name)
            data.source_file = s.source_file
            # here we try to get some optional data, it just may not be there
            # and it's ok
            try:
                registration = (
                    Participant.objects.select_related()
                    .get(contest_id=self.contest.id, user=s.user)
                    .registration_model
                )
                try:
                    data.city = registration.city
                except AttributeError:
                    pass
                try:
                    data.school = registration.school.name
                    data.school_city = registration.school.city
                except AttributeError:
                    pass
            except (Participant.DoesNotExist, ObjectDoesNotExist):
                pass
            submissions_list.append(data)
        return submissions_list

    def get_submission_source(self, out_file_path, source):
        """Download the submission source from filetracker to `out_file_path`."""
        ft_file = django_to_filetracker_path(source)
        self.filetracker.get_file(ft_file, out_file_path, add_to_cache=False)
def build_submissions_archive(out_file, submission_collector):
    """
    Builds submissions archive, in szubrawcy format, in out_file from data
    provided by submission_collector. Argument out_file should be a file-like
    object (opened for binary writing; a gzipped tar is written into it).
    """

    def _encode(obj):
        # CSV cells: 'NULL' marker for missing values, text otherwise.
        if obj is None:
            return 'NULL'
        return force_text(obj)

    submission_list = submission_collector.collect_list()
    tmpdir = tempfile.mkdtemp()
    try:
        contest_id = submission_collector.get_contest_id()
        files_dir = os.path.join(tmpdir, contest_id)
        os.mkdir(files_dir, 0o700)
        with open(os.path.join(files_dir, 'INDEX'), 'w') as f:
            index_csv = csv.writer(f)
            header = [
                'submission_id',
                'user_id',
                'username',
                'first_name',
                'last_name',
                'city',
                'school',
                'school_city',
                'problem_short_name',
                'score',
            ]
            index_csv.writerow(header)
            for s in submission_list:
                index_entry = [
                    s.submission_id,
                    s.user_id,
                    s.username,
                    s.first_name,
                    s.last_name,
                    s.city,
                    s.school,
                    s.school_city,
                    s.problem_short_name,
                    s.score,
                ]
                index_csv.writerow([_encode(col) for col in index_entry])
        # One source file per submission, named id:user:problem.ext.
        for s in submission_list:
            filename = '%s:%s:%s.%s' % (
                s.submission_id,
                s.username,
                s.problem_short_name,
                s.solution_language,
            )
            dest = os.path.join(files_dir, filename)
            submission_collector.get_submission_source(dest, s.source_file)
        with tarfile.open(fileobj=out_file, mode='w:gz') as tar:
            tar.add(files_dir, arcname=contest_id)
    finally:
        # Always clean the scratch directory, even on failure.
        shutil.rmtree(tmpdir)
|
krull/docker-zenoss4
|
init_fs/usr/local/zenoss/ZenPacks/ZenPacks.zenoss.ZenJMX-3.12.1.egg/ZenPacks/zenoss/ZenJMX/tests/test_JMXDataSource.py
|
Python
|
gpl-3.0
| 960
| 0.001042
|
##############################################################################
#
# Copyright (C) Zenoss, Inc. 2015, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
from Products.ZenTestCase.BaseTestCase import BaseTestCase
from ZenPacks.zenoss.ZenJMX.datasources.JMXDataSource import JMXDataSource
class TestJMXDataSource(BaseTestCase):
    """Unit tests for JMXDataSource defaults and property editing."""

    def afterSetUp(self):
        self.ds = JMXDataSource(id='1')

    def test_getDescription(self):
        expected = '${dev/id}'
        self.assertEqual(self.ds.getDescription(), expected)

    def test_getProtocols(self):
        expected = ['REMOTING-JMX', 'RMI', 'JMXMP']
        self.assertEqual(self.ds.getProtocols(), expected)

    def test_zmanage_editProperties(self):
        # Calling without a REQUEST is expected to fail.
        with self.assertRaises(AttributeError):
            self.ds.zmanage_editProperties()
|
jorisvandenbossche/DS-python-data-analysis
|
notebooks/_solutions/pandas_03a_selecting_data2.py
|
Python
|
bsd-3-clause
| 19
| 0.052632
|
# Solution snippet: mean age of the `males` subset (pandas Series mean).
males['Age'].mean()
|
ChopChopKodi/pelisalacarta
|
python/main-classic/channels/peliculasaudiolatino.py
|
Python
|
gpl-3.0
| 9,331
| 0.013631
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para peliculasaudiolatino
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import sys
import urlparse
from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
DEBUG = config.get_setting("debug")
def mainlist(item):
    """Build the channel's top-level menu of listing/search entries."""
    logger.info("channels.peliculasaudiolatino mainlist")
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Recién agregadas", action="peliculas", url="http://peliculasaudiolatino.com/ultimas-agregadas.html", viewmode="movie"))
    itemlist.append( Item(channel=item.channel, title="Recién actualizadas", action="peliculas", url="http://peliculasaudiolatino.com/recien-actualizadas.html", viewmode="movie"))
    itemlist.append( Item(channel=item.channel, title="Las más vistas", action="peliculas", url="http://peliculasaudiolatino.com/las-mas-vistas.html", viewmode="movie"))
    itemlist.append( Item(channel=item.channel, title="Listado por géneros" , action="generos", url="http://peliculasaudiolatino.com"))
    itemlist.append( Item(channel=item.channel, title="Listado por años" , action="anyos", url="http://peliculasaudiolatino.com"))
    itemlist.append( Item(channel=item.channel, title="Buscar..." , action="search") )
    return itemlist
def peliculas(item):
    """Scrape one listing page and return a movie Item per entry (plus pagination)."""
    logger.info("channels.peliculasaudiolatino peliculas")
    data = scrapertools.cachePage(item.url)
    # Each entry carries link, thumbnail and title.
    patron = '<td><a href="([^"]+)"><img src="([^"]+)" class="[^"]+" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)
    itemlist = []
    for entry_url, entry_thumb, entry_title in matches:
        clean_title = entry_title.strip()
        itemlist.append(Item(
            channel=item.channel,
            action="findvideos",
            title=clean_title,
            fulltitle=clean_title,
            url=urlparse.urljoin(item.url, entry_url),
            thumbnail=urlparse.urljoin(item.url, entry_thumb),
            plot="",
            folder=True))
    # Next-page marker, if present.
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span class="icon-chevron-right">')
    if next_page != "":
        itemlist.append(Item(
            channel=item.channel,
            action="peliculas",
            title=">> Página siguiente",
            url=urlparse.urljoin(item.url, next_page).replace("/../../", "/"),
            viewmode="movie",
            folder=True))
    return itemlist
def generos(item):
    """Scrape the genre table and return one Item per genre, sorted by title."""
    logger.info("channels.peliculasaudiolatino generos")
    data = scrapertools.cachePage(item.url)
    # Restrict scraping to the genre table.
    data = scrapertools.find_single_match(data, '<table class="generos"(.*?)</table>')
    matches = re.compile('<a href="([^"]+)">([^<]+)<', re.DOTALL).findall(data)
    if (DEBUG):
        scrapertools.printMatches(matches)
    itemlist = []
    for entry_url, entry_title in matches:
        title = entry_title.strip()
        logger.info(title)
        itemlist.append(Item(
            channel=item.channel,
            action="peliculas",
            title=title,
            url=urlparse.urljoin(item.url, entry_url),
            thumbnail="",
            plot="",
            folder=True,
            viewmode="movie"))
    return sorted(itemlist, key=lambda entry: entry.title)
def anyos(item):
    """Scrape the by-year table and return one Item per year."""
    logger.info("channels.peliculasaudiolatino anyos")
    data = scrapertools.cachePage(item.url)
    # Restrict scraping to the years table.
    data = scrapertools.find_single_match(data, '<table class="years"(.*?)</table>')
    logger.info("channels.peliculasaudiolatino data="+data)
    matches = re.compile('<a href="([^"]+)">([^<]+)<', re.DOTALL).findall(data)
    if (DEBUG):
        scrapertools.printMatches(matches)
    itemlist = []
    for entry_url, entry_title in matches:
        url = urlparse.urljoin(item.url, entry_url)
        thumbnail = ""
        plot = ""
        if (DEBUG):
            logger.info("title=["+entry_title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append(Item(
            channel=item.channel,
            action="peliculas",
            title=entry_title,
            url=url,
            thumbnail=thumbnail,
            plot=plot,
            folder=True,
            viewmode="movie"))
    return itemlist
def search(item, texto):
    """Run a site search for *texto* and return the matching movie items sorted by title.

    Any failure is swallowed and logged so a broken channel does not
    interrupt the global multi-channel search.
    """
    logger.info("channels.peliculasaudiolatino search")
    itemlist = []
    query = texto.replace(" ", "+")
    try:
        item.url = "http://peliculasaudiolatino.com/busqueda.php?q=%s" % query
        item.extra = ""
        itemlist.extend(peliculas(item))
        return sorted(itemlist, key=lambda entry: entry.title)
    except:
        # Deliberate catch-all: log the exception info and report no results.
        import sys
        for trace_part in sys.exc_info():
            logger.error("%s" % trace_part)
        return []
def findvideos(item):
logger.info("channels.peliculasaudiolatino videos")
# Descarga la página
data = scrapertools.cachePage(item.url)
data = scrapertools.find_single_match(data,'<div class="opciones">(.*?)<div id="sidebar"')
logger.info("channels.peliculasaudiolatino videos data="+data)
title = item.title
scrapedthumbnail = item.thumbnail
itemlist = []
'''
<table class="table_links">
<thead>
<tr>
<th class="infotx" align="left">Colaborador</th>
<th class="infotx" align="left">Servidor</th>
<th class="infotx" align="left">Audio</th>
<th class="infotx" align="left">Calidad</th>
<th class="infotx" align="left">Enlace</th>
</tr>
</thead>
<tbody>
<tr>
<th align="left"><a href="http://peliculasaudiolatino.com/perfil/carlosaugus22.html" target="_blank"><img class="smallpic" src="http://peliculasaudiolatino.com/userpic/nopic.png" height="20" width="20" alt="carlosaugus22"><span class="infotx">carlosaugus22</span></a></th>
<th align="left"><img src="http://www.google.com/s2/favicons?domain=vidxtreme.to" width="16" alt="vidxtreme.to"/>
<span class="infotx">vidxtreme.to</span></th>
<th align="left"><img src="http://peliculasaudiolatino.com/images/la_la.png" width="22" alt="Latino" align=absmiddle></th>
<th align="left"><img src="http://peliculasaudiolatino.com/images/1ts.png" alt="TS"> TS</th>
<th class="slink" align="left"><div id="btnp"><a href="http://peliculasaudiolatino.com/vpaste/VmtaYVUxWnRWa1pOVkZwVFZrVnJPUT09K1A=.html" rel="nofollow" target="_blank"><span class="icon-play2"></span> Ver</a></div> </th>
</tr>
<tr>
<th class="headtable" align="left"><a href="http://peliculasaudiolatino.com/perfil/carlosaugus22.html" target="_blank"><img class="smallpic" src="http://peliculasaudiolatino.com/userpic/nopic.png" height="20" width="20" alt="carlosaugus22"><span class="infotx">carlosaugus22</span></a></th>
<th align="left"><img src="http://www.google.com/s2/favicons?domain=streamin.to" width="16" alt="streamin.to"/><span class="infotx">streamin.to</span></th>
<th align="left"><img src="http://peliculasaudiolatino.com/images/la_la.png" width="22" alt="Latino" align=absmiddle></th>
<th align="left"><img src="http://peliculasaudiolatino.com/images/1ts.png" alt="TS"> TS</th>
'''
patron = '<span class="infotx">([^<]+)</span></th[^<]+'
patron += '<th align="left"><img src="[^"]+" width="\d+" alt="([^"]+)"[^<]+</th[^<]+'
patron += '<th align="left"><img[^>]+>([^<]+)</th[^<]+'
patron += '<th class="slink" align="left"><div id="btnp"><a href="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if (DEBUG): scrapertools.printMatches(matches)
for servidor,idioma,calidad,sc
|
molly/GorillaBot
|
gorillabot/bot.py
|
Python
|
mit
| 16,845
| 0.003443
|
#!/usr/bin/env python3
#
# Copyright (c) 2013-2016 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from configure import Configurator
from executor import Executor
import json
import logging
from logging import handlers
from message import *
import os
import pickle
import queue
import re
import socket
import threading
from time import sleep, strftime, time
class Bot(object):
"""The core of the IRC bot. It maintains the IRC connection, and delegates other tasks."""
    def __init__(self):
        """Set up bot state, the message queue, and the executor, then run configuration."""
        # Filesystem layout: config.json sits one level above the package; logs/ beside this file.
        self.base_path = os.path.dirname(os.path.abspath(__file__))
        self.config_path = os.path.abspath(os.path.join(self.base_path, "..", "config.json"))
        self.log_path = os.path.abspath(os.path.join(self.base_path, 'logs'))
        # Active configuration dict and its key in config.json; filled in by initialize().
        self.configuration = None
        self.configuration_name = None
        # Timestamps driving send pacing and the keep-alive logic in caffeinate().
        self.last_message_sent = time()
        self.last_ping_sent = time()
        self.last_received = None
        self.last_reconnect = None
        self.logger = None
        # Event signalling every thread to stop; shutdown_message records the reason.
        self.shutdown = threading.Event()
        self.shutdown_message = None
        self.reconnect_time = 5  # Seconds to wait before reconnecting, or None to not reconnect
        # Serializes request/response exchanges that consume from the message queue.
        self.response_lock = threading.Lock()
        self.socket = None
        # Parsed IRC messages flow through this queue to the executor.
        self.message_q = queue.Queue()
        self.executor = Executor(self, self.message_q, self.shutdown)
        self.header = {"User-Agent": "GorillaBot (https://github.com/molly/GorillaBot)"}
        self.initialize()
def action(self, target, message):
"""Perform an action to target on the server."""
self.private_message(target, "\x01ACTION " + message + "\x01")
def caffeinate(self):
"""Make sure the connection stays open."""
now = time()
if now - self.last_received > 150:
if self.last_ping_sent < self.last_received:
self.ping()
elif now - self.last_received > 60:
self.logger.warning('No ping response in 60 seconds. Shutting down.')
self.shutdown_message = 'No ping response in 60 seconds.'
self.shutdown.set()
    def connect(self):
        """Connect to the IRC server and, on success, enter the receive loop."""
        self.logger.debug('Thread created.')
        self.socket = socket.socket()
        self.socket.settimeout(5)
        try:
            self.logger.info('Initiating connection.')
            self.socket.connect(("chat.freenode.net", 6667))
        except OSError:
            self.logger.error("Unable to connect to IRC server. Check your Internet connection.")
            self.shutdown.set()
            self.maybe_reconnect()
        else:
            # Standard IRC registration sequence: optional PASS, then NICK and USER.
            if self.configuration["password"]:
                self.send("PASS {0}".format(self.configuration["password"]), hide=True)
            self.send("NICK {0}".format(self.configuration["nick"]))
            self.send("USER {0} 0 * :{1}".format(self.configuration["ident"],
                                                 self.configuration["realname"]))
            # Query NickServ for our identification status.
            self.private_message("NickServ", "ACC")
            self.loop()
    def dispatch(self, line):
        """Inspect this line and determine if further processing is necessary.

        Wraps the already-split raw IRC line in the matching Message subclass
        and enqueues it for the executor; unrecognized lines are printed as-is.
        """
        length = len(line)
        message = None
        if 2 >= length >= 1:
            # Server keep-alive: "PING :server".
            if line[0] == "PING":
                message = Ping(self, *line)
        if length >= 2:
            if line[1] == "PONG":
                message = Ping(self, *line)
            elif line[1].isdigit():
                # Three-digit numeric reply (RPL_*/ERR_*).
                message = Numeric(self, *line)
            elif line[1] == "NOTICE":
                message = Notice(self, *line)
            elif line[1] == "PRIVMSG":
                nick = self.configuration["nick"]
                # A PRIVMSG is a command when addressed directly to us, or when
                # its body starts with "!" or with our nick.
                if (length >= 3 and line[2] == nick) or (length >= 4 and (
                        line[3].startswith(":!") or line[3].startswith(":" + nick))):
                    message = Command(self, *line)
                else:
                    message = Privmsg(self, *line)
        if message:
            self.message_q.put(message)
        else:
            print(line)
    def get_admin(self, nick=None):
        """Get the hostnames for the bot admins. If nick is supplied, add that user as an admin.

        Sends a WHOIS for each op and harvests the numeric replies from the
        message queue; unrelated messages seen while waiting are re-queued.
        """
        botops = self.configuration["botops"]
        if nick:
            ops = [nick]
        else:
            ops = botops.keys()
        # Hold the lock so no other consumer steals our WHOIS replies.
        self.response_lock.acquire()
        ignored_messages = []
        for op in ops:
            self.send("WHOIS " + op)
            while True:
                try:
                    msg = self.message_q.get(True, 120)
                except queue.Empty:
                    self.logger.error("No response while getting admins. Shutting down.")
                    self.shutdown.set()
                    break
                else:
                    if type(msg) is Numeric:
                        if msg.number == '311':
                            # RPL_WHOISUSER: record the op's username and host.
                            line = msg.body.split()
                            botops.update({op: {"user": line[1], "host": line[2]}})
                            self.logger.info(
                                "Adding {0} {1} to bot ops".format(line[1], line[2],))
                            break
                        elif msg.number == '318':
                            # RPL_ENDOFWHOIS: done with this nick.
                            break
                        elif msg.number == '401':
                            # ERR_NOSUCHNICK: the op is not online.
                            self.logger.info("No user {0} logged in.".format(op))
                            break
                    # Not one of ours; stash it and put it back afterwards.
                    ignored_messages.append(msg)
        self.response_lock.release()
        for msg in ignored_messages:
            self.message_q.put(msg)
        self.configuration["botops"] = botops
        self.update_configuration(self.configuration)
def get_configuration(self):
"""Get the configuration dict for the active configuration."""
with open(self.config_path, 'r') as f:
blob = json.load(f)
return blob[self.configuration_name]
def get_setting(self, setting, chan):
"""Get the value of the given setting for the given channel."""
if chan not in self.configuration["chans"]:
self.logger.warning("Tried to get settings for nonexistant channel {}.".format(chan))
return None
if setting not in self.configuration["chans"][chan]["settings"]:
return None
return self.configuration["chans"][chan]["settings"][setting]
def initialize(self):
"""Initialize the bot. Parse command-line options, configure, and set up logging."""
self.admin_commands, self.commands = self.load_commands()
self.setup_logging()
print('\n ."`".'
'\n / _=_ \\ \x1b[32m __ __ __ . . . __ __ __ '
'___\x1b[0m\n(,(oYo),) \x1b[32m / _` / \ |__) | | | |__| '
'|__) / \ | \x1b[0m\n| " | \x1b[32m \__| \__/ | \ | |__ '
'|__ | | |__) \__/ | \x1b[0m \n \(\_/)/\n')
try:
self.configuration_name = Configurator().configure()
self.configuration = self
|
Kotzyk/Projekt-Blackjack
|
blackjack.py
|
Python
|
gpl-3.0
| 25,701
| 0.004655
|
"""
Program do gry w Blackjack (a.k.a. Oczko) w języku Python przy użyciu biblioteki PyGame
Projekt zaliczeniowy - Języki Skryptowe, Informatyka i Ekonometria, rok 1, WZ, AGH
Autorzy: Joanna Jeziorek, Mateusz Koziestański, Katarzyna Maciocha
III 2016
"""
import random as rd
import os
import sys
import pygame
from pygame import *
pygame.font.init()
pygame.mixer.init()
screen = pygame.display.set_mode((800, 480))
clock = pygame.time.Clock()
# poniższe zmienne muszę wstępnie zadeklarować tu, bo inaczej wywala błędy niżej w metodach.
display_font = pygame.font.Font(None, 28)
aces = ['ki_a', 'ka_a', 'pi_a', 'tr_a']
player_hand, dealer_hand = [], []
def load_image(imgname, card):
    """Load an image file and return it with its bounding rectangle.

    :param imgname: png file name
    :param card: 1 to load from the card directory (obrazy/karty), anything else from obrazy
    :return: the converted pygame surface and its bounding rect
    """
    if card == 1:
        fullname = os.path.join("obrazy/karty", imgname)
    else:
        fullname = os.path.join('obrazy', imgname)
    try:
        imgname = pygame.image.load(fullname)
    except pygame.error as message:
        print('Nie można zaladować obrazu:', imgname)
        # BUG FIX: execution previously fell through with imgname still a string,
        # crashing below with an unrelated AttributeError. Fail explicitly instead.
        raise SystemExit(message)
    imgname = imgname.convert()
    return imgname, imgname.get_rect()
def display(font, sentence):
    """Render *sentence* as white-on-black status text for the bottom of the screen."""
    rendered = pygame.font.Font.render(font, sentence, 1, (255, 255, 255), (0, 0, 0))
    return rendered
# =============Funkcje logiki gry==================
def game_over():
    """If the player runs out of money, display the end screen.

    Blocks forever; the player can only quit (window close or Escape).
    """
    while 1:
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit()
            if event.type == KEYDOWN and event.key == K_ESCAPE:
                sys.exit()
        # Black screen with the "game over, out of money" banner.
        screen.fill((0, 0, 0))
        oFont = pygame.font.Font(None, 50)
        display_font = pygame.font.Font.render(oFont, "Koniec gry! Skonczyly ci sie pieniadze!", 1, (255, 255, 255),
                                               (0, 0, 0))
        screen.blit(display_font, (125, 220))
        pygame.display.flip()
def create_deck():
    """Build and return the 52-card deck.

    Cards are named "<suit>_<rank>" with suits ki/ka/tr/pi (hearts, diamonds,
    clubs, spades) and ranks a (ace), k (king), d (queen), w (jack), 2-10.
    Face cards come first, suit by suit, then 2..10 one rank at a time.
    """
    suits = ('ki', 'ka', 'tr', 'pi')
    deck = [suit + '_' + rank for suit in suits for rank in ('a', 'k', 'd', 'w')]
    for rank_value in range(2, 11):
        for suit in suits:
            deck.append(suit + '_' + str(rank_value))
    return deck
def shuffle(deck):
    """Shuffle *deck* in place with random.shuffle and return the same list."""
    rd.shuffle(deck)
    return deck
def return_played(deck, played_deck):
    """Move every played card back into the main deck and reshuffle.

    Returns (deck, played_deck) with played_deck emptied in place, so
    callers holding either list see the update.
    """
    # BUG FIX: the old loop did `deck.append(played_deck.pop())` while
    # iterating played_deck, so it skipped every other card and left
    # roughly half the pile behind.
    deck.extend(played_deck)
    del played_deck[:]
    rd.shuffle(deck)  # same effect as the module-level shuffle() helper
    return deck, played_deck
def deck_deal(deck, played_deck):
    """Deal the opening four cards, alternating dealer/player.

    Shuffles first and recycles the played pile when fewer than 5 cards
    remain. Every dealt card is also recorded in played_deck immediately.
    Returns (deck, played_deck, player_hand, dealer_hand).
    """
    dealer_hand, player_hand = [], []
    shuffle(deck)
    if len(deck) < 5:
        deck, played_deck = return_played(deck, played_deck)
    # Order matters: dealer, player, dealer, player.
    for hand in (dealer_hand, player_hand, dealer_hand, player_hand):
        dealt = deck.pop(0)
        hand.append(dealt)
        played_deck.append(dealt)
    return deck, played_deck, player_hand, dealer_hand
def hit(deck, played_deck, hand):
    """Deal one card from the top of *deck* into *hand*.

    If the deck is nearly empty, the played pile is recycled into it first.
    The dealt card is also recorded in played_deck.
    Returns (deck, played_deck, hand).
    """
    if len(deck) < 2:
        deck, played_deck = return_played(deck, played_deck)
    drawn = deck.pop(0)
    hand.append(drawn)
    played_deck.append(drawn)
    return deck, played_deck, hand
def value(hand):
    """Return the blackjack value of *hand*.

    The rank is character 3 of the card name ('ki_a' -> 'a'); for 'xx_10'
    that character is '1', which is counted as 10. Aces start at 11 and are
    demoted to 1 one at a time while the total is over 21.
    """
    value_total = 0
    for card in hand:
        rank_char = card[3]
        if rank_char == 'a':
            value_total += 11
        elif rank_char in ('k', 'd', 'w', '1'):
            value_total += 10
        else:
            value_total += int(rank_char)
    if value_total > 21:
        for card in hand:
            if card[3] == 'a':
                value_total -= 10
                if value_total <= 21:
                    break
    return value_total
def round_end(deck, player_hand, dealer_hand, played_deck, funds, money_gain, money_loss, dealer_cards, CardSprite):
    """Settle a round: pay the blackjack bonus, redraw the dealer's sprites,
    recycle hands into the played pile, and apply gain/loss to funds.

    Returns (deck, player_hand, dealer_hand, played_deck, funds, end_round).
    """
    # NOTE(review): player_hand[:1] is a one-element *list*, while aces holds
    # strings, so this membership test is always False and the 3:2 blackjack
    # bonus is never paid. player_hand[0] in aces looks like the intent — confirm.
    if len(player_hand) == 2 and player_hand[:1] in aces:
        money_gain += (money_gain * 3 / 2)
    # Rebuild the dealer's sprite group, cards spaced 80px apart.
    dealer_cards.empty()
    dealer_card_position = (50, 70)
    for x in dealer_hand:
        card = CardSprite(x, dealer_card_position)
        dealer_card_position = (dealer_card_position[0] + 80, dealer_card_position[1])
        dealer_cards.add(card)
    if not dealer_hand:
        # NOTE(review): this branch only runs when dealer_hand is already empty,
        # so the dealer loop below can never move anything; additionally,
        # popping while iterating drops only every other card — verify intent.
        for card in player_hand:
            played_deck.append(card)
            player_hand.pop()
        for card in dealer_hand:
            played_deck.append(card)
            dealer_hand.pop()
    funds += money_gain
    funds -= money_loss
    display_font = pygame.font.Font(None, 28)
    if funds <= 0:
        game_over()
    end_round = 1
    return deck, player_hand, dealer_hand, played_deck, funds, end_round
def bust(deck, player_hand, dealer_hand, played_deck, funds, money_gain, money_loss, dealer_cards, CardSprite):
    """Player busted: render the loss message and settle the round via round_end.

    Returns round_end's results plus the rendered status text surface.
    """
    font = pygame.font.Font(None, 28)
    display_font = display(font, "Gracz przebił! Przegrana: $%.1f." % money_loss)
    (deck, player_hand, dealer_hand, played_deck,
     funds, end_round) = round_end(deck, player_hand, dealer_hand, played_deck,
                                   funds, money_gain, money_loss, dealer_cards,
                                   CardSprite)
    return deck, player_hand, dealer_hand, played_deck, funds, end_round, display_font
def compare(deck, played_deck, player_hand, dealer_hand, funds, bet, dealer_cards, CardSprite):
pv, dv = value(player_hand), value(dealer_hand)
display_font = pygame.font.Font(None, 28)
while dv < 17:
deck, played_deck, dealer_hand = hit(deck, played_deck, dealer_hand)
dv = value(dealer_hand)
if dv < pv <= 21:
# Gracz wygrywa
funds += 2 * bet
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck, funds, bet, 0,
dealer_cards,
CardSprite)
display_font = display(display_font, "Wygrana: $%.1f." % bet)
elif pv == dv and pv <= 21:
# Remis
deck, player_hand, dealer_hand, played_deck, funds, end_round = round_end(deck, player_hand, dealer_hand,
played_deck, funds, 0, 0,
dealer_cards,
CardSprite)
display_font = display(display_font, "Remis!")
elif dv > 21 >= pv:
# Krupier przebił, a gracz nie
deck, player_hand, dealer_hand, played_deck, funds, end_ro
|
gmarkall/COFFEE
|
doc/source/conf.py
|
Python
|
bsd-3-clause
| 7,963
| 0.006405
|
# -*- coding: utf-8 -*-
#
# COFFEE documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 30 11:25:59 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'COFFEE'
copyright = u'2014, Fabio Luporini'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
execfile("../../coffee/version.py")
version = '%d.%d' % __version_info__[0:2] # noqa: pulled from coffee/version.py
# The full version, including alpha/beta/rc tags.
release = __version__ # noqa: pulled from coffee/version.py
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'COFFEEdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'COFFEE.tex', u'COFFEE Documentation',
u'Fabio Luporini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'coffee', u'COFFEE Documentation',
[u'Fabio Luporini'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'COFFEE', u'COFFEE Documentation',
u'Fabio Luporini', 'COFFEE', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
youtube/cobalt
|
third_party/web_platform_tests/tools/html5lib/html5lib/tests/test_tokenizer.py
|
Python
|
bsd-3-clause
| 6,544
| 0.000458
|
from __future__ import absolute_import, division, unicode_literals
import json
import warnings
import re
from .support import get_data_files
from html5lib.tokenizer import HTMLTokenizer
from html5lib import constants
class TokenizerTestParser(object):
    """Drives HTMLTokenizer over a test input and records the emitted tokens
    in the list-based format used by the tokenizer test fixtures."""

    def __init__(self, initialState, lastStartTag=None):
        # Tokenizer *class*; an instance is created per parse() call.
        self.tokenizer = HTMLTokenizer
        # Name of the tokenizer state attribute to start in, e.g. "dataState".
        self._state = initialState
        self._lastStartTag = lastStartTag

    def parse(self, stream, encoding=None, innerHTML=False):
        """Tokenize *stream* and return the collected output token list."""
        tokenizer = self.tokenizer(stream, encoding)
        self.outputTokens = []
        tokenizer.state = getattr(tokenizer, self._state)
        if self._lastStartTag is not None:
            tokenizer.currentToken = {"type": "startTag",
                                      "name": self._lastStartTag}
        # Invert the tokenTypes map so numeric codes resolve to names like "StartTag".
        types = dict((v, k) for k, v in constants.tokenTypes.items())
        for token in tokenizer:
            # Dispatch each token to the matching process<TypeName> method.
            getattr(self, 'process%s' % types[token["type"]])(token)
        return self.outputTokens

    def processDoctype(self, token):
        self.outputTokens.append(["DOCTYPE", token["name"], token["publicId"],
                                  token["systemId"], token["correct"]])

    def processStartTag(self, token):
        # Attributes arrive as (name, value) pairs; reversing before dict()
        # makes the first occurrence of a duplicate attribute win.
        self.outputTokens.append(["StartTag", token["name"],
                                  dict(token["data"][::-1]), token["selfClosing"]])

    def processEmptyTag(self, token):
        # An empty tag on a non-void element is a parse error per the fixtures.
        if token["name"] not in constants.voidElements:
            self.outputTokens.append("ParseError")
        self.outputTokens.append(["StartTag", token["name"], dict(token["data"][::-1])])

    def processEndTag(self, token):
        self.outputTokens.append(["EndTag", token["name"],
                                  token["selfClosing"]])

    def processComment(self, token):
        self.outputTokens.append(["Comment", token["data"]])

    def processSpaceCharacters(self, token):
        self.outputTokens.append(["Character", token["data"]])
        # After the first call, handle space runs exactly like other characters.
        self.processSpaceCharacters = self.processCharacters

    def processCharacters(self, token):
        self.outputTokens.append(["Character", token["data"]])

    def processEOF(self, token):
        # End of input produces no output token.
        pass

    def processParseError(self, token):
        self.outputTokens.append(["ParseError", token["data"]])
def concatenateCharacterTokens(tokens):
    """Merge runs of adjacent Character tokens into one token.

    Non-character tokens (including the bare "ParseError" string) break a
    run. Merging mutates the first token of each run in place.
    """
    merged = []
    for token in tokens:
        is_char = "ParseError" not in token and token[0] == "Character"
        prev_is_char = (merged and "ParseError" not in merged[-1] and
                        merged[-1][0] == "Character")
        if is_char and prev_is_char:
            merged[-1][1] += token[1]
        else:
            merged.append(token)
    return merged
def normalizeTokens(tokens):
    """Collapse ["ParseError", data] entries to the bare string "ParseError".

    Mutates *tokens* in place and returns it, matching the fixture format.
    """
    # TODO: convert tests to reflect arrays
    for index, token in enumerate(tokens):
        if token[0] == 'ParseError':
            tokens[index] = token[0]
    return tokens
def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
                ignoreErrors=False):
    """Test whether the test has passed or failed.

    If the ignoreErrorOrder flag is set to true we don't test the relative
    positions of parse errors and non parse errors.
    """
    checkSelfClosing = False
    for token in expectedTokens:
        start_with_flag = token[0] == "StartTag" and len(token) == 4
        end_with_flag = token[0] == "EndTag" and len(token) == 3
        if start_with_flag or end_with_flag:
            checkSelfClosing = True
            break
    if not checkSelfClosing:
        # Expected tokens carry no self-closing flags, so strip them from
        # the received tokens (mutating them) before comparing.
        for token in receivedTokens:
            if token[0] in ("StartTag", "EndTag"):
                token.pop()
    if not ignoreErrorOrder and not ignoreErrors:
        return expectedTokens == receivedTokens
    # Split each side into non-error and error buckets, then compare those.
    buckets = {"expected": [[], []], "received": [[], []]}
    for bucket_name, token_list in zip(list(buckets.keys()),
                                       (expectedTokens, receivedTokens)):
        for token in token_list:
            if token != "ParseError":
                buckets[bucket_name][0].append(token)
            elif not ignoreErrors:
                buckets[bucket_name][1].append(token)
    return buckets["expected"] == buckets["received"]
def unescape(test):
    """Decode \\uXXXX escapes in a doubleEscaped fixture's input and output
    tokens, in place, and return the test dict."""
    def decode(inp):
        # Round-trip through bytes so the literal "\\u0041" becomes "A".
        return inp.encode("utf-8").decode("unicode-escape")

    test["input"] = decode(test["input"])
    for token in test["output"]:
        if token == "ParseError":
            continue
        token[1] = decode(token[1])
        if len(token) > 2:
            # BUG FIX: the original iterated `for key, value in token[2]:`,
            # which unpacks dict *keys* (failing), and mutated the dict while
            # iterating it. Snapshot the items first.
            for key, value in list(token[2].items()):
                del token[2][key]
                token[2][decode(key)] = decode(value)
    return test
def runTokenizerTest(test):
    """Run a single fixture case: tokenize test['input'] and assert the
    normalized output matches test['output'] (modulo parse-error ordering)."""
    # XXX - move this out into the setup function
    # concatenate all consecutive character tokens into a single token
    warnings.resetwarnings()
    warnings.simplefilter("error")
    expected = concatenateCharacterTokens(test['output'])
    if 'lastStartTag' not in test:
        test['lastStartTag'] = None
    parser = TokenizerTestParser(test['initialState'],
                                 test['lastStartTag'])
    tokens = parser.parse(test['input'])
    tokens = concatenateCharacterTokens(tokens)
    received = normalizeTokens(tokens)
    # Diagnostic message shown when the assertion below fails.
    errorMsg = "\n".join(["\n\nInitial state:",
                          test['initialState'],
                          "\nInput:", test['input'],
                          "\nExpected:", repr(expected),
                          "\nreceived:", repr(tokens)])
    errorMsg = errorMsg  # No-op assignment; possibly vestigial — confirm before removing.
    ignoreErrorOrder = test.get('ignoreErrorOrder', False)
    assert tokensMatch(expected, received, ignoreErrorOrder, True), errorMsg
def _doCapitalize(match):
    """Uppercase the single captured word character."""
    return match.group(1).upper()


# Pre-bound sub() for "a run of non-word characters followed by one word char".
_capitalizeRe = re.compile(r"\W+(\w)").sub


def capitalize(s):
    """Lower-case *s* and camelCase it: 'Data state' -> 'dataState'."""
    lowered = s.lower()
    return _capitalizeRe(_doCapitalize, lowered)
def testTokenizer():
    """Nose-style generator: yield one (runTokenizerTest, case) per fixture entry."""
    for filename in get_data_files('tokenizer', '*.test'):
        with open(filename) as fp:
            tests = json.load(fp)
            if 'tests' in tests:
                for index, test in enumerate(tests['tests']):
                    if 'initialStates' not in test:
                        test["initialStates"] = ["Data state"]
                    if 'doubleEscaped' in test:
                        # Fixture strings are double-escaped; undo one level.
                        test = unescape(test)
                    # Run the same case once per requested initial tokenizer state.
                    for initialState in test["initialStates"]:
                        test["initialState"] = capitalize(initialState)
                        yield runTokenizerTest, test
|
madhurrajn/samashthi
|
startnow/request_processor.py
|
Python
|
bsd-3-clause
| 3,842
| 0.002603
|
import logging
import simplejson
import urllib
|
2
import datetime
import urlparse
from deploy import DEPLOY_STATUS
from google.appengine.api import urlfetch
import g
|
requests
logging.basicConfig()
logger = logging.getLogger(__name__)
class RequestProcessor:
def __init__(self, url_list):
self.url_list = url_list
self.duration_list = []
self.result = []
def process_atomic_request(self, (atime, url)):
response = simplejson.load(urllib2.urlopen(url))
status = response['status']
if status == "OK":
logger.info("Response Successfull")
row = response['rows']
for row_elem in row:
elems = row_elem['elements']
for e in elems:
duration = e['duration_in_traffic']['text']
return (utime, duration)
def parse_url_time(self, url):
query_string_dict = urlparse.parse_qs(url)
atime = query_string_dict['departure_time'][0]
utime = datetime.datetime.fromtimestamp(float(atime)).strftime('%Y-%m-%d %H:%M:%S')
return utime
def parse_rs(self, url, response):
status = response['status']
if status == "OK":
logger.info("Response Successfull")
row = response['rows']
for row_elem in row:
elems = row_elem['elements']
for e in elems:
duration = e['duration_in_traffic']['text']
logger.info("Duration deduced {}".format(duration))
return duration
def process_duration(self, duration):
item_list = duration.split(" ")
if "hour" in duration:
duration_in_min = int(item_list[0]) * 60 + int(item_list[2])
return duration_in_min
else:
return item_list[0]
def parse_reponses(self, responses):
result = []
for response in responses:
utime = self.parse_url_time(response.url)
rs = simplejson.loads(response.content)
duration = self.parse_rs(response.url, rs)
fin_duration = self.process_duration(duration)
result.append((utime, fin_duration))
return result
def process_async_requests(self, url_list):
async_list = []
print url_list
async_requests = [grequests.get(url) for time,url in url_list]
rs = grequests.map(async_requests)
result = self.parse_reponses(rs)
return result
def url_fetch_handle_responses(self, rpc):
url = rpc.request.url()
utime = self.parse_url_time(url)
response = rpc.get_result()
json_dict = simplejson.loads(response.content)
duration = self.parse_rs(url, json_dict)
fin_duration = self.process_duration(duration)
self.result.append((utime, fin_duration))
def create_callback(self, rpc):
return lambda: self.url_fetch_handle_responses(rpc)
def process_google_requests(self, url_list):
self.result = []
rpcs = []
for time,url in url_list:
rpc = urlfetch.create_rpc()
rpc.callback = self.create_callback(rpc)
urlfetch.make_fetch_call(rpc, url)
rpcs.append(rpc)
for rpc in rpcs:
rpc.wait()
logger.info("Result in list {}".format(self.result))
return self.result
def parse_param_value(self, text):
param,value=text.split("=", 1)
print param,value
return(param,value)
def process_requests(self, url_list):
try:
if DEPLOY_STATUS == 1:
return (self.process_google_requests(url_list))
else:
return (self.process_async_requests(url_list))
except Exception as e:
logger.error("Unable to open deploy.text file {}".format(e))
|
eshijia/magnum
|
magnum/conductor/template_definition.py
|
Python
|
apache-2.0
| 21,199
| 0.000047
|
# Copyright 2014 Rackspace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from pkg_resources import iter_entry_points
import requests
import six
from magnum.common import clients
from magnum.common import exception
from magnum.common import paths
from magnum.i18n import _
from magnum.i18n import _LW
LOG = logging.getLogger(__name__)
# Heat template locations and discovery endpoints for each supported
# bay type (k8s on Atomic/CoreOS, Swarm, Mesos).
# NOTE: separator artifacts that split the 'swarm.yaml' and
# 'Format string ...' literals have been rejoined.
template_def_opts = [
    cfg.StrOpt('k8s_atomic_template_path',
               default=paths.basedir_def('templates/heat-kubernetes/'
                                         'kubecluster.yaml'),
               deprecated_name='template_path',
               deprecated_group='bay_heat',
               help=_(
                   'Location of template to build a k8s cluster on atomic.')),
    cfg.StrOpt('k8s_coreos_template_path',
               default=paths.basedir_def('templates/heat-kubernetes/'
                                         'kubecluster-coreos.yaml'),
               help=_(
                   'Location of template to build a k8s cluster on CoreOS.')),
    cfg.StrOpt('etcd_discovery_service_endpoint_format',
               default='https://discovery.etcd.io/new?size=%(size)d',
               help=_('Url for etcd public discovery endpoint.')),
    cfg.StrOpt('coreos_discovery_token_url',
               default=None,
               deprecated_name='discovery_token_url',
               deprecated_group='bay_heat',
               help=_('coreos discovery token url.')),
    cfg.StrOpt('swarm_atomic_template_path',
               default=paths.basedir_def('templates/docker-swarm/'
                                         'swarm.yaml'),
               help=_('Location of template to build a swarm '
                      'cluster on atomic.')),
    cfg.StrOpt('swarm_discovery_url_format',
               default=None,
               help=_('Format string to use for swarm discovery url. '
                      'Available values: bay_id, bay_uuid. '
                      'Example: "etcd://etcd.example.com/\%(bay_uuid)s"')),
    cfg.BoolOpt('public_swarm_discovery',
                default=True,
                help=_('Indicates Swarm discovery should use public '
                       'endpoint.')),
    cfg.StrOpt('public_swarm_discovery_url',
               default='https://discovery.hub.docker.com/v1/clusters',
               help=_('Url for swarm public discovery endpoint.')),
    cfg.StrOpt('mesos_ubuntu_template_path',
               default=paths.basedir_def('templates/heat-mesos/'
                                         'mesoscluster.yaml'),
               help=_('Location of template to build a Mesos cluster '
                      'on Ubuntu.')),
    cfg.ListOpt('enabled_definitions',
                default=['magnum_vm_atomic_k8s', 'magnum_vm_coreos_k8s',
                         'magnum_vm_atomic_swarm', 'magnum_vm_ubuntu_mesos'],
                help=_('Enabled bay definition entry points.')),
]
cfg.CONF.register_opts(template_def_opts, group='bay')
class ParameterMapping(object):
    """A mapping associating heat param and bay/baymodel attr.

    A ParameterMapping is an association of a Heat parameter name with
    an attribute on a Bay, Baymodel, or both.

    In the case of both baymodel_attr and bay_attr being set, the Baymodel
    will be checked first and then Bay if the attribute isn't set on the
    Baymodel.

    Parameters can also be set as 'required'. If a required parameter
    isn't set, a RequiredParameterNotProvided exception will be raised.
    """
    def __init__(self, heat_param, baymodel_attr=None,
                 bay_attr=None, required=False,
                 param_type=lambda x: x):
        self.heat_param = heat_param
        self.baymodel_attr = baymodel_attr
        self.bay_attr = bay_attr
        self.required = required
        self.param_type = param_type

    def set_param(self, params, baymodel, bay):
        """Resolve this mapping against *baymodel*/*bay* and store the
        (possibly converted) value under the heat parameter name."""
        value = None
        # Baymodel takes precedence; fall back to the Bay attribute.
        for source, attr in ((baymodel, self.baymodel_attr),
                             (bay, self.bay_attr)):
            if attr and getattr(source, attr, None) is not None:
                value = getattr(source, attr)
                break
        if value is None and self.required:
            raise exception.RequiredParameterNotProvided(
                heat_param=self.heat_param)
        if value is not None:
            value = self.param_type(value)
        params[self.heat_param] = value
class OutputMapping(object):
    """A mapping associating heat outputs and bay attr.

    An OutputMapping is an association of a Heat output with a key
    Magnum understands.
    """

    def __init__(self, heat_output, bay_attr=None):
        self.bay_attr = bay_attr
        self.heat_output = heat_output

    def matched(self, output_key):
        """Return True when *output_key* names this mapping's heat output."""
        return output_key == self.heat_output

    def set_output(self, stack, bay):
        """Copy this mapping's stack output value onto *bay*, if mapped."""
        if self.bay_attr is None:
            return
        value = self.get_output_value(stack)
        if value is not None:
            setattr(bay, self.bay_attr, value)

    def get_output_value(self, stack):
        """Return this mapping's output value from *stack*, or None."""
        for entry in stack.outputs:
            if self.matched(entry['output_key']):
                return entry['output_value']
        LOG.warning(_LW('stack does not have output_key %s'), self.heat_output)
        return None
@six.add_metaclass(abc.ABCMeta)
class TemplateDefinition(object):
'''A mapping between Magnum objects and Heat templates.
A TemplateDefinition is essentially a mapping between Magnum objects
and Heat templates. Each TemplateDefinition has a mapping of Heat
parameters.
'''
definitions = None
provides = list()
def __init__(self):
self.param_mappings = list()
self.output_mappings = list()
@staticmethod
def load_entry_points():
for entry_point in iter_entry_points('magnum.template_definitions'):
yield entry_point, entry_point.load(require=False)
@classmethod
def get_template_definitions(cls):
'''Retrieves bay definitions from python entry_points.
Example:
With the following classes:
class TemplateDefinition1(TemplateDefinition):
provides = [
('server_type1', 'os1', 'coe1')
]
class TemplateDefinition2(TemplateDefinition):
provides = [
('server_type2', 'os2', 'coe2')
]
And the following entry_points:
magnum.template_definitions =
template_name_1 = some.python.path:TemplateDefinition1
template_name_2 = some.python.path:TemplateDefinition2
get_template_definitions will return:
{
(server_type1, os1, coe1):
{'template_name_1': TemplateDefinition1},
(server_type2, os2, coe2):
{'template_name_2': TemplateDefinition2}
}
:return: dict
'''
if not cls.definitions:
cls.definitions = dict()
for entry_point, def_class in cls.load_entry_points():
for bay_type in def_class.provides:
bay_type_tuple = (bay_type['server_type'],
bay_type['os'],
bay_type['coe'])
providers = cls.definitions.setdefault(bay_type_tuple,
dict())
providers[entry_point.name] = def_class
re
|
chapel-lang/pychapel
|
module/ext/src/chapel/muahaha.py
|
Python
|
apache-2.0
| 444
| 0.011261
|
#!/usr/bin/env python
from __future__ import print_function
import ctypes
def main():
    """Load the Chapel shared library, run ``nicestuff`` once, then shut
    the Chapel runtime down.

    Returns the loaded ctypes library handle.
    """
    chpl = ctypes.cdll.LoadLibrary("chapel.so")
    chpl.chpl_library_init.argtypes = [ctypes.c_int,
                                       ctypes.POINTER(ctypes.c_char_p)]
    # Build a one-element argv array holding the program name.
    # (Rejoined the ``(ctypes.c_char_p*1)()`` expression that had been
    # split by an extraction artifact.)
    argv = (ctypes.c_char_p * 1)()
    argv[0] = "chapel"
    chpl.chpl_library_init(0, argv)
    print(chpl.nicestuff(1000, 1000, 20))
    chpl.chpl_library_finalize()
    return chpl
if __name__ == "__main__":
main()
|
yanheven/ali-opensearch-sdk
|
opensearchsdk/tests/v2/test_search.py
|
Python
|
apache-2.0
| 1,190
| 0
|
import mock
from opensearchsdk.apiclient.api_base import Manager
from opensearchsdk.tests import base
from opensearchsdk.v2.search import SearchManager


class AppTest(base.TestCase):
    """Exercises SearchManager.search request-body construction."""

    def setUp(self):
        super(AppTest, self).setUp()
        self.search_manager = SearchManager('', '')
        # Stub the transport so no HTTP request is actually sent.
        mock_send = mock.Mock()
        Manager.send_get = Manager.send_post = mock_send

    def test_search(self):
        # simple search
        self.search_manager.search('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h')
        body = dict(query='a', index_name='b', fetch_fields='c', qp='d',
                    disable='e', first_formula_name='f', formula_name='g',
                    summary='h')
        Manager.send_get.assert_called_with(body)
        # first combine search
        body['scroll'] = '1h'
        body['search_type'] = 'scan'
        self.search_manager.search('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
                                   '1h', 'scan')
        Manager.send_get.assert_called_with(body)
        # second combine search
        body = dict(scroll='1h', scroll_id='i')
        self.search_manager.search(**body)
        Manager.send_get.assert_called_with(body)
|
iffy/norm
|
setup.py
|
Python
|
mit
| 737
| 0.006784
|
# Copyright (c) Matt Haggard.
# See LICENSE for details.
from distutils.core import setup
import os, re
def getVersion():
    """Extract the package version from ``norm/__init__.py``.

    Raises:
        Exception: if no ``__version__ = '...'`` line is found.
    """
    r_version = re.compile(r"__version__\s*=\s*'(.*?)'")
    base_init = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'norm/__init__.py')
    # Use a context manager instead of the original bare open().read(),
    # which leaked the file handle.
    with open(base_init, 'r') as f:
        guts = f.read()
    m = r_version.search(guts)
    if not m:
        raise Exception("Could not find version information")
    return m.group(1)
# Package metadata; the 'packages=[' line had been split by an
# extraction artifact and is rejoined here.
setup(
    url='https://github.com/iffy/norm',
    author='Matt Haggard',
    author_email='haggardii@gmail.com',
    name='norm',
    version=getVersion(),
    packages=[
        'norm', 'norm.test',
        'norm.orm', 'norm.orm.test',
    ],
    requires=[
        'Twisted',
    ]
)
|
mmichie/luckystrike
|
luckystrike/util.py
|
Python
|
mit
| 1,107
| 0.009033
|
import config
import random
import re
import string
import sys
import traceback
from twisted.python import log
|
def generate_password(length=12):
    """
    Generate a random password, ASCII letters + digits only, default length 12
    """
    # SystemRandom draws from the OS entropy pool; the module-level random
    # functions are not suitable for security-sensitive values.
    rng = random.SystemRandom()
    charset = string.ascii_letters + string.digits
    return ''.join(rng.choice(charset) for _ in range(length))
def campNameToString(name):
    """
    Translate Campfire String to one renderable on IRC for nicknames, or channels
    """
    # Raw string avoids invalid-escape-sequence warnings; any run of
    # whitespace collapses to a single underscore.
    return re.sub(r'\s+', '_', name).lower()
def channel_to_room(channel):
    """
    Given an IRC channel, return a Campfire room object

    Implicitly returns None when no configured room matches *channel*.
    """
    # config.rooms maps room ids to room dicts; match on the IRC channel
    # name stored under each room's 'channel' key.
    for room_id, room in config.rooms.iteritems():
        if room['channel'] == channel:
            return room
def error(e):
    """
    Take an exception and render it to an IRC notice message for all clients
    """
    # sys.exc_info() captures the exception currently being handled, so
    # this helper must be called from inside an except block.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    log.err(e)
    for user_name, client in config.irc_users.iteritems():
        # Send the formatted traceback line-by-line as IRC notices.
        for line in traceback.format_exception(exc_type, exc_value, exc_traceback):
            client.notice('LuckyStrike', user_name, line)
|
avrong/timeago
|
setup.py
|
Python
|
mit
| 1,607
| 0.018738
|
# -*- coding: utf-8 -*-
from distutils.core import setup
from setuptools import find_packages
LONGDOC = """
A very simple python library, used to format datetime with *** time ago statement.
Install
pip install timeago
Usage
import timeago, datetime
d = datetime.datetime.now() + datetime.timedelta(seconds = 60 * 3.4)
# locale
print (timeago.format(d, locale='zh_CN')) # will print 3分钟后
"""
# Package metadata; a classifier string split by an extraction artifact
# ('Programmi|ng Language :: Python :: 2') has been rejoined.
setup(name='timeago',
      version='1.0.7',
      description='A very simple python library, used to format datetime with `*** time ago` statement. eg: "3 hours ago".',
      long_description=LONGDOC,
      author='hustcc',
      author_email='i@hust.cc',
      url='https://github.com/hustcc/timeago',
      license='MIT',
      install_requires=[],
      classifiers=[
          'Intended Audience :: Developers',
          'Operating System :: OS Independent',
          'Natural Language :: Chinese (Simplified)',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.5',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Topic :: Utilities'
      ],
      keywords='timeago, seconds ago, minutes ago, hours ago, just now',
      packages=find_packages('src'),
      package_dir={'': 'src'},
      )
|
Return0Software/Flix-with-Friends
|
src/gtk-gui/SearchBar.py
|
Python
|
gpl-2.0
| 11,298
| 0.002655
|
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject
import random
import re
import datetime
class GenrePop(Gtk.Popover):
    """Popover listing checkable genre buttons for filtering."""
    __gsignals__ = {
        "genres-updated": (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (object,))
    }
    def __init__(self, db):
        Gtk.Popover.__init__(self)
        self.genres = []
        container = Gtk.ButtonBox(orientation=Gtk.Orientation.VERTICAL)
        for genre_name in db.listGenres:
            check = Gtk.ModelButton(text=genre_name, role=Gtk.ButtonRole.CHECK,
                                    centered=False)
            check.connect("clicked", self.genre_cb)
            container.add(check)
        self.add(container)
    def genre_cb(self, button):
        """Toggle the button and broadcast the updated genre selection."""
        now_active = not button.get_property("active")
        button.set_property("active", now_active)
        name = button.get_property("text")
        if now_active:
            self.genres.append(name)
        else:
            self.genres.remove(name)
        self.emit("genres-updated", self.genres)
class RatingPop(Gtk.Popover):
    """Creates a popover to filter by minimum rating"""
    __gsignals__ = {
        "rating-updated": (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (object,))
    }
    def __init__(self):
        Gtk.Popover.__init__(self)
        # NOTE(review): new_with_range() is a constructor, so the keyword
        # arguments passed to Gtk.Scale(...) on this line appear to be
        # discarded and a fresh 0-10 scale returned -- confirm intentional.
        self.scale = Gtk.Scale(draw_value=True, has_origin=True,
                               value_pos=0).new_with_range(Gtk.Orientation.HORIZONTAL, 0, 10, 1)
        self.scale.connect("value-changed", self.scale_cb)
        # Tick marks for each whole rating 1..10.
        i = 1
        while i <= 10:
            self.scale.add_mark(i, Gtk.PositionType.TOP)
            i += 1
        self.scale.set_size_request(150, 40)
        box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5, margin=5)
        label = Gtk.Label(label="Choose a\nminimum rating:", justify=Gtk.Justification.CENTER)
        box.add(label)
        box.add(self.scale)
        self.add(box)
    def scale_cb(self, scale):
        # Broadcast the chosen minimum rating to listeners.
        self.emit("rating-updated", scale.get_value())
class DatePop(Gtk.Popover):
    """Creates a popover to filter by release date.

    Emits 'year-updated' with the selected year text and 'switch-updated'
    with the on/off state of the year filter. (Rejoined the
    ``append_text(str(x))`` and ``Gtk.Box(`` expressions that had been
    split by extraction artifacts.)
    """
    __gsignals__ = {
        "switch-updated": (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (object,)),
        "year-updated": (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (object,))
    }
    def __init__(self, db):
        Gtk.Popover.__init__(self)
        self.switch = Gtk.Switch(active=False, state=False)
        self.switch.connect("state-set", self.switch_cb)
        self.combo = Gtk.ComboBoxText(wrap_width=4)
        self.combo.connect("changed", self.combo_cb)
        # Populate with every year from the current year down to the
        # oldest year in the database.
        x = datetime.datetime.now().year
        while x >= db.oldest_year:
            self.combo.append_text(str(x))
            x -= 1
        self.combo.set_active(datetime.datetime.now().year - db.oldest_year)
        label = Gtk.Label(label="Search for movies produced\nonly in the year above",
                          justify=Gtk.Justification.CENTER)
        switchBox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
        switchBox.add(label)
        switchBox.add(self.switch)
        dateBox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, margin=5, spacing=5)
        dateBox.add(self.combo)
        dateBox.add(switchBox)
        self.add(dateBox)
    def switch_cb(self, switch, state):
        """Forward the year-filter on/off state to listeners."""
        self.emit("switch-updated", state)
    def combo_cb(self, combo):
        """Forward the newly selected year text to listeners."""
        self.emit("year-updated", combo.get_active_text())
class ViewedByPop(Gtk.Popover):
    """Popover of checkable viewer names for 'never seen by' filtering."""
    __gsignals__ = {
        "friends-updated": (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (object,))
    }
    def __init__(self, db):
        Gtk.Popover.__init__(self)
        self.friends = []
        container = Gtk.ButtonBox(orientation=Gtk.Orientation.VERTICAL)
        for viewer in db.viewers:
            check = Gtk.ModelButton(text=viewer, role=Gtk.ButtonRole.CHECK,
                                    centered=False)
            check.connect("clicked", self.friend_cb)
            container.add(check)
        self.add(container)
    def friend_cb(self, button):
        """Toggle the button and broadcast the updated friend selection."""
        now_active = not button.get_property("active")
        button.set_property("active", now_active)
        name = button.get_property("text")
        if now_active:
            self.friends.append(name)
        else:
            self.friends.remove(name)
        self.emit("friends-updated", self.friends)
class SearchBar(Gtk.Revealer):
"""Creates a search bar with an entry and filters"""
__gsignals__ = {
"search-ran": (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (object,))
}
    def __init__(self, db):
        """Build the search entry plus the four filter popover buttons
        (genre, rating, release date, never-seen-by) inside a revealer.
        """
        Gtk.Revealer.__init__(self, transition_duration=300)
        self.entry = None
        self.db = db
        # Current filter state, updated by the *_cb handlers below.
        self.genres = []
        self.rating = 0
        self.switchState = False
        self.searchYear = db.oldest_year
        self.friends = []
        filters = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, margin=5) # box for the 4 search filters
        filters.get_style_context().add_class("linked")
        criteria = Gtk.Box()
        criteria.pack_start(filters, True, False, 0)
        criteria.get_style_context().add_class("inline-toolbar")
        self.set_property("child", criteria)
        # Text entry re-runs the search on both edit and activation.
        self.entry = Gtk.SearchEntry()
        self.entry.set_can_focus(True)
        self.entry.set_size_request(250, -1)
        self.entry.connect("activate", self.entryUpdate_cb)
        self.entry.connect("changed", self.entryUpdate_cb)
        filters.pack_start(self.entry, True, True, 0)
        # One popover per filter; their custom signals feed our state.
        genrePop = GenrePop(db)
        ratingPop = RatingPop()
        datePop = DatePop(db)
        viewedByPop = ViewedByPop(db)
        genrePop.connect("genres-updated", self.genresUpdate_cb)
        ratingPop.connect("rating-updated", self.ratingUpdate_cb)
        datePop.connect("switch-updated", self.switchUpdate_cb)
        datePop.connect("year-updated", self.yearUpdate_cb)
        viewedByPop.connect("friends-updated", self.friendsUpdate_cb)
        # creating the menu buttons
        self.genreButton = Gtk.MenuButton(label="Genre", use_popover=True,
                                          popover=genrePop)
        self.genreButton.set_size_request(100, -1)
        self.ratingButton = Gtk.MenuButton(label="Rating", use_popover=True,
                                           popover=ratingPop)
        self.ratingButton.set_size_request(100, -1)
        self.dateButton = Gtk.MenuButton(label="Release Date", use_popover=True,
                                         popover=datePop)
        self.dateButton.set_size_request(100, -1)
        self.viewedByButton = Gtk.MenuButton(label="Never Seen By", use_popover=True,
                                             popover=viewedByPop)
        self.viewedByButton.set_size_request(100, -1)
        # connect the buttons to their callbacks
        self.genreButton.connect("toggled", self.showPopover_cb, genrePop)
        self.dateButton.connect("toggled", self.showPopover_cb, datePop)
        self.ratingButton.connect("toggled", self.showPopover_cb, ratingPop)
        self.viewedByButton.connect("toggled", self.showPopover_cb, viewedByPop)
        filters.pack_start(self.genreButton, True, True, 0)
        filters.pack_start(self.ratingButton, True, True, 0)
        filters.pack_start(self.dateButton, True, True, 0)
        filters.pack_end(self.viewedByButton, True, True, 0)
    def entryUpdate_cb(self, entry):
        # Re-run the search whenever the text entry changes or activates.
        self.run_search()
    def genresUpdate_cb(self, pop, genres):
        # Store the selected genres from GenrePop and refresh results.
        self.genres = genres
        self.run_search()
    def ratingUpdate_cb(self, pop, rating):
        # Store the minimum rating from RatingPop and refresh results.
        self.rating = rating
        self.run_search()
    def switchUpdate_cb(self, pop, state):
        # Store whether the year filter is enabled and refresh results.
        self.switchState = state
        self.run_search()
    def yearUpdate_cb(self, pop, year):
        # Store the selected release year from DatePop and refresh results.
        self.searchYear = year
        self.run_search()
    def friendsUpdate_cb(self, pop, friends):
        # Store the selected friends from ViewedByPop and refresh results.
        self.friends = friends
        self.run_search()
    def showPopover_cb(self, button, pop):
        # Realize the popover's children so its contents are visible.
        pop.show_all()
def run_search(self, show=True): # put main windowStack on a revealer. if rand
|
inveniosoftware/invenio-theme
|
invenio_theme/views.py
|
Python
|
mit
| 1,481
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio error handlers."""
from __future__ import absolute_import, print_function
from flask import Blueprint, current_app, render_template
# Flask blueprint for the theme's front page; the ``__name__`` token had
# been split by an extraction artifact and is rejoined here.
blueprint = Blueprint(
    'invenio_theme_frontpage',
    __name__
)
@blueprint.route('/')
def index():
    """Simplistic front page view."""
    template_name = current_app.config['THEME_FRONTPAGE_TEMPLATE']
    return render_template(template_name)
def unauthorized(e):
    """Error handler to show a 401.html page in case of a 401 error."""
    template = current_app.config['THEME_401_TEMPLATE']
    return render_template(template), 401
def insufficient_permissions(e):
    """Error handler to show a 403.html page in case of a 403 error."""
    template = current_app.config['THEME_403_TEMPLATE']
    return render_template(template), 403
def page_not_found(e):
    """Error handler to show a 404.html page in case of a 404 error."""
    template = current_app.config['THEME_404_TEMPLATE']
    return render_template(template), 404
def too_many_requests(e):
    """Error handler to show a 429.html page in case of a 429 error."""
    template = current_app.config['THEME_429_TEMPLATE']
    return render_template(template), 429
def internal_error(e):
    """Error handler to show a 500.html page in case of a 500 error."""
    template = current_app.config['THEME_500_TEMPLATE']
    return render_template(template), 500
|
kg-bot/SupyBot
|
plugins/Holdem/config.py
|
Python
|
gpl-3.0
| 2,352
| 0.000425
|
###
# Copyright (c) 2005, Jeremy Kelley
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
#     derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    # This will be called by supybot to configure this module. advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not. You should effect your configuration by manipulating the
    # registry as appropriate.
    # NOTE: the supybot.questions imports below are plugin-template
    # boilerplate; they are unused because Holdem asks no questions.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('Holdem', True)
Holdem = conf.registerPlugin('Holdem')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(SecIntel, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
# vim:set shiftwidth=4 tabstop=8 expandtab textwidth=78
|
nkremerh/cctools
|
resource_monitor/src/bindings/python3/example_simple_limit.py
|
Python
|
gpl-2.0
| 791
| 0.010114
|
import resource_monitor
import sys
import time
@resource_monitor.monitored(limits = {'wall_time': 1e6}) # wall_time in microseconds
def my_function(n):
    """Sleep for *n* seconds under the resource monitor and return *n*.

    With the 1e6-microsecond wall_time limit above, calls that sleep
    longer than ~1 second are expected to trigger the monitor (see the
    exception handling in the script body below).
    """
    sys.stdout.write("waiting for {time} seconds...".format(time=n))
    time.sleep(n)
    sys.stdout.write("done.\n")
    return n
# First call stays well under the 1-second wall_time limit; any
# exception here is a test failure.
try:
    (output, resources) = my_function(0.5)
except Exception as e:
    sys.stdout.write("\nGot exception <{err}>, but did not expect any error.\n".format(err=e))
    sys.exit(1)

# Second call sleeps past the limit, so the monitor should raise
# ResourceExhaustion. (The exception name had been split by an
# extraction artifact and is rejoined here.)
try:
    (output, resources) = my_function(2)
except resource_monitor.ResourceExhaustion as e:
    sys.stdout.write("\nGot expected exception <{err}>.\n".format(err=e))
except Exception as e:
    sys.stdout.write("\nGot exception <{err}>, but did not expect such error.\n".format(err=e))
    sys.exit(1)
sys.exit(0)
|
electricity345/community.csdt
|
src/community_csdt/community_csdt/src/models/pages/page.py
|
Python
|
mit
| 374
| 0.005348
|
import json
import logging
from community_csdt.src.models import database
class Page(object):
    """Tree node holding a parent reference and a name.

    ``__getitem__`` always raises KeyError, so this class itself never
    resolves child keys.
    """
    def __init__(self, parent, name):
        self.__parent__ = parent
        self.__name__ = name

    def __getitem__(self, key):
        log = logging.getLogger('csdt')
        log.info("Page.__getitem__()")
        # Lazy %-style args avoid string formatting when DEBUG is off.
        log.debug("key = %s", key)
        # Include the key so the failure message is self-describing
        # (the original raised a bare KeyError with no argument).
        raise KeyError(key)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.