code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# Conector para fiberupload
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve playable media URLs for *page_url*.

    Not implemented for this hoster: the call is only logged and an
    empty list of entries is returned.
    """
    logger.info("[fiberupload.py] get_video_url(page_url='%s')" % page_url)
    return []
# Finds this server's videos in the given text
def find_videos(data):
    """Return one [title, url, server] entry per distinct fiberupload
    link found in *data*, logging and skipping duplicates."""
    # e.g. http://fiberupload.com/3jsvp7bm2lcw/Emergo.DVDRip.avi
    patronvideos = '(fiberupload.com/[a-z0-9]+)'
    logger.info("[fiberupload.py] find_videos #"+patronvideos+"#")
    vistos = set()
    resultado = []
    for match in re.findall(patronvideos, data, re.DOTALL):
        url = "http://" + match
        if url in vistos:
            logger.info(" url duplicada="+url)
            continue
        logger.info(" url="+url)
        resultado.append(["[fiberupload]", url, 'fiberupload'])
        vistos.add(url)
    return resultado
| conejoninja/xbmc-seriesly | servers/fiberupload.py | Python | gpl-3.0 | 1,344 |
__package__ = 'pgeo'
__author__ = 'Barbaglia, Guido - Murzilli, Simone'
__email__ = 'guido.barbaglia@gmail.com; simone.murzilli@gmail.com;'
__license__ = 'GPL2'
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslotest.base
from designate import objects
from designate.objects import adapters
LOG = logging.getLogger(__name__)
class DesignateTestAdapter(adapters.DesignateAdapter):
    """Minimal concrete adapter for the fake 'TEST_API' format.

    Adapts the base DesignateObject with no field or option
    modifications, so the tests below can exercise adapter lookup and
    rendering without depending on a real API format.
    """
    ADAPTER_OBJECT = objects.DesignateObject
    ADAPTER_FORMAT = 'TEST_API'
    MODIFICATIONS = {
        'fields': {},
        'options': {}
    }
class DesignateAdapterTest(oslotest.base.BaseTestCase):
    """Smoke tests for adapter lookup and rendering via 'TEST_API'."""
    def test_get_object_adapter(self):
        # Looking up the TEST_API adapter for a bare object must not raise.
        adapters.DesignateAdapter.get_object_adapter(
            'TEST_API', objects.DesignateObject())
    def test_object_render(self):
        # Rendering a bare object through the TEST_API adapter must not raise.
        adapters.DesignateAdapter.render('TEST_API', objects.DesignateObject())
| tonyli71/designate | designate/tests/unit/test_objects/test_adapters.py | Python | apache-2.0 | 1,337 |
from csv import DictReader
from django.core.management.base import BaseCommand
from registrations.models import ClinicCode
class Command(BaseCommand):
    help = (
        "This command takes in a CSV with the columns: uid, code, facility, province,"
        "and location, and creates/updates the cliniccodes in the database."
        "This will only add or update, it will not remove"
    )

    # First two letters of the province column -> ISO 3166-2:ZA code.
    # Hoisted to a class constant so it is not rebuilt on every CSV row.
    PROVINCE_CODES = {
        "ec": "ZA-EC",
        "fs": "ZA-FS",
        "gp": "ZA-GT",
        "kz": "ZA-NL",
        "lp": "ZA-LP",
        "mp": "ZA-MP",
        "nc": "ZA-NC",
        "nw": "ZA-NW",
        "wc": "ZA-WC",
    }

    def add_arguments(self, parser):
        parser.add_argument("data_csv", type=str, help=("The CSV with the data in it"))

    def normalise_location(self, location):
        """
        Normalises the location from `[longitude,latitude]` to ISO 6709,
        e.g. "[28.123,-25.5]" -> "-25.5+028.123/".

        Returns None when the value is missing or cannot be parsed.
        """

        def fractional_part(f):
            # Reuse the textual digits after the decimal point so we do
            # not introduce float-repr noise (e.g. 0.1 -> "0.1000000...").
            if not float(f) % 1:
                return ""
            return f".{f.split('.')[1]}"

        try:
            # strip() first so whitespace around the brackets does not
            # prevent strip("[]") from removing them.
            longitude, latitude = location.strip().strip("[]").split(",")
            # Format the sign explicitly: the previous "%+03d"-style
            # formatting applied the sign to the truncated integer, so
            # values in (-1, 0) (e.g. latitude -0.5) lost their minus
            # sign and were emitted as "+00.5" instead of "-00.5".
            lat_sign = "-" if float(latitude) < 0 else "+"
            lon_sign = "-" if float(longitude) < 0 else "+"
            return (
                f"{lat_sign}{abs(int(float(latitude))):02d}{fractional_part(latitude)}"
                f"{lon_sign}{abs(int(float(longitude))):03d}{fractional_part(longitude)}"
                "/"
            )
        except (AttributeError, ValueError, TypeError):
            return None

    def handle(self, *args, **kwargs):
        """Read the CSV named by data_csv and upsert each row's ClinicCode."""
        updated = 0
        created = 0
        with open(kwargs["data_csv"]) as f:
            reader = DictReader(f)
            for row in reader:
                _, new = ClinicCode.objects.update_or_create(
                    uid=row["uid"].strip(),
                    defaults={
                        "code": row["code"].strip(),
                        "value": row["code"].strip(),
                        "name": row["facility"].strip(),
                        "province": self.PROVINCE_CODES[
                            row["province"].strip()[:2].lower()
                        ],
                        # Pass the raw value: normalise_location strips it
                        # and returns None for missing/malformed input
                        # instead of crashing on a non-string value here.
                        "location": self.normalise_location(row["location"]),
                    },
                )
                if new:
                    created += 1
                else:
                    updated += 1
        self.success(f"Updated {updated} and created {created} clinic codes")

    def log(self, level, msg):
        # `level` is a style callable (e.g. self.style.SUCCESS) applied to msg.
        self.stdout.write(level(msg))

    def success(self, msg):
        """Write *msg* to stdout in the SUCCESS style."""
        self.log(self.style.SUCCESS, msg)
| praekeltfoundation/ndoh-hub | registrations/management/commands/upload_clinic_codes.py | Python | bsd-3-clause | 2,626 |
#!/usr/bin/python
from mailticket import MailTicket
import settings
import filtres
import correu
import sys
import getopt
import logging
from io import StringIO
logger = logging.getLogger()
# Possible processing outcomes.
ERROR = "ERROR"
SUCCESS = "SUCCESS"
SKIP = "SKIP"
REJECT = "REJECT"
UNKNOWN = "UNKNOWN"

# Shell exit code for each outcome; anything unexpected maps to -1.
_CODIS_SORTIDA = {
    SUCCESS: 0,
    SKIP: 0,
    ERROR: 1,
    REJECT: 2,
    UNKNOWN: 3,
}


def codi_sortida(estat):
    """Translate a processing outcome into the script's exit code."""
    return _CODIS_SORTIDA.get(estat, -1)
if __name__ == '__main__':
    # Optional -c <config file>; `a` keeps the last option argument so it
    # can be logged once logging is configured.
    a = None
    opts, args = getopt.getopt(sys.argv[1:], 'c:')
    for o, a in opts:
        if o == '-c':
            settings.load(a)
    logging.basicConfig(
        filename=settings.get("log_file"),
        level=settings.get("log_level"),
        format='%(asctime)s [%(process)d] %(name)-12s'
        ' %(levelname)-8s %(message)s'
    )
    # Mirror all log output into a buffer so it can be mailed on failure.
    buffer_logs = StringIO()
    logger.addHandler(logging.StreamHandler(buffer_logs))
    if a is not None:
        logger.info("Fitxer de configuracio [%s]", a)
    estat = UNKNOWN
    tractat = False
    # Defined before the try so the finally block can tell whether the
    # mail was ever successfully parsed.
    mail = None
    try:
        logger.info("-----------------------------------------------------")
        logger.info("Llegeixo mail")
        mail = MailTicket(sys.stdin.buffer)
        logger.info("Mail de %s llegit amb ID %s"
                    % (mail.get_from(), mail.get_header('message-id')))
        if mail.cal_tractar():
            if filtres.aplicar_filtres(mail):
                tractat = True
                estat = SUCCESS
                logger.info("Marco el mail com a tractat")
            else:
                estat = REJECT
                logger.info("Rebutjo el mail per no passar els filtres")
        else:
            estat = SKIP
            logger.info("No cal tractar el mail %s" % mail.get_subject_ascii())
    except Exception:
        estat = ERROR
        logger.exception(
            "Ha petat algun dels filtres i no marco el mail com a tractat"
        )
    finally:
        # Guard against MailTicket() itself having failed: previously the
        # unconditional access raised a NameError on `mail` here, masking
        # the intended ERROR exit code with a traceback.
        if mail is not None:
            mail.msg['X-Mailtoticket'] = estat
            if not settings.get("no_escriure_sortida"):
                print(mail)
        logger.info("-----------------------------------------------------")
        if mail is not None and not tractat and settings.get("notificar_errors"):
            correu.enviar(buffer_logs.getvalue(), mail.msg)
    sys.exit(codi_sortida(estat))
| UPC/mailtoticket | mailtoticket.py | Python | agpl-3.0 | 2,370 |
#!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynnh <aquynh@gmail.com>
from __future__ import print_function
from capstone import *
from capstone.ppc import *
from xprint import to_x, to_hex, to_x_32
PPC_CODE = b"\x80\x20\x00\x00\x80\x3f\x00\x00\x10\x43\x23\x0e\xd0\x44\x00\x80\x4c\x43\x22\x02\x2d\x03\x00\x80\x7c\x43\x20\x14\x7c\x43\x20\x93\x4f\x20\x00\x21\x4c\xc8\x00\x21"
all_tests = (
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64"),
)
def print_insn_detail(insn):
    """Print one disassembled PPC instruction: address, mnemonic,
    operands, branch code/hint and CR0-update flag."""
    # print address, mnemonic and operands
    print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))

    # "data" instruction generated by SKIPDATA option has no detail
    if insn.id == 0:
        return

    operands = insn.operands
    if len(operands) > 0:
        print("\top_count: %u" % len(operands))
        for index, op in enumerate(operands):
            if op.type == PPC_OP_REG:
                print("\t\toperands[%u].type: REG = %s" % (index, insn.reg_name(op.reg)))
            if op.type == PPC_OP_IMM:
                print("\t\toperands[%u].type: IMM = 0x%s" % (index, to_x_32(op.imm)))
            if op.type == PPC_OP_MEM:
                print("\t\toperands[%u].type: MEM" % index)
                if op.mem.base != 0:
                    print("\t\t\toperands[%u].mem.base: REG = %s"
                          % (index, insn.reg_name(op.mem.base)))
                if op.mem.disp != 0:
                    print("\t\t\toperands[%u].mem.disp: 0x%s"
                          % (index, to_x_32(op.mem.disp)))

    if insn.bc:
        print("\tBranch code: %u" % insn.bc)
    if insn.bh:
        print("\tBranch hint: %u" % insn.bh)
    if insn.update_cr0:
        print("\tUpdate-CR0: True")
# ## Test class Cs
def test_class():
    """Disassemble every (arch, mode, code) platform in all_tests and
    dump per-instruction details; CsError is reported, not raised."""
    for arch, mode, code, comment in all_tests:
        print("*" * 16)
        print("Platform: %s" % comment)
        print("Code: %s" % to_hex(code))
        print("Disasm:")
        try:
            engine = Cs(arch, mode)
            engine.detail = True
            for insn in engine.disasm(code, 0x1000):
                print_insn_detail(insn)
                print()
                print("0x%x:\n" % (insn.address + insn.size))
        except CsError as err:
            print("ERROR: %s" % err)


if __name__ == '__main__':
    test_class()
| jbremer/monitor | src/capstone/bindings/python/test_ppc.py | Python | gpl-3.0 | 2,291 |
import colorsys
from dataclasses import asdict, is_dataclass
from enum import Enum
import logging
import random
import time
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union
from .const import ( # imported for back compat, remove once Home Assistant no longer uses
ADDRESSABLE_STATE_CHANGE_LATENCY,
ATTR_MODEL,
ATTR_MODEL_DESCRIPTION,
CHANNEL_STATES,
COLOR_MODE_CCT,
COLOR_MODE_DIM,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODES_RGB,
COLOR_MODES_RGB_CCT,
COLOR_MODES_RGB_W,
DEFAULT_MODE,
DEFAULT_WHITE_CHANNEL_TYPE,
EFFECT_MUSIC,
EFFECT_RANDOM,
MAX_TEMP,
MODE_COLOR,
MODE_CUSTOM,
MODE_MUSIC,
MODE_PRESET,
MODE_SWITCH,
MODE_WW,
MODEL_NUMS_SWITCHS,
NEVER_TIME,
POWER_STATE_CHANGE_LATENCY,
PRESET_MUSIC_MODE,
PRESET_MUSIC_MODE_LEGACY,
PRESET_MUSIC_MODES,
PRESET_PATTERN_CHANGE_LATENCY,
STATE_BLUE,
STATE_CHANGE_LATENCY,
STATE_COOL_WHITE,
STATE_GREEN,
STATE_POWER_STATE,
STATE_RED,
STATE_WARM_WHITE,
STATIC_MODES,
WRITE_ALL_COLORS,
WRITE_ALL_WHITES,
LevelWriteMode,
WhiteChannelType,
)
from .models_db import (
BASE_MODE_MAP,
HARDWARE_MAP,
LEDENETHardware,
LEDENETModel,
get_model,
is_known_model,
)
from .pattern import (
ADDRESSABLE_EFFECT_ID_NAME,
ADDRESSABLE_EFFECT_NAME_ID,
ASSESSABLE_MULTI_COLOR_ID_NAME,
CHRISTMAS_ADDRESSABLE_EFFECT_ID_NAME,
CHRISTMAS_ADDRESSABLE_EFFECT_NAME_ID,
EFFECT_CUSTOM,
EFFECT_CUSTOM_CODE,
EFFECT_ID_NAME,
EFFECT_ID_NAME_LEGACY_CCT,
EFFECT_LIST,
EFFECT_LIST_DIMMABLE,
EFFECT_LIST_LEGACY_CCT,
ORIGINAL_ADDRESSABLE_EFFECT_ID_NAME,
ORIGINAL_ADDRESSABLE_EFFECT_NAME_ID,
PresetPattern,
)
from .protocol import (
PROTOCOL_LEDENET_8BYTE,
PROTOCOL_LEDENET_8BYTE_AUTO_ON,
PROTOCOL_LEDENET_8BYTE_DIMMABLE_EFFECTS,
PROTOCOL_LEDENET_9BYTE,
PROTOCOL_LEDENET_9BYTE_AUTO_ON,
PROTOCOL_LEDENET_9BYTE_DIMMABLE_EFFECTS,
PROTOCOL_LEDENET_ADDRESSABLE_A1,
PROTOCOL_LEDENET_ADDRESSABLE_A2,
PROTOCOL_LEDENET_ADDRESSABLE_A3,
PROTOCOL_LEDENET_ADDRESSABLE_CHRISTMAS,
PROTOCOL_LEDENET_CCT,
PROTOCOL_LEDENET_ORIGINAL,
PROTOCOL_LEDENET_ORIGINAL_CCT,
PROTOCOL_LEDENET_SOCKET,
LEDENETAddressableDeviceConfiguration,
LEDENETOriginalRawState,
LEDENETRawState,
ProtocolLEDENET8Byte,
ProtocolLEDENET8ByteAutoOn,
ProtocolLEDENET8ByteDimmableEffects,
ProtocolLEDENET9Byte,
ProtocolLEDENET9ByteAutoOn,
ProtocolLEDENET9ByteDimmableEffects,
ProtocolLEDENETAddressableA1,
ProtocolLEDENETAddressableA2,
ProtocolLEDENETAddressableA3,
ProtocolLEDENETAddressableChristmas,
ProtocolLEDENETCCT,
ProtocolLEDENETOriginal,
ProtocolLEDENETOriginalCCT,
ProtocolLEDENETSocket,
RemoteConfig,
)
from .scanner import FluxLEDDiscovery, is_legacy_device
from .timer import BuiltInTimer
from .utils import scaled_color_temp_to_white_levels, utils, white_levels_to_color_temp
_LOGGER = logging.getLogger(__name__)
PROTOCOL_PROBES: Tuple[Type[ProtocolLEDENET8Byte], Type[ProtocolLEDENETOriginal]] = (
ProtocolLEDENET8Byte,
ProtocolLEDENETOriginal,
)
PROTOCOL_PROBES_LEGACY: Tuple[
Type[ProtocolLEDENETOriginal], Type[ProtocolLEDENET8Byte]
] = (ProtocolLEDENETOriginal, ProtocolLEDENET8Byte)
PROTOCOL_TYPES = Union[
ProtocolLEDENET8Byte,
ProtocolLEDENET8ByteAutoOn,
ProtocolLEDENET8ByteDimmableEffects,
ProtocolLEDENET9Byte,
ProtocolLEDENET9ByteAutoOn,
ProtocolLEDENET9ByteDimmableEffects,
ProtocolLEDENETAddressableA1,
ProtocolLEDENETAddressableA2,
ProtocolLEDENETAddressableA3,
ProtocolLEDENETOriginal,
ProtocolLEDENETOriginalCCT,
ProtocolLEDENETCCT,
ProtocolLEDENETSocket,
ProtocolLEDENETAddressableChristmas,
]
ADDRESSABLE_PROTOCOLS = {
PROTOCOL_LEDENET_ADDRESSABLE_A1,
PROTOCOL_LEDENET_ADDRESSABLE_A2,
PROTOCOL_LEDENET_ADDRESSABLE_A3,
}
ALL_ADDRESSABLE_PROTOCOLS = (
ProtocolLEDENETAddressableA1,
ProtocolLEDENETAddressableA2,
ProtocolLEDENETAddressableA3,
)
ALL_IC_PROTOCOLS = (ProtocolLEDENETAddressableChristmas, *ALL_ADDRESSABLE_PROTOCOLS)
CHRISTMAS_EFFECTS_PROTOCOLS = {PROTOCOL_LEDENET_ADDRESSABLE_CHRISTMAS}
OLD_EFFECTS_PROTOCOLS = {PROTOCOL_LEDENET_ADDRESSABLE_A1}
NEW_EFFECTS_PROTOCOLS = {
PROTOCOL_LEDENET_ADDRESSABLE_A2,
PROTOCOL_LEDENET_ADDRESSABLE_A3,
}
SPEED_ADJUST_WILL_TURN_ON = {
PROTOCOL_LEDENET_ADDRESSABLE_A1,
PROTOCOL_LEDENET_ADDRESSABLE_A2,
}
PROTOCOL_NAME_TO_CLS = {
PROTOCOL_LEDENET_ORIGINAL: ProtocolLEDENETOriginal,
PROTOCOL_LEDENET_ORIGINAL_CCT: ProtocolLEDENETOriginalCCT,
PROTOCOL_LEDENET_8BYTE: ProtocolLEDENET8Byte,
PROTOCOL_LEDENET_8BYTE_AUTO_ON: ProtocolLEDENET8ByteAutoOn,
PROTOCOL_LEDENET_8BYTE_DIMMABLE_EFFECTS: ProtocolLEDENET8ByteDimmableEffects,
PROTOCOL_LEDENET_9BYTE: ProtocolLEDENET9Byte,
PROTOCOL_LEDENET_9BYTE_AUTO_ON: ProtocolLEDENET9ByteAutoOn,
PROTOCOL_LEDENET_9BYTE_DIMMABLE_EFFECTS: ProtocolLEDENET9ByteDimmableEffects,
PROTOCOL_LEDENET_ADDRESSABLE_A3: ProtocolLEDENETAddressableA3,
PROTOCOL_LEDENET_ADDRESSABLE_A2: ProtocolLEDENETAddressableA2,
PROTOCOL_LEDENET_ADDRESSABLE_A1: ProtocolLEDENETAddressableA1,
PROTOCOL_LEDENET_CCT: ProtocolLEDENETCCT,
PROTOCOL_LEDENET_SOCKET: ProtocolLEDENETSocket,
PROTOCOL_LEDENET_ADDRESSABLE_CHRISTMAS: ProtocolLEDENETAddressableChristmas,
}
PATTERN_CODE_TO_EFFECT = {
PRESET_MUSIC_MODE: MODE_MUSIC,
PRESET_MUSIC_MODE_LEGACY: MODE_MUSIC,
EFFECT_CUSTOM_CODE: EFFECT_CUSTOM,
}
SERIALIZABLE_TYPES = (str, bool, dict, int, float, list, tuple, set)
class DeviceType(Enum):
    """Coarse classification of a LEDENET device: light bulb/strip vs. smart switch."""
    Bulb = 0
    Switch = 1
class LEDENETDevice:
"""An LEDENET Device."""
    def __init__(
        self,
        ipaddr: str,
        port: int = 5577,
        timeout: float = 5,
        discovery: Optional[FluxLEDDiscovery] = None,
    ) -> None:
        """Init the LEDENEt Device.

        :param ipaddr: IP address of the device.
        :param port: TCP port (5577 is the protocol default).
        :param timeout: socket timeout in seconds.
        :param discovery: optional discovery record with model metadata.
        """
        self.ipaddr: str = ipaddr
        self.port: int = port
        self.timeout: float = timeout
        # Last raw state frame received from the device (None until first state).
        self.raw_state: Optional[Union[LEDENETOriginalRawState, LEDENETRawState]] = None
        self.available: Optional[bool] = None
        self._model_num: Optional[int] = None
        self._model_data: Optional[LEDENETModel] = None
        # Number of paired RF remotes; None when unsupported/unknown.
        self._paired_remotes: Optional[int] = None
        self._remote_config: Optional[RemoteConfig] = None
        self._white_channel_channel_type: WhiteChannelType = DEFAULT_WHITE_CHANNEL_TYPE
        self._discovery = discovery
        self._protocol: Optional[PROTOCOL_TYPES] = None
        self._mode: Optional[str] = None
        # Monotonic deadlines after which device-reported state is trusted
        # again following a local change (devices fade into new state, so
        # intermediate reports are ignored until these expire).
        self._transition_complete_time: float = 0
        self._preset_pattern_transition_complete_time: float = 0
        self._power_state_transition_complete_time: float = 0
        self._last_effect_brightness: int = 100
        self._device_config: Optional[LEDENETAddressableDeviceConfiguration] = None
        self._last_message: Dict[str, bytes] = {}
def _protocol_probes(
self,
) -> Union[
Tuple[Type[ProtocolLEDENETOriginal], Type[ProtocolLEDENET8Byte]],
Tuple[Type[ProtocolLEDENET8Byte], Type[ProtocolLEDENETOriginal]],
]:
"""Determine the probe order based on device type."""
discovery = self.discovery
return (
PROTOCOL_PROBES_LEGACY if is_legacy_device(discovery) else PROTOCOL_PROBES
)
@property
def model_num(self) -> int:
"""Return the model number."""
assert self._model_num is not None
return self._model_num
@property
def model_data(self) -> LEDENETModel:
"""Return the model data."""
assert self._model_data is not None
return self._model_data
@property
def discovery(self) -> Optional[FluxLEDDiscovery]:
"""Return the discovery data."""
return self._discovery
@discovery.setter
def discovery(self, value: FluxLEDDiscovery) -> None:
"""Set the discovery data."""
self._discovery = value
@property
def white_channel_channel_type(self) -> WhiteChannelType:
"""Return the type of the white channel."""
return self._white_channel_channel_type
@white_channel_channel_type.setter
def white_channel_channel_type(self, value: WhiteChannelType) -> None:
"""Set the type of the white channel."""
self._white_channel_channel_type = value
@property
def hardware(self) -> Optional[LEDENETHardware]:
"""Retrurn the hardware mapping for the device."""
if not self._discovery or ATTR_MODEL not in self._discovery:
return None
model = self._discovery.get(ATTR_MODEL)
if model is None:
return None
assert isinstance(model, str)
return HARDWARE_MAP.get(model)
@property
def paired_remotes(self) -> Optional[int]:
"""Return the number of paired remotes or None if not supported."""
return self._paired_remotes
@property
def remote_config(self) -> Optional[RemoteConfig]:
"""Return the number of remote config or None if not supported."""
return self._remote_config
@property
def speed_adjust_off(self) -> int:
"""Return true if the speed of an effect can be adjusted while off."""
return self.protocol not in SPEED_ADJUST_WILL_TURN_ON
@property
def _whites_are_temp_brightness(self) -> bool:
"""Return true if warm_white and cool_white are scaled temp values and not raw 0-255."""
return self.protocol == PROTOCOL_LEDENET_CCT
@property
def model(self) -> str:
"""Return the human readable model description."""
if self._discovery and self._discovery.get(ATTR_MODEL_DESCRIPTION):
return f"{self._discovery[ATTR_MODEL_DESCRIPTION]} (0x{self.model_num:02X})"
return f"{self.model_data.description} (0x{self.model_num:02X})"
@property
def version_num(self) -> int:
"""Return the version number."""
assert self.raw_state is not None
raw_state = self.raw_state
if hasattr(raw_state, "version_number"):
assert isinstance(raw_state, LEDENETRawState)
return raw_state.version_number
return 0 # old devices report as 0
@property
def preset_pattern_num(self) -> int:
"""Return the preset pattern number."""
assert self.raw_state is not None
return self.raw_state.preset_pattern
@property
def rgbwprotocol(self) -> bool:
"""Devices that don't require a separate rgb/w bit."""
return self.rgbwcapable or self.model_data.always_writes_white_and_colors
@property
def microphone(self) -> bool:
"""Devices that have a microphone built in."""
return self.model_data.microphone
@property
def rgbwcapable(self) -> bool:
"""Devices that actually support rgbw."""
color_modes = self.color_modes
return COLOR_MODE_RGBW in color_modes or COLOR_MODE_RGBWW in color_modes
@property
def device_type(self) -> DeviceType:
"""Return the device type."""
is_switch = self.model_num in MODEL_NUMS_SWITCHS
return DeviceType.Switch if is_switch else DeviceType.Bulb
@property
def color_temp(self) -> int:
"""Return the current color temp in kelvin."""
return (self.getWhiteTemperature())[0]
@property
def min_temp(self) -> int:
"""Returns the minimum color temp in kelvin."""
return int(self._white_channel_channel_type.value)
@property
def max_temp(self) -> int:
"""Returns the maximum color temp in kelvin."""
return MAX_TEMP
@property
def _rgbwwprotocol(self) -> bool:
"""Device that uses the 9-byte protocol."""
return self.protocol in (
PROTOCOL_LEDENET_9BYTE,
PROTOCOL_LEDENET_9BYTE_DIMMABLE_EFFECTS,
)
@property
def white_active(self) -> bool:
"""Any white channel is active."""
assert self.raw_state is not None
raw_state = self.raw_state
return bool(raw_state.warm_white or raw_state.cool_white)
@property
def color_active(self) -> bool:
"""Any color channel is active."""
assert self.raw_state is not None
raw_state = self.raw_state
return bool(raw_state.red or raw_state.green or raw_state.blue)
def rgbw_color_temp_support(self, color_modes: Set[str]) -> bool:
"""RGBW color temp support."""
return COLOR_MODE_RGBW in color_modes and self.max_temp != self.min_temp
@property
def color_is_white_only(self) -> bool:
"""Return if the curent color is active and white."""
assert self.raw_state is not None
raw_state = self.raw_state
return bool(
# At least one channel is on
(raw_state.red or raw_state.green or raw_state.blue or raw_state.warm_white)
# The color channels are white
and raw_state.red == raw_state.green == raw_state.blue
)
@property
def multi_color_mode(self) -> bool:
"""The device supports multiple color modes."""
return len(self.color_modes) > 1
    @property
    def color_modes(self) -> Set[str]:
        """The available color modes.

        Extends the internal color modes with CCT when the device can
        emulate it: RGBWW devices without native CCT, or RGBW devices
        with a real color temperature range.
        """
        color_modes = self._internal_color_modes
        # We support CCT mode if the device supports RGBWW
        # but we do not add it to internal color modes as
        # we need to distinguish between devices that are RGB/CCT
        # and ones that are RGB&CCT
        if (
            COLOR_MODE_CCT not in color_modes
            and COLOR_MODE_RGBWW in color_modes
            or self.rgbw_color_temp_support(color_modes)
        ):
            return {COLOR_MODE_CCT, *color_modes}
        return color_modes
    @property
    def _internal_color_modes(self) -> Set[str]:
        """The internal available color modes.

        Precedence: RGBW strip-config override, then a mode-nibble
        lookup for unknown models, then the model database mapping.
        """
        assert self.raw_state is not None
        if (
            self._device_config is not None
            # Currently this is only the SK6812RGBW strips on 0xA3
            and self._device_config.operating_mode == COLOR_MODE_RGBW
        ):
            return {COLOR_MODE_RGBW}
        if not is_known_model(self.model_num):
            # Default mode is RGB
            return BASE_MODE_MAP.get(self.raw_state.mode & 0x0F, {DEFAULT_MODE})
        model_data = self.model_data
        return model_data.mode_to_color_mode.get(
            self.raw_state.mode, model_data.color_modes
        )
@property
def pixels_per_segment(self) -> Optional[int]:
"""Return the pixels per segment."""
if self._device_config is None:
return None
return self._device_config.pixels_per_segment
@property
def segments(self) -> Optional[int]:
"""Return the number of segments."""
if self._device_config is None:
return None
return self._device_config.segments
@property
def music_pixels_per_segment(self) -> Optional[int]:
"""Return the music pixels per segment."""
if self._device_config is None:
return None
return self._device_config.music_pixels_per_segment
@property
def music_segments(self) -> Optional[int]:
"""Return the number of music segments."""
if self._device_config is None:
return None
return self._device_config.music_segments
@property
def wiring(self) -> Optional[str]:
"""Return the sort order as a string."""
device_config = self.model_data.device_config
if not device_config.wiring:
return None
if self._device_config:
return self._device_config.wiring
assert self.raw_state is not None
return device_config.num_to_wiring.get(int((self.raw_state.mode & 0xF0) / 16))
@property
def wiring_num(self) -> Optional[int]:
"""Return the wiring number."""
if not self.model_data.device_config.wiring:
return None
if self._device_config:
return self._device_config.wiring_num
assert self.raw_state is not None
return int((self.raw_state.mode & 0xF0) / 16)
@property
def wirings(self) -> Optional[List[str]]:
"""Return available wirings for the device."""
device_config = self.model_data.device_config
if not device_config.wiring:
return None
if self._device_config:
return list(self._device_config.wirings)
return list(device_config.wiring_to_num)
@property
def operating_mode(self) -> Optional[str]:
"""Return the strip mode as a string."""
device_config = self.model_data.device_config
if not device_config.operating_modes:
return None
if self._device_config:
return self._device_config.operating_mode
assert self.raw_state is not None
return device_config.num_to_operating_mode.get(self.raw_state.mode & 0x0F)
@property
def operating_mode_num(self) -> Optional[int]:
"""Return the strip mode as a string."""
if not self.model_data.device_config.operating_modes:
return None
assert self.raw_state is not None
return self.raw_state.mode & 0x0F
@property
def operating_modes(self) -> Optional[List[str]]:
"""Return available operating modes for the device."""
if not self.model_data.device_config.operating_modes:
return None
return list(self.model_data.device_config.operating_mode_to_num)
@property
def ic_type(self) -> Optional[str]:
"""Return the strip ictype as a string."""
if not self.model_data.device_config.ic_type:
return None
assert self._device_config is not None
return self._device_config.ic_type
@property
def ic_type_num(self) -> Optional[int]:
"""Return the strip ictype as an int."""
if not self.model_data.device_config.ic_type:
return None
assert self._device_config is not None
return self._device_config.ic_type_num
@property
def ic_types(self) -> Optional[List[str]]:
"""Return the ic types."""
if not self.model_data.device_config.ic_type:
return None
return list(self.model_data.device_config.ic_type_to_num)
    @property
    def color_mode(self) -> Optional[str]:
        """The current color mode.

        Resolution order matters: RGBWW and RGBW-with-CCT devices report
        CCT when only white is lit; split RGB/CCT and RGB/W devices
        report whichever half is active; otherwise the single supported
        mode (or None for switches/non-lights).
        """
        color_modes = self._internal_color_modes
        if COLOR_MODE_RGBWW in color_modes:
            # We support CCT mode if the device supports RGBWW
            return COLOR_MODE_RGBWW if self.color_active else COLOR_MODE_CCT
        if self.rgbw_color_temp_support(color_modes):
            # We support CCT mode if the device supports RGB&W
            return COLOR_MODE_CCT if self.color_is_white_only else COLOR_MODE_RGBW
        if (
            color_modes == COLOR_MODES_RGB_CCT
        ):  # RGB/CCT split, only one active at a time
            return COLOR_MODE_CCT if self.white_active else COLOR_MODE_RGB
        if color_modes == COLOR_MODES_RGB_W:  # RGB/W split, only one active at a time
            return COLOR_MODE_DIM if self.white_active else COLOR_MODE_RGB
        if color_modes:
            return list(color_modes)[0]
        return None  # Usually a switch or non-light device
@property
def protocol(self) -> Optional[str]:
"""Returns the name of the protocol in use."""
if self._protocol is None:
return None
return self._protocol.name
@property
def dimmable_effects(self) -> bool:
"""Return true of the device supports dimmable effects."""
assert self._protocol is not None
return self._protocol.dimmable_effects
@property
def requires_turn_on(self) -> bool:
"""Return true of the device requires a power on command before setting levels/effects."""
assert self._protocol is not None
return self._protocol.requires_turn_on
    @property
    def is_on(self) -> bool:
        """Return True when the raw state's power byte matches the protocol's on byte."""
        assert self.raw_state is not None
        assert self._protocol is not None
        return self.raw_state.power_state == self._protocol.on_byte
    @property
    def mode(self) -> Optional[str]:
        """Return the last mode derived by _determineMode, or None."""
        return self._mode
    @property
    def warm_white(self) -> int:
        """Return the warm white level; 0 unless a 9-byte RGBWW protocol is in use."""
        assert self.raw_state is not None
        return self.raw_state.warm_white if self._rgbwwprotocol else 0
@property
def effect_list(self) -> List[str]:
"""Return the list of available effects."""
effects: Iterable[str] = []
protocol = self.protocol
if protocol in OLD_EFFECTS_PROTOCOLS:
effects = ORIGINAL_ADDRESSABLE_EFFECT_ID_NAME.values()
elif protocol in NEW_EFFECTS_PROTOCOLS:
effects = ADDRESSABLE_EFFECT_ID_NAME.values()
elif protocol in CHRISTMAS_EFFECTS_PROTOCOLS:
effects = CHRISTMAS_ADDRESSABLE_EFFECT_ID_NAME.values()
elif COLOR_MODES_RGB.intersection(self.color_modes):
effects = EFFECT_LIST_DIMMABLE if self.dimmable_effects else EFFECT_LIST
elif protocol == PROTOCOL_LEDENET_ORIGINAL_CCT:
effects = EFFECT_LIST_LEGACY_CCT
if self.microphone:
return [*effects, EFFECT_RANDOM, EFFECT_MUSIC]
return [*effects, EFFECT_RANDOM]
    @property
    def effect(self) -> Optional[str]:
        """Return the current effect name, or None when no effect is active.

        Special pattern codes (music mode, custom effect) map to fixed
        names; everything else resolves through the named-effect tables.
        """
        if self.protocol in CHRISTMAS_EFFECTS_PROTOCOLS:
            return self._named_effect
        return PATTERN_CODE_TO_EFFECT.get(self.preset_pattern_num, self._named_effect)
    @property
    def _named_effect(self) -> Optional[str]:
        """Returns the named effect.

        Looks the current preset-pattern/mode bytes up in the effect
        table matching the active protocol; None when unrecognized.
        """
        assert self.raw_state is not None
        mode = self.raw_state.mode
        pattern_code = self.preset_pattern_num
        protocol = self.protocol
        if protocol in OLD_EFFECTS_PROTOCOLS:
            # A1 packs the effect id across the pattern and mode bytes.
            effect_id = (pattern_code << 8) + mode - 99
            return ORIGINAL_ADDRESSABLE_EFFECT_ID_NAME.get(effect_id)
        if protocol in NEW_EFFECTS_PROTOCOLS:
            # 0x25 selects the effect table, 0x24 the multi-color table.
            if pattern_code == 0x25:
                return ADDRESSABLE_EFFECT_ID_NAME.get(mode)
            if pattern_code == 0x24:
                return ASSESSABLE_MULTI_COLOR_ID_NAME.get(mode)
            return None
        if protocol in CHRISTMAS_EFFECTS_PROTOCOLS:
            if pattern_code == 0x25:
                return CHRISTMAS_ADDRESSABLE_EFFECT_ID_NAME.get(mode)
            return None
        if protocol == PROTOCOL_LEDENET_ORIGINAL_CCT:
            return EFFECT_ID_NAME_LEGACY_CCT.get(pattern_code)
        return EFFECT_ID_NAME.get(pattern_code)
    @property
    def cool_white(self) -> int:
        """Return the cool white level; 0 unless a 9-byte RGBWW protocol is in use."""
        assert self.raw_state is not None
        if self._rgbwwprotocol:
            return self.raw_state.cool_white
        return 0
    # Old name is deprecated
    @property
    def cold_white(self) -> int:
        """Deprecated alias for :attr:`cool_white`."""
        return self.cool_white
    @property
    def brightness(self) -> int:
        """Return current brightness 0-255.

        For warm white return current led level. For RGB
        calculate the HSV and return the 'value'.
        for CCT calculate the brightness.
        for ww send led level
        """
        color_mode = self.color_mode
        raw_state = self.raw_state
        assert raw_state is not None
        if self._named_effect:
            if self.dimmable_effects:
                if (
                    self.protocol in NEW_EFFECTS_PROTOCOLS
                    and time.monotonic() > self._transition_complete_time
                ):
                    # the red byte holds the brightness during an effect
                    return min(255, round(raw_state.red * 255 / 100))
                # While a transition is pending, trust the locally
                # requested effect brightness instead of the device.
                return round(self._last_effect_brightness * 255 / 100)
            # Non-dimmable effects always report full brightness.
            return 255
        if raw_state.preset_pattern in PRESET_MUSIC_MODES and not self.dimmable_effects:
            return 255
        if color_mode == COLOR_MODE_DIM:
            return int(raw_state.warm_white)
        elif color_mode == COLOR_MODE_CCT:
            _, b = self.getWhiteTemperature()
            return b
        # HSV "value" of the RGB channels, scaled to 0-255.
        r, g, b = self.getRgb()
        _, _, v = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)
        v_255 = v * 255
        if color_mode == COLOR_MODE_RGBW:
            # Average color value with the white channel.
            return round((v_255 + raw_state.warm_white) / 2)
        if color_mode == COLOR_MODE_RGBWW:
            # Average color value with both white channels.
            return round((v_255 + raw_state.warm_white + raw_state.cool_white) / 3)
        # Default color mode (RGB)
        return int(v_255)
    def _determineMode(self) -> Optional[str]:
        """Derive the device's operating mode from the raw state.

        Returns one of the MODE_* constants, a timer string, or None
        when the pattern code cannot be classified.
        """
        assert self.raw_state is not None
        pattern_code = self.raw_state.preset_pattern
        if self.device_type == DeviceType.Switch:
            return MODE_SWITCH
        if pattern_code in (0x41, 0x61):
            # Static color pattern codes: white vs. color depends on the
            # active color mode.
            if self.color_mode in {COLOR_MODE_DIM, COLOR_MODE_CCT}:
                return MODE_WW
            return MODE_COLOR
        if pattern_code == EFFECT_CUSTOM_CODE:
            # Christmas-protocol devices treat the custom code as a preset.
            return (
                MODE_PRESET
                if self.protocol in CHRISTMAS_EFFECTS_PROTOCOLS
                else MODE_CUSTOM
            )
        if pattern_code in (PRESET_MUSIC_MODE, PRESET_MUSIC_MODE_LEGACY):
            return MODE_MUSIC
        if PresetPattern.valid(pattern_code):
            return MODE_PRESET
        if BuiltInTimer.valid(pattern_code):
            return BuiltInTimer.valtostr(pattern_code)
        if self.protocol in ADDRESSABLE_PROTOCOLS:
            return MODE_PRESET
        return None
    def set_unavailable(self) -> None:
        """Mark the device as unreachable."""
        self.available = False
    def set_available(self) -> None:
        """Mark the device as reachable."""
        self.available = True
    def process_device_config_response(self, msg: bytes) -> None:
        """Process an IC (strip config) response.

        Only valid for addressable/IC protocols; the parsed strip
        configuration is stored in self._device_config.
        """
        assert isinstance(self._protocol, ALL_IC_PROTOCOLS)
        self._device_config = self._protocol.parse_strip_setting(msg)
        _LOGGER.debug("%s: device_config: %s", self.ipaddr, self._device_config)
def process_state_response(self, rx: bytes) -> bool:
assert self._protocol is not None
if not self._protocol.is_valid_state_response(rx):
_LOGGER.warning(
"%s: Recieved invalid response: %s",
self.ipaddr,
utils.raw_state_to_dec(rx),
)
return False
raw_state: Union[
LEDENETOriginalRawState, LEDENETRawState
] = self._protocol.named_raw_state(rx)
_LOGGER.debug("%s: State: %s", self.ipaddr, raw_state)
if raw_state != self.raw_state:
_LOGGER.debug(
"%s: unmapped raw state: %s",
self.ipaddr,
utils.raw_state_to_dec(raw_state),
)
now_time = time.monotonic()
transition_states = set()
if now_time < self._power_state_transition_complete_time:
transition_states.add(STATE_POWER_STATE)
if now_time < self._transition_complete_time:
# Do not update the channel states if a transition is
# in progress as the state will not be correct
# until the transition is completed since devices
# "FADE" into the state requested.
transition_states |= CHANNEL_STATES
if now_time < self._preset_pattern_transition_complete_time:
transition_states.add("preset_pattern")
if transition_states:
self._replace_raw_state(
{
name: value
for name, value in raw_state._asdict().items()
if name not in transition_states
}
)
else:
self._set_raw_state(raw_state)
_LOGGER.debug("%s: Mapped State: %s", self.ipaddr, self.raw_state)
mode = self._determineMode()
if mode is None:
_LOGGER.debug(
"%s: Unable to determine mode from raw state: %s",
self.ipaddr,
utils.raw_state_to_dec(rx),
)
return False
self._mode = mode
return True
    def process_power_state_response(self, msg: bytes) -> bool:
        """Process a power state change message.

        Returns ``True`` if the frame was valid and the state was applied.
        """
        assert self._protocol is not None
        if not self._protocol.is_valid_power_state_response(msg):
            _LOGGER.warning(
                "%s: Recieved invalid power state response: %s",
                self.ipaddr,
                utils.raw_state_to_dec(msg),
            )
            return False
        _LOGGER.debug("%s: Setting power state to: %s", self.ipaddr, f"0x{msg[2]:02X}")
        # Byte 2 of the frame carries the new power opcode.
        self._set_power_state(msg[2])
        return True
    def _set_raw_state(
        self,
        raw_state: Union[LEDENETOriginalRawState, LEDENETRawState],
        updated: Optional[Set[str]] = None,
    ) -> None:
        """Set the raw state remapping channels as needed.
        The goal is to normalize the data so the raw state
        is always in the same format regardless of the protocol.
        Some devices need to have channels remapped.
        Other devices use color_temp/brightness format
        which needs to be converted back to 0-255 values for
        warm_white and cool_white.
        """
        channel_map = self.model_data.channel_map
        # Only remap updated states as we do not want to switch any
        # state that have not changed since they will already be in
        # the correct slot
        #
        # If updated is None then all raw_state values have been sent
        #
        if self._whites_are_temp_brightness:
            assert isinstance(raw_state, LEDENETRawState)
            # Only convert on a full update since we still use 0-255 internally
            if updated is not None:
                self.raw_state = raw_state
                return
            # warm_white is the color temp from 1-100
            temp = raw_state.warm_white
            # cold_white is the brightness from 1-100
            brightness = raw_state.cool_white
            warm_white, cool_white = scaled_color_temp_to_white_levels(temp, brightness)
            self.raw_state = raw_state._replace(
                warm_white=warm_white, cool_white=cool_white
            )
            return
        if channel_map:
            # Partial updates only remap the fields that actually changed.
            if updated is None:
                updated = set(channel_map.keys())
            self.raw_state = raw_state._replace(
                **{
                    name: getattr(raw_state, source)
                    if source in updated
                    else getattr(raw_state, name)
                    for name, source in channel_map.items()
                }
            )
            return
        if isinstance(self._protocol, ProtocolLEDENETAddressableA3):
            if updated is not None:
                self.raw_state = raw_state
                return
            # A3 uses a unique scale for warm white
            self.raw_state = raw_state._replace(
                warm_white=utils.A3WarmWhiteToByte(raw_state.warm_white)
            )
            return
        self.raw_state = raw_state
    def __str__(self) -> str:  # noqa: C901
        """Return a human-readable summary of power, mode and raw state."""
        assert self.raw_state is not None
        assert self._protocol is not None
        rx = self.raw_state
        if not rx:
            return "No state data"
        mode = self.mode
        color_mode = self.color_mode
        power_str = "Unknown power state"
        if rx.power_state == self._protocol.on_byte:
            power_str = "ON  "
        elif rx.power_state == self._protocol.off_byte:
            power_str = "OFF "
        # Describe the active mode in the most specific form available.
        if mode in STATIC_MODES:
            if color_mode in COLOR_MODES_RGB:
                mode_str = f"Color: {(rx.red, rx.green, rx.blue)}"
                # Should add ability to get CCT from rgbwcapable*
                if self.rgbwcapable:
                    mode_str += f" White: {rx.warm_white}"
                else:
                    mode_str += f" Brightness: {round(self.brightness * 100 / 255)}%"
            elif color_mode == COLOR_MODE_DIM:
                mode_str = f"Warm White: {utils.byteToPercent(rx.warm_white)}%"
            elif color_mode == COLOR_MODE_CCT:
                cct_value = self.getWhiteTemperature()
                mode_str = "CCT: {}K Brightness: {}%".format(
                    cct_value[0], round(cct_value[1] * 100 / 255)
                )
        elif mode == MODE_PRESET:
            mode_str = f"Pattern: {self.effect} (Speed {self.speed}%)"
        elif mode == MODE_CUSTOM:
            mode_str = f"Custom pattern (Speed {self.speed}%)"
        elif BuiltInTimer.valid(rx.preset_pattern):
            mode_str = BuiltInTimer.valtostr(rx.preset_pattern)
        elif mode == MODE_MUSIC:
            mode_str = "Music"
        elif mode == MODE_SWITCH:
            mode_str = "Switch"
        else:
            mode_str = f"Unknown mode 0x{rx.preset_pattern:x}"
        mode_str += " raw state: "
        mode_str += utils.raw_state_to_dec(rx)
        return f"{power_str} [{mode_str}]"
    def _set_power_state(self, new_power_state: int) -> None:
        """Set the power state in the raw state."""
        self._replace_raw_state({"power_state": new_power_state})
        # A power change also invalidates any in-flight channel transition.
        self._set_transition_complete_time()
    def _replace_raw_state(self, new_states: Dict[str, int]) -> None:
        """Merge *new_states* (field name -> value) into ``raw_state``."""
        assert self.raw_state is not None
        _LOGGER.debug("%s: _replace_raw_state: %s", self.ipaddr, new_states)
        self._set_raw_state(
            self.raw_state._replace(**new_states), set(new_states.keys())
        )
    def isOn(self) -> bool:
        """Legacy alias for the ``is_on`` property."""
        return self.is_on
    def getWarmWhite255(self) -> int:
        """Return brightness when in a white-only mode, else full scale (255)."""
        if self.color_mode not in {COLOR_MODE_CCT, COLOR_MODE_DIM}:
            return 255
        return self.brightness
    def getWhiteTemperature(self) -> Tuple[int, int]:
        """Returns the color temp and brightness"""
        # Assume input temperature of between 2700 and 6500 Kelvin, and scale
        # the warm and cold LEDs linearly to provide that
        assert self.raw_state is not None
        raw_state = self.raw_state
        warm_white = raw_state.warm_white
        if self.rgbw_color_temp_support(self.color_modes):
            # RGBW color-temp devices report the cool level in the red slot.
            cool_white = raw_state.red if self.color_is_white_only else 0
        else:
            cool_white = raw_state.cool_white
        temp, brightness = white_levels_to_color_temp(
            warm_white, cool_white, self.min_temp, self.max_temp
        )
        return temp, brightness
    def getRgbw(self) -> Tuple[int, int, int, int]:
        """Returns red,green,blue,white (usually warm)."""
        # Fall back to full-scale values when not in an RGB mode.
        if self.color_mode not in COLOR_MODES_RGB:
            return (255, 255, 255, 255)
        return self.rgbw
    @property
    def rgbw(self) -> Tuple[int, int, int, int]:
        """Returns red,green,blue,white (usually warm)."""
        # Raw 0-255 channel levels straight from the last parsed state.
        assert self.raw_state is not None
        raw_state = self.raw_state
        return (
            raw_state.red,
            raw_state.green,
            raw_state.blue,
            raw_state.warm_white,
        )
    def getRgbww(self) -> Tuple[int, int, int, int, int]:
        """Returns red,green,blue,warm,cool."""
        # Fall back to full-scale values when not in an RGB mode.
        if self.color_mode not in COLOR_MODES_RGB:
            return (255, 255, 255, 255, 255)
        return self.rgbww
    @property
    def rgbww(self) -> Tuple[int, int, int, int, int]:
        """Returns red,green,blue,warm,cool."""
        # Raw 0-255 channel levels straight from the last parsed state.
        raw_state = self.raw_state
        assert raw_state is not None
        return (
            raw_state.red,
            raw_state.green,
            raw_state.blue,
            raw_state.warm_white,
            raw_state.cool_white,
        )
    def getRgbcw(self) -> Tuple[int, int, int, int, int]:
        """Returns red,green,blue,cool,warm."""
        # Fall back to full-scale values when not in an RGB mode.
        if self.color_mode not in COLOR_MODES_RGB:
            return (255, 255, 255, 255, 255)
        return self.rgbcw
    @property
    def rgbcw(self) -> Tuple[int, int, int, int, int]:
        """Returns red,green,blue,cool,warm."""
        # Same data as ``rgbww`` but with the white channels swapped.
        raw_state = self.raw_state
        assert raw_state is not None
        return (
            raw_state.red,
            raw_state.green,
            raw_state.blue,
            raw_state.cool_white,
            raw_state.warm_white,
        )
    def getCCT(self) -> Tuple[int, int]:
        """Return (warm_white, cool_white) levels; (255, 255) outside CCT mode."""
        if self.color_mode != COLOR_MODE_CCT:
            return (255, 255)
        raw_state = self.raw_state
        assert raw_state is not None
        return (raw_state.warm_white, raw_state.cool_white)
    @property
    def speed(self) -> int:
        """Return the effect speed as a percentage (see ``__str__`` usage)."""
        assert self.raw_state is not None
        if self.protocol in ADDRESSABLE_PROTOCOLS:
            # Addressable protocols report the speed directly.
            return self.raw_state.speed
        if self.protocol in CHRISTMAS_EFFECTS_PROTOCOLS:
            # Christmas protocols carry the delay in the `green` slot.
            return utils.delayToSpeed(self.raw_state.green)
        return utils.delayToSpeed(self.raw_state.speed)
    def getSpeed(self) -> int:
        """Legacy alias for the ``speed`` property."""
        return self.speed
    def _generate_random_levels_change(self) -> Tuple[List[bytearray], Dict[str, int]]:
        """Generate a random levels change."""
        # Pick the channel set that matches the device's color mode; all
        # other channels are sent as None (unchanged).
        channels = {STATE_WARM_WHITE}
        if COLOR_MODES_RGB.intersection(self.color_modes):
            channels = {STATE_RED, STATE_GREEN, STATE_BLUE}
        elif COLOR_MODE_CCT in self.color_modes:
            channels = {STATE_WARM_WHITE, STATE_COOL_WHITE}
        return self._generate_levels_change(
            {
                channel: random.randint(0, 255) if channel in channels else None
                for channel in CHANNEL_STATES
            }
        )
    def _generate_levels_change(  # noqa: C901
        self,
        channels: Dict[str, Optional[int]],
        persist: bool = True,
        brightness: Optional[int] = None,
    ) -> Tuple[List[bytearray], Dict[str, int]]:
        """Generate the levels change request.

        Returns the protocol messages to send and a dict of the raw-state
        fields the change is expected to produce. ``None`` channel values
        mean "leave unchanged".
        """
        channel_map = self.model_data.channel_map
        if channel_map:
            mapped_channels = {
                channel: channels[channel_map.get(channel, channel)]
                for channel in channels
            }
        else:
            mapped_channels = channels
        r = mapped_channels[STATE_RED]
        g = mapped_channels[STATE_GREEN]
        b = mapped_channels[STATE_BLUE]
        w = mapped_channels[STATE_WARM_WHITE]
        w2 = mapped_channels[STATE_COOL_WHITE]
        if (r or g or b) and (w or w2) and not self.rgbwcapable:
            raise ValueError("RGB&CW command sent to non-RGB&CW device")
        if brightness is not None and r is not None and g is not None and b is not None:
            (r, g, b) = self._calculateBrightness((r, g, b), brightness)
        r_value = None if r is None else int(r)
        g_value = None if g is None else int(g)
        b_value = None if b is None else int(b)
        w_value = None if w is None else int(w)
        # ProtocolLEDENET9Byte devices support two white outputs for cold and warm.
        if w2 is None:
            if w is not None and self.color_mode in {COLOR_MODE_CCT, COLOR_MODE_RGBWW}:
                # If we're only setting a single white value, we preserve the cold white value
                w2_value: Optional[int] = self.cold_white
            else:
                # If we're only setting a single white value, we set the second output to be the same as the first
                w2_value = w_value
        else:
            w2_value = int(w2)
        write_mode = LevelWriteMode.ALL
        # rgbwprotocol always overwrite both color & whites
        if not self.rgbwprotocol:
            if w is None and w2 is None:
                write_mode = LevelWriteMode.COLORS
            elif r is None and g is None and b is None:
                write_mode = LevelWriteMode.WHITES
        assert self._protocol is not None
        msgs = self._protocol.construct_levels_change(
            persist, r_value, g_value, b_value, w_value, w2_value, write_mode
        )
        # Predict the raw-state fields this command will change.
        updates = {}
        multi_mode = self.multi_color_mode
        if multi_mode or write_mode in WRITE_ALL_COLORS:
            updates.update(
                {"red": r_value or 0, "green": g_value or 0, "blue": b_value or 0}
            )
        if multi_mode or write_mode in WRITE_ALL_WHITES:
            updates.update({"warm_white": w_value or 0, "cool_white": w2_value or 0})
        return msgs, updates
    def _set_transition_complete_time(self) -> None:
        """Set the time we expect the transition will be completed.
        Devices fade to a specific state so we want to avoid
        consuming state updates into self.raw_state while a transition
        is in progress as this will provide unexpected results
        and the brightness values will be wrong until
        the transition completes.
        """
        assert self.raw_state is not None
        latency = STATE_CHANGE_LATENCY
        if self.protocol in ADDRESSABLE_PROTOCOLS:
            latency = ADDRESSABLE_STATE_CHANGE_LATENCY
        # Fade duration scales with the configured effect delay.
        transition_time = latency + utils.speedToDelay(self.raw_state.speed) / 100
        self._transition_complete_time = time.monotonic() + transition_time
        _LOGGER.debug(
            "%s: Transition time is %s, set _transition_complete_time to %s",
            self.ipaddr,
            transition_time,
            self._transition_complete_time,
        )
        # If we are doing a state transition cancel and preset pattern transition
        self._preset_pattern_transition_complete_time = NEVER_TIME
    def _set_preset_pattern_transition_complete_time(self) -> None:
        """Set the time we expect the preset_pattern transition will be completed."""
        assert self.raw_state is not None
        # Fixed latency; preset changes do not scale with effect speed.
        self._preset_pattern_transition_complete_time = (
            time.monotonic() + PRESET_PATTERN_CHANGE_LATENCY
        )
        _LOGGER.debug(
            "%s: Mode transition time is %s, set _preset_pattern_transition_complete_time to %s",
            self.ipaddr,
            PRESET_PATTERN_CHANGE_LATENCY,
            self._preset_pattern_transition_complete_time,
        )
    def _set_power_transition_complete_time(self) -> None:
        """Set the time we expect the power transition will be completed."""
        assert self.raw_state is not None
        # Fixed latency window during which incoming power states are ignored.
        self._power_state_transition_complete_time = (
            time.monotonic() + POWER_STATE_CHANGE_LATENCY
        )
        _LOGGER.debug(
            "%s: Mode transition time is %s, set _power_state_transition_complete_time to %s",
            self.ipaddr,
            POWER_STATE_CHANGE_LATENCY,
            self._power_state_transition_complete_time,
        )
    def getRgb(self) -> Tuple[int, int, int]:
        """Return (red, green, blue); full scale when not in an RGB mode."""
        if self.color_mode not in COLOR_MODES_RGB:
            return (255, 255, 255)
        return self.rgb
    @property
    def rgb(self) -> Tuple[int, int, int]:
        """Raw (red, green, blue) 0-255 levels from the last parsed state."""
        assert self.raw_state is not None
        raw_state = self.raw_state
        return (raw_state.red, raw_state.green, raw_state.blue)
@property
def rgb_unscaled(self) -> Tuple[int, int, int]:
"""Return the unscaled RGB."""
r, g, b = self.rgb
hsv = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
r_p, g_p, b_p = colorsys.hsv_to_rgb(hsv[0], hsv[1], 1)
return round(r_p * 255), round(g_p * 255), round(b_p * 255)
def _calculateBrightness(
self, rgb: Tuple[int, int, int], level: int
) -> Tuple[int, int, int]:
hsv = colorsys.rgb_to_hsv(*rgb)
r, g, b = colorsys.hsv_to_rgb(hsv[0], hsv[1], level)
return int(r), int(g), int(b)
    def setProtocol(self, protocol: str) -> None:
        """Set the active protocol by name.

        :raises ValueError: if *protocol* is not a known protocol name.
        """
        cls = PROTOCOL_NAME_TO_CLS.get(protocol)
        if cls is None:
            raise ValueError(f"Invalid protocol: {protocol}")
        self._protocol = cls()  # type: ignore
    def _set_protocol_from_msg(
        self,
        full_msg: bytes,
        fallback_protocol: str,
    ) -> None:
        """Select the protocol class from a device response message."""
        # Byte 1 of the message identifies the model.
        self._model_num = full_msg[1]
        self._model_data = get_model(self._model_num, fallback_protocol)
        # Short messages omit the version byte (offset 10); default to 1.
        version_num = full_msg[10] if len(full_msg) > 10 else 1
        self.setProtocol(self._model_data.protocol_for_version_num(version_num))
    def _generate_preset_pattern(
        self, pattern: int, speed: int, brightness: int
    ) -> bytearray:
        """Generate the preset pattern protocol bytes.

        :raises ValueError: if *pattern* is unknown for the active protocol
            or *brightness* is outside 1-100.
        """
        protocol = self.protocol
        # Each protocol family has its own table of valid effect ids.
        if protocol in OLD_EFFECTS_PROTOCOLS:
            if pattern not in ORIGINAL_ADDRESSABLE_EFFECT_ID_NAME:
                raise ValueError("Pattern must be between 1 and 302")
        elif protocol in NEW_EFFECTS_PROTOCOLS:
            if pattern not in ADDRESSABLE_EFFECT_ID_NAME:
                raise ValueError("Pattern must be between 1 and 100")
        elif protocol in CHRISTMAS_EFFECTS_PROTOCOLS:
            if pattern not in CHRISTMAS_ADDRESSABLE_EFFECT_ID_NAME:
                raise ValueError("Pattern must be between 1 and 100")
        else:
            PresetPattern.valid_or_raise(pattern)
        if not (1 <= brightness <= 100):
            raise ValueError("Brightness must be between 1 and 100")
        # Remember the requested effect brightness for later reuse.
        self._last_effect_brightness = brightness
        assert self._protocol is not None
        return self._protocol.construct_preset_pattern(pattern, speed, brightness)
    def _generate_custom_patterm(
        self, rgb_list: List[Tuple[int, int, int]], speed: int, transition_type: str
    ) -> bytearray:
        """Generate the custom pattern protocol bytes.

        NOTE(review): the method name has a typo ("patterm") but renaming
        it would break existing callers.
        """
        # truncate if more than 16
        if len(rgb_list) > 16:
            _LOGGER.warning(
                "Too many colors in %s, truncating list to %s", len(rgb_list), 16
            )
            # NOTE: truncation mutates the caller's list in place.
            del rgb_list[16:]
        # quit if too few
        if len(rgb_list) == 0:
            raise ValueError("setCustomPattern requires at least one color tuples")
        assert self._protocol is not None
        return self._protocol.construct_custom_effect(rgb_list, speed, transition_type)
    def _effect_to_pattern(self, effect: str) -> int:
        """Convert an effect to a pattern code."""
        protocol = self.protocol
        # Look up the name in the table matching the active protocol family,
        # falling back to the classic PresetPattern names.
        if protocol in CHRISTMAS_EFFECTS_PROTOCOLS:
            return CHRISTMAS_ADDRESSABLE_EFFECT_NAME_ID[effect]
        if protocol in NEW_EFFECTS_PROTOCOLS:
            return ADDRESSABLE_EFFECT_NAME_ID[effect]
        if protocol in OLD_EFFECTS_PROTOCOLS:
            return ORIGINAL_ADDRESSABLE_EFFECT_NAME_ID[effect]
        return PresetPattern.str_to_val(effect)
    @property
    def diagnostics(self) -> Dict[str, Any]:
        """Return diagnostics for the device."""
        data: Dict[str, Any] = {"device_state": {}, "last_messages": {}}
        last_messages = data["last_messages"]
        # Hex-dump the most recent protocol messages for debugging.
        for name, msg in self._last_message.items():
            last_messages[name] = " ".join(f"0x{x:02X}" for x in msg)
        device_state = data["device_state"]
        # Snapshot every public attribute/property that serializes cleanly.
        for name in dir(self):
            if name.startswith("_") or name == "diagnostics" or not hasattr(self, name):
                continue
            value: Any = getattr(self, name)
            if is_dataclass(value):
                value = asdict(value)
            if hasattr(value, "value"):
                # Unwrap enum-like objects to their underlying value.
                value = value.value
            if value is None or isinstance(value, SERIALIZABLE_TYPES):
                device_state[name] = value
        return data
| Danielhiversen/flux_led | flux_led/base_device.py | Python | lgpl-3.0 | 47,451 |
import discord
import json
from src.api.discordAPI import DiscordAPI
#loads the config
configData = None
configFile = "./config.json"
with open(configFile) as data_file:
    configData = json.load(data_file)
# Maps each config entry's "api" key to the client class to start.
apiOptions = {
    "discord": DiscordAPI,
}
apis = []
#starts up the clients
for apiConfig in configData:
    newAPI = apiOptions[apiConfig["api"]](apiConfig["token"])
    # NOTE(review): the token is passed both to the constructor and to
    # run(); confirm run() really needs it a second time. Also, clients'
    # run() typically blocks, so later loop iterations may never start.
    newAPI.run(apiConfig["token"])
    # newAPI.run()
apis.append(newAPI) | threedliams/CallbackBot | callbackBot.py | Python | mit | 459 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all application environments."""
    # NOTE(review): the hard-coded fallback key is fine for development but
    # must not reach production; set SECRET_KEY in the environment.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Outgoing mail (Gmail SMTP with STARTTLS); credentials come from the
    # environment so they are never committed to source control.
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
    FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
    # Local MongoDB connection settings.
    MONGODB_SETTINGS = {
        'db': 'webtry',
        'host': '127.0.0.1',
        'port': 27017
    }
    @staticmethod
    def init_app(app):
        """Hook for environment-specific initialisation; default is a no-op."""
        pass
class DevelopmentConfig(Config):
    """Development settings: debug on, local SQLite database."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Test-suite settings: TESTING flag on, separate SQLite database."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
    """Production settings: database URL must come from the environment."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Registry mapping a configuration name (e.g. from FLASK_CONFIG) to its class.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| Rcoko/flaskLearn | config.py | Python | mit | 1,418 |
#
# Copyright (C) 2017 Kevin Thornton <krthornt@uci.edu>
#
# This file is part of fwdpy11.
#
# fwdpy11 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fwdpy11 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fwdpy11. If not, see <http://www.gnu.org/licenses/>.
#
def mslike(pop, **kwargs):
    """
    Function to establish default parameters
    for a single-locus simulation for standard pop-gen
    modeling scenarios.

    :param pop: An instance of :class:`fwdpy11.DiploidPopulation`
    :param kwargs: Keyword arguments overriding the defaults
        (``simlen``, ``beg``, ``end``, ``theta``, ``pneutral``,
        ``rho``, ``dfe``). Unrecognized keys are silently ignored.

    :raises ValueError: if *pop* is not a DiploidPopulation.
    """
    import fwdpy11

    if not isinstance(pop, fwdpy11.DiploidPopulation):
        raise ValueError("incorrect pop type: " + str(type(pop)))

    defaults = {
        "simlen": 10 * pop.N,
        "beg": 0.0,
        "end": 1.0,
        "theta": 100.0,
        "pneutral": 1.0,
        "rho": 100.0,
        "dfe": None,
    }
    # Only recognized keys override the defaults.
    defaults.update(
        (key, value) for key, value in kwargs.items() if key in defaults
    )

    # Scaled parameters follow the usual 4N convention.
    params = {
        "simlen": defaults["simlen"],
        "nregions": [fwdpy11.Region(defaults["beg"], defaults["end"], 1.0)],
        "recregions": [fwdpy11.Region(defaults["beg"], defaults["end"], 1.0)],
        "rates": (
            (defaults["pneutral"] * defaults["theta"]) / (4.0 * pop.N),
            ((1.0 - defaults["pneutral"]) * defaults["theta"]) / (4.0 * pop.N),
            defaults["rho"] / (4.0 * float(pop.N)),
        ),
        "gvalue": fwdpy11.Multiplicative(2.0),
    }
    params["sregions"] = [] if defaults["dfe"] is None else [defaults["dfe"]]
    return params
| molpopgen/fwdpy11 | fwdpy11/ezparams.py | Python | gpl-3.0 | 2,080 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=bad-whitespace, attribute-defined-outside-init, invalid-name
"""
{{ cookiecutter.project_name }} – {{ cookiecutter.short_description }}.
This setuptools script follows the DRY principle and tries to
minimize repetition of project metadata by loading it from other
places (like the package's `__init__.py`). Incidently, this makes
the script almost identical between different projects.
It is also importable (by using the usual `if __name__ == '__main__'`
idiom), and exposes the project's setup data in a `project` dict.
This allows other tools to exploit the data assembling code contained
in here, and again supports the DRY principle. The `rituals` package
uses that to provide Invoke tasks that work for any project, based on
its project metadata.
Copyright © {{ cookiecutter.year }} {{ cookiecutter.full_name }} <{{ cookiecutter.email }}>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import sys
from collections import defaultdict
# Project data (the rest is parsed from __init__.py and other project files)
name = '{{ cookiecutter.repo_name }}'
package_name = '{{ cookiecutter.pkg_name }}'
# Import setuptools
try:
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
except ImportError as exc:
raise RuntimeError("Cannot install '{0}', setuptools is missing ({1})".format(name, exc))
# Helpers
project_root = os.path.abspath(os.path.dirname(__file__))


def srcfile(*args):
    "Return an absolute path rooted at the project directory."
    return os.path.join(project_root, *args)
class PyTest(TestCommand):
    """pytest integration into setuptool's `test` command."""
    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
    def initialize_options(self):
        """Set option defaults (setuptools hook)."""
        TestCommand.initialize_options(self)
        self.pytest_args = []
    def finalize_options(self):
        """Finalise option values (setuptools hook)."""
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        """Run pytest and propagate a non-zero exit code to the shell."""
        # import locally, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.pytest_args)
        if errno:
            sys.exit(errno)
def _build_metadata(): # pylint: disable=too-many-locals, too-many-branches
    """Return the project's setup() metadata as a dict.

    Assembles metadata from the package's ``__init__.py`` dunders,
    requirements files, ``__main__.py`` entry points, common doc files
    and ``classifiers.txt`` (DRY: nothing is repeated in this script).
    """
    # Handle metadata in package source
    expected_keys = ('url', 'version', 'license', 'author', 'author_email', 'long_description', 'keywords')
    metadata = {}
    with open(srcfile('src', package_name, '__init__.py')) as handle:
        pkg_init = handle.read()
        # Get default long description from docstring
        metadata['long_description'] = re.search(r'^"""(.+?)^"""$', pkg_init, re.DOTALL|re.MULTILINE).group(1).strip()
        for line in pkg_init.splitlines():
            # Collect __url__ = '...' style dunder assignments.
            match = re.match(r"""^__({0})__ += (?P<q>['"])(.+?)(?P=q)$""".format('|'.join(expected_keys)), line)
            if match:
                metadata[match.group(1)] = match.group(3)
    if not all(i in metadata for i in expected_keys):
        raise RuntimeError("Missing or bad metadata in '{0}' package".format(name))
    # Load requirements files
    requirements_files = dict(
        install = 'requirements.txt',
        setup = 'setup-requirements.txt',
        test = 'test-requirements.txt',
    )
    requires = {}
    for key, filename in requirements_files.items():
        requires[key] = []
        if os.path.exists(srcfile(filename)):
            with open(srcfile(filename), 'r') as handle:
                for line in handle:
                    line = line.strip()
                    if line and not line.startswith('#'):
                        if line.startswith('-e'):
                            # Editable requirement: keep only the egg name.
                            line = line.split()[1].split('#egg=')[1]
                        requires[key].append(line)
    if 'pytest' not in requires['test']:
        requires['test'].append('pytest')
    # CLI entry points
    console_scripts = []
    for path, _, files in os.walk(srcfile('src', package_name)):
        if '__main__.py' in files:
            path = path[len(srcfile('src') + os.sep):]
            appname = path.split(os.sep)[-1]
            # A __main__.py may override the command name via __app_name__.
            with open(srcfile('src', path, '__main__.py')) as handle:
                for line in handle.readlines():
                    match = re.match(r"""^__app_name__ += (?P<q>['"])(.+?)(?P=q)$""", line)
                    if match:
                        appname = match.group(2)
            console_scripts.append('{0} = {1}.__main__:cli'.format(appname, path.replace(os.sep, '.')))
    # Add some common files to EGG-INFO
    candidate_files = [
        'LICENSE', 'NOTICE',
        'README', 'README.md', 'README.rst', 'README.txt',
        'CHANGES', 'CHANGELOG', 'debian/changelog',
    ]
    data_files = defaultdict(list)
    for filename in candidate_files:
        if os.path.exists(srcfile(filename)):
            data_files['EGG-INFO'].append(filename)
    # Complete project metadata
    with open(srcfile('classifiers.txt'), 'r') as handle:
        classifiers = [i.strip() for i in handle if i.strip() and not i.startswith('#')]
    metadata.update(dict(
        name = name,
        description = ' '.join(metadata['long_description'].split('.')[0].split()), # normalize whitespace
        url = metadata['url'],
        package_dir = {'': 'src'},
        packages = find_packages(srcfile('src'), exclude=['tests']),
        data_files = data_files.items(),
        zip_safe = False,
        include_package_data = True,
        install_requires = requires['install'],
        setup_requires = requires['setup'],
        tests_require = requires['test'],
        classifiers = classifiers,
        cmdclass = dict(
            test = PyTest,
        ),
        entry_points = dict(
            console_scripts = console_scripts,
        ),
    ))
    return metadata
# Ensure "setup.py" is importable by other tools, to access the project's metadata
project = _build_metadata()
__all__ = ['project', 'project_root', 'package_name', 'srcfile']
# Only run setup() when executed directly, not when imported for metadata.
if __name__ == '__main__':
    setup(**project)
| 1and1/py-generic-project | {{cookiecutter.repo_name}}/setup.py | Python | apache-2.0 | 6,680 |
#
# Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
from spacewalk.common.rhnLog import log_debug
__rhnexport__ = ['update_client_cert']
def update_client_cert(server_id, action_id, data=None):
    """Server-side handler for the ``update_client_cert`` client action.

    Nothing needs to be recorded on the server for this action, so it
    only logs the action id and returns ``None``.

    :param server_id: id of the client system (unused).
    :param action_id: id of the queued action (logged).
    :param data: optional extra action data (unused). Previously defaulted
        to a shared mutable ``{}``, which is a Python anti-pattern.
    """
    log_debug(3, action_id)
    return
| hustodemon/spacewalk | backend/server/action_extra_data/clientcert.py | Python | gpl-2.0 | 788 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for DBO explorer."""
import sys
import colorama
from termcolor import colored
from lib import arg_parser
from lib import explorer_handler
from lib import parse_input
colorama.init()
def main(parsed_args):
  """Main method for DBO explorer.

  Runs an interactive menu loop until the user quits; ontology errors are
  printed in red and the loop continues.
  """
  print('Starting DBO explorer...')
  ontology = explorer_handler.Build(parsed_args.modified_types_filepath)
  done = False
  while not done:
    try:
      print(
          '\nHow would you like to query DBO\n' +
          '1: Get fields for a type name\n' +
          '2: Get types for a list of fields\n' +
          '3: Validate a field name\n' +
          'q: quit\n'
      )
      function_choice = input('Please select an option: ')
      if function_choice == '1':
        parse_input.GetFieldsForTypeName(ontology)
      elif function_choice == '2':
        parse_input.GetTypesForFieldList(ontology)
      elif function_choice == '3':
        parse_input.ValidateFieldName(ontology)
      elif function_choice == 'q':
        print('bye bye')
        done = True
      else:
        print(
            'You entered: ' + function_choice + '\n' +
            'Please enter a valid input'
        )
    # Recoverable ontology/input errors: report and re-prompt.
    except TypeError as type_error:
      print(colored(type_error, 'red'))
      continue
    except ValueError as value_error:
      print(colored(value_error, 'red'))
      continue
    except AttributeError as attribute_error:
      print(colored(attribute_error, 'red'))
      continue
if __name__ == '__main__':
  # Parse CLI flags (skipping argv[0]) and start the interactive loop.
  args = arg_parser.ParseArgs().parse_args(sys.argv[1:])
  main(args)
| google/digitalbuildings | tools/explorer/explorer.py | Python | apache-2.0 | 2,130 |
# Copyright (c) 2015 Jonathan M. Lange <jml@mumak.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Take the output of an Eliot reporter and turn it into something useful.
"""
from datetime import datetime
import json
from operator import attrgetter
from pyrsistent import PClass, field, freeze, ny, pvector
from toolz.itertoolz import groupby
# TODO: No doubt much of this is more general than eliotreporter, or tests.
# Share it in a better way.
# TODO: Also, this duplicates logic from eliottree (but implemented in a
# different way). Can we reduce the duplication somehow?
# PLAN:
# - stream of JSON to stream of dictionaries
# - dictionaries to Messages (task_uuid, timestamp, task_level, ???, fields)
# - stream of Messages to Tasks and ungrouped Messages
# - each Task to tree of actions
# - find actions that are tests (action_type == 'trial:test')
# - interpret those actions as tests
def fmap(f, x):
    """Apply ``f`` to ``x``, propagating ``None`` unchanged (functor map)."""
    if x is None:
        return None
    return f(x)
def remove_fields(d, fields):
    """Return a copy of persistent mapping *d* with *fields* removed.

    Names in *fields* that are absent from *d* are ignored.
    """
    e = d.evolver()
    for f in fields:
        if f in e:
            del e[f]
    return e.persistent()
def get_timestamp(contents):
    """Return ``contents['timestamp']`` as a ``datetime``, or ``None``.

    NOTE(review): ``datetime.fromtimestamp`` interprets the value in the
    local timezone — confirm whether UTC was intended.
    """
    return fmap(datetime.fromtimestamp, contents.get('timestamp'))
class Message(PClass):
    """
    A parsed Eliot message.
    """
    task_uuid = field()   # task the message belongs to
    task_level = field()  # position of the message within the task
    timestamp = field()   # datetime parsed from the raw 'timestamp'
    fields = field()      # remaining message fields (persistent map)
    @classmethod
    def new(klass, contents):
        """Build a ``Message`` from a raw parsed-JSON mapping."""
        # The three structural keys are lifted out; everything else stays
        # in ``fields``.
        fields = remove_fields(
            contents, [
                'task_uuid',
                'task_level',
                'timestamp',
            ])
        return klass(
            task_uuid=contents.get('task_uuid'),
            task_level=contents.get('task_level'),
            timestamp=get_timestamp(contents),
            fields=fields,
        )
    def as_dict(self):
        """Reassemble (approximately) the original message mapping."""
        fields = self.fields.evolver()
        fields['task_uuid'] = self.task_uuid
        fields['task_level'] = self.task_level
        # XXX: Not quite a full reversal, because the Python APIs for turning
        # datetimes into Unix timestamps are awful and jml is too tired and
        # lazy to bother right now.
        fields['timestamp'] = self.timestamp
        return fields.persistent()
def _to_tasks(messages):
    """Group *messages* into a persistent map keyed by ``task_uuid``."""
    return freeze(groupby(attrgetter('task_uuid'), messages))
def _sort_by_level(messages):
    """Return *messages* as a pvector sorted by ``task_level``."""
    return pvector(sorted(messages, key=attrgetter('task_level')))
def to_tasks(messages):
    """Group a sequence of ``Message`` objects by task.
    A "task" is a top-level action identified by a ``task_uuid`` field. All
    messages that have the same value of ``task_uuid`` are part of the same
    task.
    Returns a dictionary mapping ``task_uuid`` to a sequence of messages
    sorted by task level.
    """
    tasks = _to_tasks(messages)
    # ``ny`` matches every key, so each task's message list gets sorted.
    return tasks.transform([ny], _sort_by_level)
class Action(PClass):
    """
    An Eliot Action.
    """
    end_time = field()    # timestamp of the final message
    messages = field()    # messages belonging to the action
    start_time = field()  # timestamp of the first message
    status = field()      # final 'action_status' value
    task_uuid = field()   # task the action belongs to
    @classmethod
    def new(cls, messages):
        """Build an ``Action`` from the (level-sorted) messages of one task.

        *messages* must be non-empty and share a single ``task_uuid``;
        the single-element unpacking below enforces the latter.
        """
        [task_uuid] = list(set(m.task_uuid for m in messages))
        # XXX: Add another layer so we have ActionStart, ActionSuccess, and
        # ActionFailed "messages". Then the responsibility of this class is
        # merely to assemble those into a coherent representation of an
        # Action, raising errors for type validation.
        status = messages[-1].fields.get('action_status')
        return cls(
            messages=pvector(),
            status=status,
            task_uuid=task_uuid,
            start_time=messages[0].timestamp,
            end_time=messages[-1].timestamp,
        )
def _parse_entry(entry):
    """Parse a single serialized JSON object into a persistent structure."""
    return freeze(json.loads(entry))
def parse_json_stream(lines):
    """
    Parse a stream of JSON objects.
    Assumes that ``lines`` is an iterable of serialized JSON objects.
    """
    # Lazily yields one parsed (frozen) object per line.
    for line in lines:
        yield _parse_entry(line.strip())
def main():
    """Read a JSON-lines Eliot log (path in argv[1]) and pretty-print its tasks."""
    from pprint import pprint
    from pyrsistent import thaw
    import sys
    with open(sys.argv[1]) as f:
        tasks = to_tasks(Message.new(x) for x in parse_json_stream(f))
    pprint(thaw(tasks))
| jml/trial-eliot | eliotreporter/_parse.py | Python | apache-2.0 | 4,718 |
from __future__ import absolute_import, division, print_function, unicode_literals
import semantic_version as semantic_version_module
import six
import os
import re
def not_none(value):
    """
    Value cannot be empty (must not be None).
    """
    # `value is not None` is the idiomatic spelling of `not value is None`.
    return value is not None
def text(value):
    """
    Value can contain any readable character
    """
    # Accepts everything unconditionally; keeps the validator interface
    # uniform for free-text fields.
    return True
def identifier(value):
    """
    Value can only contain alphanumerical characters, underscore and ``$``,
    and must not start with a digit.
    """
    # A debug cross-check against str.isidentifier() used to live here; it
    # raised AssertionError on Python 3 for regex-valid values such as "a$"
    # and was stripped under -O, so the regex alone is the source of truth.
    return bool(re.match(r'^[a-zA-Z_$][a-zA-Z_$0-9]*$', value))
def available_path(value):
    """Path is available when nothing exists at it yet."""
    already_there = os.path.exists(value)
    return not already_there
def creatable_path(value):
    """
    Directory must exist or be possible to create.

    Only absolute paths qualify; for a nonexistent path, the closest
    existing ancestor must be a writable directory.
    """
    if value[0] != '/':
        return False
    if os.path.isdir(value):
        return True
    # Walk upwards until we hit a component that actually exists.
    existing = value
    while not os.path.exists(existing):
        existing = os.path.split(existing)[0]
    return writable_directory(existing)
def writable_directory(value):
    """
    Must be a writable directory; paths that do not exist yet pass.
    """
    if os.path.exists(value):
        return os.access(value, os.W_OK)
    return True
def empty_directory(value):
    """
    An existing directory must be empty; paths that do not exist yet pass.
    """
    if os.path.exists(value):
        return not os.listdir(value)
    return True
def semantic_version(value):
    """
    Value must be a valid semantic version
    """
    try:
        semantic_version_module.Version(value)
    except ValueError:
        return False
    return True
def identifier_list(values):
    """
    Every element must be a valid identifier (alphanumerical + underscore)
    """
    for candidate in values:
        if not identifier(candidate):
            return False
    return True
def url(value):
    """
    Value must be valid url
    """
    # NOTE(review): the early return below disables validation entirely --
    # every value is accepted and the regex that follows is unreachable dead
    # code. Presumably switched off on purpose; confirm before re-enabling.
    return True
    pattern = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
        r'localhost|' #localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return bool(pattern.match(value))
| aholmback/fuse | fuse/utils/validators.py | Python | mit | 2,276 |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 06 09:36:59 2013
@author: caleb.hattingh
In the spyder IDE, just hit F10 on this file to have profiler output
generated.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import addmodule
addmodule.addpath()
from misu import *
COUNT = 1000000
def testu():
    """Exercise misu quantity arithmetic COUNT times (unit-aware benchmark)."""
    for x in xrange(COUNT):
        # Density * volume / density; result is discarded -- only the cost of
        # building and combining quantity objects matters to the profiler.
        y = (100*kg/m**3) * (4000*m**3) / (210*g/cm**3 + x*kg/m**3)
def test():
    """Same arithmetic as testu() with plain numbers, as the baseline."""
    for step in xrange(COUNT):
        y = (100) * (4000) / (210 + step)
if __name__ == '__main__':
    # Run the plain-number baseline first, then the unit-aware version, so
    # profiler output (F10 in Spyder) shows both side by side.
    test()
    testu()
| cjrh/misu | misu/profiler_support.py | Python | bsd-2-clause | 582 |
# -*- coding: utf-8 -*-
#
# EAV-Django is a reusable Django application which implements EAV data model
# Copyright © 2009—2010 Andrey Mikhaylenko
#
# This file is part of EAV-Django.
#
# EAV-Django is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EAV-Django is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with EAV-Django. If not, see <http://gnu.org/licenses/>.
# python
from copy import deepcopy
# django
from django.forms import (BooleanField, CharField, CheckboxSelectMultiple,
DateField, FloatField, ModelForm, ModelMultipleChoiceField, #MultipleChoiceField,
ValidationError)
from django.contrib.admin.widgets import AdminDateWidget, FilteredSelectMultiple #, RelatedFieldWidgetWrapper
from django.utils.translation import ugettext_lazy as _
__all__ = ['BaseSchemaForm', 'BaseDynamicEntityForm']
class BaseSchemaForm(ModelForm):
    """Schema form that rejects attribute names colliding with model fields."""
    def clean_name(self):
        "Avoid name clashes between static and dynamic attributes."
        name = self.cleaned_data['name']
        reserved_names = self._meta.model._meta.get_all_field_names()
        if name in reserved_names:
            raise ValidationError(_('Attribute name must not clash with reserved names'
                                    ' ("%s")') % '", "'.join(reserved_names))
        return name
class BaseDynamicEntityForm(ModelForm):
    """
    ModelForm for entity with support for EAV attributes. Form fields are created
    on the fly depending on Schema defined for given entity instance. If no schema
    is defined (i.e. the entity instance has not been saved yet), only static
    fields are used. However, on form validation the schema will be retrieved
    and EAV fields dynamically added to the form, so when the validation is
    actually done, all EAV fields are present in it (unless Rubric is not defined).
    """
    # Maps schema datatype codes to the form field class used to render them.
    FIELD_CLASSES = {
        'text': CharField,
        'float': FloatField,
        'date': DateField,
        'bool': BooleanField,
        'many': ModelMultipleChoiceField, #RelatedFieldWidgetWrapper(MultipleChoiceField),
    }
    # Extra constructor kwargs per datatype; a value may be a callable taking
    # the schema (used by 'many' to pick a widget based on choice count).
    FIELD_EXTRA = {
        'date': {'widget': AdminDateWidget},
        'many': lambda schema: {
            'widget': CheckboxSelectMultiple
                      if len(schema.get_choices()) <= 5 else
                      FilteredSelectMultiple(schema.title, is_stacked=False)
        },
    }
    def __init__(self, data=None, *args, **kwargs):
        # Dynamic fields are (re)built on every instantiation.
        super(BaseDynamicEntityForm, self).__init__(data, *args, **kwargs)
        self._build_dynamic_fields()
    def check_eav_allowed(self):
        """
        Returns True if dynamic attributes can be added to this form.
        If False is returned, only normal fields will be displayed.
        """
        return bool(self.instance)# and self.instance.check_eav_allowed()) # XXX would break form where stuff is _being_ defined
    def _build_dynamic_fields(self):
        # reset form fields
        self.fields = deepcopy(self.base_fields)
        # do not display dynamic fields if some fields are yet defined
        if not self.check_eav_allowed():
            return
        for schema in self.instance.get_schemata():
            defaults = {
                'label':     schema.title.capitalize(),
                'required':  schema.required,
                'help_text': schema.help_text,
            }
            datatype = schema.datatype
            if datatype == schema.TYPE_MANY:
                # m2m: pre-select the currently related choices.
                choices = getattr(self.instance, schema.name)
                defaults.update({'queryset': schema.get_choices(),
                                 'initial': [x.pk for x in choices]})
            extra = self.FIELD_EXTRA.get(datatype, {})
            if hasattr(extra, '__call__'):
                extra = extra(schema)
            defaults.update(extra)
            MappedField = self.FIELD_CLASSES[datatype]
            self.fields[schema.name] = MappedField(**defaults)
            # fill initial data (if attribute was already defined)
            value = getattr(self.instance, schema.name)
            if value and not datatype == schema.TYPE_MANY:    # m2m is already done above
                self.initial[schema.name] = value
    def save(self, commit=True):
        """
        Saves this ``form``'s cleaned_data into model instance ``self.instance``
        and related EAV attributes.
        Returns ``instance``.
        """
        if self.errors:
            raise ValueError("The %s could not be saved because the data didn't"
                             " validate." % self.instance._meta.object_name)
        # create entity instance, don't save yet
        instance = super(BaseDynamicEntityForm, self).save(commit=False)
        # assign attributes
        for name in instance.get_schema_names():
            value = self.cleaned_data.get(name)
            setattr(instance, name, value)
        # save entity and its attributes
        if commit:
            instance.save()
        return instance
    save.alters_data = True
    def save_m2m(self, *a, **kw):
        # stub for admin TODO: check if we don't need to super() if entity indeed has m2m
        pass
| omusico/eav-django | eav/forms.py | Python | lgpl-3.0 | 5,690 |
# Copyright 2020 Catalyst Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trove.guestagent.datastore.mariadb import service
from trove.guestagent.datastore.mysql_common import manager
from trove.guestagent.datastore.mysql_common import service as mysql_service
class Manager(manager.MySqlManager):
    """Trove guest-agent datastore manager for MariaDB.

    Wires the MariaDB-specific app/admin/status objects into the generic
    MySQL manager machinery.
    """
    def __init__(self):
        status = mysql_service.BaseMySqlAppStatus(self.docker_client)
        app = service.MariaDBApp(status, self.docker_client)
        adm = service.MariaDBAdmin(app)
        super(Manager, self).__init__(app, status, adm)
    def get_start_db_params(self, data_dir):
        """Get parameters for starting database.
        Cinder volume initialization(after formatted) may leave a lost+found
        folder.
        """
        # lost+found comes from mkfs on a fresh volume and conf.d holds
        # configuration files -- neither is a database directory.
        return (f'--ignore-db-dir=lost+found --ignore-db-dir=conf.d '
                f'--datadir={data_dir}')
| openstack/trove | trove/guestagent/datastore/mariadb/manager.py | Python | apache-2.0 | 1,400 |
#!/usr/bin/env python3
import sys
from argparse import ArgumentParser
from pathlib import Path
from deeplator import Translator, SOURCE_LANGS
# Command-line entry point: parse options, pick source/target languages,
# read the text (file or stdin) and print the translation line by line.
if __name__ == "__main__":
    parser = ArgumentParser(
        description="Deeplator is an application enabling translation via the DeepL translator."
    )
    parser.add_argument("-l", "--lang", dest="lang", type=str,
                        help="""The translation code used for translation.
                        Use the format AA-BB where AA is the source language
                        and BB is the output language. Example: EN-DE to
                        translate from English to German.""")
    parser.add_argument("-f", "--file", dest="path", type=Path,
                        help="Read input from specified file.")
    parser.add_argument("-s", "--silent", dest="silent", action='store_true',
                        help="Print only the translation.")
    args = parser.parse_args()
    if args.lang:
        lang = args.lang.split("-")
        if len(lang) != 2:
            raise Exception("Invalid translation Code.")
    else:
        # No -l option: list the available languages and prompt interactively.
        langs = ",".join(SOURCE_LANGS)
        print("You did not specify a translation code.")
        print("Available languages are {}.".format(langs))
        lang = []
        lang_tmp = str(input("Source language: "))
        lang.append(lang_tmp)
        lang_tmp = str(input("Output language: "))
        lang.append(lang_tmp)
    t = Translator(lang[0], lang[1])
    if args.path:
        with open(args.path, "r") as src_file:
            text = src_file.read()
    else:
        # Read the whole of stdin until EOF (Ctrl+D).
        if not args.silent:
            print("Enter the text to be translated. Use Ctrl+D to exit.", file=sys.stderr)
        lines = sys.stdin.readlines()
        text = "".join(lines)
    if not args.silent:
        # Visual separator between the echoed prompt and the translation;
        # goes to stderr so stdout stays machine-consumable.
        print("-" * 16, file=sys.stderr)
    sentences = t.split_into_sentences(text)
    translations = t.translate_sentences(sentences)
    for sentence in translations:
        print(sentence)
| uinput/deeplator | deeplator.py | Python | mit | 2,000 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: subversion
short_description: Deploys a subversion repository.
description:
- Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.
version_added: "0.7"
author: "Dane Summers (@dsummersl) <njharman@gmail.com>"
notes:
- Requires I(svn) to be installed on the client.
- This module does not handle externals
requirements: []
options:
repo:
description:
- The subversion URL to the repository.
required: true
aliases: [ name, repository ]
default: null
dest:
description:
- Absolute path where the repository should be deployed.
required: true
default: null
revision:
description:
- Specific revision to checkout.
required: false
default: HEAD
aliases: [ version ]
force:
description:
- If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.
Prior to 1.9 the default was `yes`.
required: false
default: "no"
choices: [ "yes", "no" ]
username:
description:
- --username parameter passed to svn.
required: false
default: null
password:
description:
- --password parameter passed to svn.
required: false
default: null
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to svn executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
checkout:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.3"
description:
- If no, do not check out the repository if it does not exist locally
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.3"
description:
- If no, do not retrieve new revisions from the origin repository
export:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.6"
description:
- If C(yes), do export instead of checkout/update.
switch:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
description:
- If C(no), do not call svn switch before update.
'''
EXAMPLES = '''
# Checkout subversion repository to specified folder.
- subversion:
repo: svn+ssh://an.example.org/path/to/repo
dest: /src/checkout
# Export subversion directory to folder
- subversion:
repo: svn+ssh://an.example.org/path/to/repo
dest: /src/export
# Example just get information about the repository whether or not it has
# already been cloned locally.
- subversion:
repo: svn+ssh://an.example.org/path/to/repo
dest: /srv/checkout
checkout: no
update: no
'''
import re
import tempfile
class Subversion(object):
    """Thin wrapper around the ``svn`` command line client.

    Every command runs non-interactively with server certificates trusted
    and the auth cache disabled, optionally authenticated with the supplied
    username/password.
    """
    def __init__(
            self, module, dest, repo, revision, username, password, svn_path):
        self.module = module
        self.dest = dest
        self.repo = repo
        self.revision = revision
        self.username = username
        self.password = password
        self.svn_path = svn_path
    def _exec(self, args, check_rc=True):
        '''Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output.'''
        bits = [
            self.svn_path,
            '--non-interactive',
            '--trust-server-cert',
            '--no-auth-cache',
        ]
        if self.username:
            bits.extend(["--username", self.username])
        if self.password:
            bits.extend(["--password", self.password])
        bits.extend(args)
        rc, out, err = self.module.run_command(bits, check_rc)
        if check_rc:
            return out.splitlines()
        else:
            return rc
    def is_svn_repo(self):
        '''Checks if path is a SVN Repo.'''
        rc = self._exec(["info", self.dest], check_rc=False)
        return rc == 0
    def checkout(self):
        '''Creates new svn working directory if it does not already exist.'''
        self._exec(["checkout", "-r", self.revision, self.repo, self.dest])
    def export(self, force=False):
        '''Export svn repo to directory'''
        cmd = ["export"]
        if force:
            cmd.append("--force")
        cmd.extend(["-r", self.revision, self.repo, self.dest])
        self._exec(cmd)
    def switch(self):
        '''Change working directory's repo.'''
        # switch to ensure we are pointing at correct repo.
        self._exec(["switch", self.repo, self.dest])
    def update(self):
        '''Update existing svn working directory.'''
        self._exec(["update", "-r", self.revision, self.dest])
    def revert(self):
        '''Revert svn working directory.'''
        self._exec(["revert", "-R", self.dest])
    def get_revision(self):
        '''Revision and URL of subversion working directory.'''
        text = '\n'.join(self._exec(["info", self.dest]))
        rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
        url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)
        return rev, url
    def get_remote_revision(self):
        '''Revision and URL of subversion working directory.'''
        text = '\n'.join(self._exec(["info", self.repo]))
        rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
        return rev
    def has_local_mods(self):
        '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
        lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
        # The --quiet option will return only modified files.
        # Match only revisioned files, i.e. ignore status '?'.
        regex = re.compile(r'^[^?X]')
        # Has local mods if more than 0 modified revisioned files.
        return len(list(filter(regex.match, lines))) > 0
    def needs_update(self):
        '''Compare working-copy revision to HEAD; returns (changed, current, head).'''
        curr, url = self.get_revision()
        out2 = '\n'.join(self._exec(["info", "-r", "HEAD", self.dest]))
        head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0)
        # Lines look like "Revision: 1234"; compare the numeric parts.
        rev1 = int(curr.split(':')[1].strip())
        rev2 = int(head.split(':')[1].strip())
        change = False
        if rev1 < rev2:
            change = True
        return change, curr, head
# ===========================================
def main():
    """Ansible module entry point: check out, update or export a repository.

    Honours check mode and the checkout/update/export/switch toggles; exits
    via module.exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(type='path'),
            repo=dict(required=True, aliases=['name', 'repository']),
            revision=dict(default='HEAD', aliases=['rev', 'version']),
            force=dict(default='no', type='bool'),
            username=dict(required=False),
            password=dict(required=False, no_log=True),
            executable=dict(default=None, type='path'),
            export=dict(default=False, required=False, type='bool'),
            checkout=dict(default=True, required=False, type='bool'),
            update=dict(default=True, required=False, type='bool'),
            switch=dict(default=True, required=False, type='bool'),
        ),
        supports_check_mode=True
    )
    dest = module.params['dest']
    repo = module.params['repo']
    revision = module.params['revision']
    force = module.params['force']
    username = module.params['username']
    password = module.params['password']
    svn_path = module.params['executable'] or module.get_bin_path('svn', True)
    export = module.params['export']
    switch = module.params['switch']
    checkout = module.params['checkout']
    update = module.params['update']
    # We screenscrape a huge amount of svn commands so use C locale anytime we
    # call run_command()
    module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C')
    if not dest and (checkout or update or export):
        module.fail_json(msg="the destination directory must be specified unless checkout=no, update=no, and export=no")
    svn = Subversion(module, dest, repo, revision, username, password, svn_path)
    if not export and not update and not checkout:
        # Info-only mode: just report the remote revision.
        module.exit_json(changed=False, after=svn.get_remote_revision())
    if export or not os.path.exists(dest):
        before = None
        local_mods = False
        if module.check_mode:
            module.exit_json(changed=True)
        elif not export and not checkout:
            module.exit_json(changed=False)
        if not export and checkout:
            svn.checkout()
        else:
            svn.export(force=force)
    elif svn.is_svn_repo():
        # Order matters. Need to get local mods before switch to avoid false
        # positives. Need to switch before revert to ensure we are reverting to
        # correct repo.
        if module.check_mode or not update:
            check, before, after = svn.needs_update()
            module.exit_json(changed=check, before=before, after=after)
        before = svn.get_revision()
        local_mods = svn.has_local_mods()
        if switch:
            svn.switch()
        if local_mods:
            if force:
                svn.revert()
            else:
                module.fail_json(msg="ERROR: modified files exist in the repository.")
        svn.update()
    else:
        module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, ))
    if export:
        module.exit_json(changed=True)
    else:
        after = svn.get_revision()
        changed = before != after or local_mods
        module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
main()
| robinro/ansible-modules-core | source_control/subversion.py | Python | gpl-3.0 | 10,389 |
from helper_sql import sqlExecute
def insert(t):
    """Insert one row into the ``sent`` table.

    ``t`` must be a 15-item sequence matching the sent-table column order;
    it is unpacked into the 15 SQL placeholders.
    """
    sqlExecute('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t)
| bmng-dev/PyBitmessage | src/helper_sent.py | Python | mit | 132 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/unittest_import_public.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/unittest_import_public.proto',
package='protobuf_unittest_import',
syntax='proto2',
serialized_pb=_b('\n,google/protobuf/unittest_import_public.proto\x12\x18protobuf_unittest_import\" \n\x13PublicImportMessage\x12\t\n\x01\x65\x18\x01 \x01(\x05\x42\x1a\n\x18\x63om.google.protobuf.test')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PUBLICIMPORTMESSAGE = _descriptor.Descriptor(
name='PublicImportMessage',
full_name='protobuf_unittest_import.PublicImportMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='e', full_name='protobuf_unittest_import.PublicImportMessage.e', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=106,
)
DESCRIPTOR.message_types_by_name['PublicImportMessage'] = _PUBLICIMPORTMESSAGE
PublicImportMessage = _reflection.GeneratedProtocolMessageType('PublicImportMessage', (_message.Message,), dict(
DESCRIPTOR = _PUBLICIMPORTMESSAGE,
__module__ = 'google.protobuf.unittest_import_public_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest_import.PublicImportMessage)
))
_sym_db.RegisterMessage(PublicImportMessage)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.google.protobuf.test'))
# @@protoc_insertion_point(module_scope)
| endlessm/chromium-browser | tools/swarming_client/third_party/google/protobuf/unittest_import_public_pb2.py | Python | bsd-3-clause | 2,325 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+----------------------------------------------
#| Global Imports
#+----------------------------------------------
import uuid
import math
#+----------------------------------------------
#| Local Imports
#+----------------------------------------------
from netzob import _libRelation
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.AbstractType import AbstractType
from netzob.Common.Models.Types.Raw import Raw
from netzob.Common.Models.Types.Decimal import Decimal
from netzob.Common.Models.Vocabulary.AbstractField import AbstractField
@NetzobLogger
class RelationFinder(object):
"""Provides multiple algorithms to find relations between messages.
>>> import binascii
>>> from netzob.all import *
>>> samples = ["0007ff2f000000000000", "0011ffaaaaaaaaaaaaaabbcc0010000000000000", "0012ffddddddddddddddddddddfe1f000000000000"]
>>> messages = [RawMessage(data=binascii.unhexlify(sample)) for sample in samples]
>>> symbol = Symbol(messages=messages)
>>> Format.splitStatic(symbol)
>>> rels = RelationFinder.findOnFields(symbol.fields[1], symbol.fields[3])
>>> print len(rels)
1
>>> for rel in rels:
... print rel["relation_type"] + " between " + rel["x_field"].name + ":" + rel["x_attribute"] + \
" and " + rel["y_field"].name + ":" + rel["y_attribute"]
SizeRelation between Field-1:value and Field-3:size
"""
# Field's attributes
ATTR_VALUE = "value"
ATTR_SIZE = "size"
AVAILABLE_ATTRIBUTES = [ATTR_VALUE, ATTR_SIZE]
# Relation types
REL_SIZE = "SizeRelation"
REL_DATA = "DataRelation"
REL_EQUALITY = "EqualityRelation"
REL_UNKNOWN = "Unknown"
def __init__(self):
pass
@staticmethod
@typeCheck(AbstractField)
def findOnSymbol(symbol):
"""Find exact relations between fields in the provided
symbol/field.
:param symbol: the symbol in which we are looking for relations
:type symbol: :class:`netzob.Common.Models.Vocabulary.AbstractField.AbstractField`
"""
rf = RelationFinder()
return rf.executeOnSymbol(symbol)
@staticmethod
@typeCheck(AbstractField, AbstractField, str, str)
def findOnFields(x_field, y_field, x_attribute=None, y_attribute=None):
"""Find exact relations between the provided fields, according
to their optional specified attributes.
"""
rf = RelationFinder()
return rf.executeOnFields(x_field, y_field, x_attribute, y_attribute)
# @typeCheck(AbstractField)
# def executeOnSymbol(self, symbol):
# """
# :param symbol: the symbol in which we are looking for relations
# :type symbol: :class:`netzob.Common.Models.Vocabulary.AbstractField.AbstractField`
# """
# cells = [field.getValues(encoded=False, styled=False)
# for field in symbol.getExtendedFields()
# #if not field.isStatic()
# ]
# if cells:
# # Invert array dimensions liks this:
# # < [[m0f0, m1f0, ...], [m0f1, m1f1, ...]]
# # > [(m0f0, m0f1, ...), (m1f0, m1f1, ...)]
# for algo, refs in _libRelation.find(zip(*cells)).items():
# for ref_idx, ref_off, ref_size, rels in refs:
# print "Relations(%s) with F%d:" % (algo, ref_idx)
# for rel_id, rel_conf in enumerate(rels):
# print " %d. F[%d][%d:%d]" % ((rel_id,) + rel_conf)
# def executeOnCells(self, cellsTable):
# if cellsTable:
# # Invert array dimensions liks this:
# # < [[m0f0, m1f0, ...], [m0f1, m1f1, ...]]
# # > [(m0f0, m0f1, ...), (m1f0, m1f1, ...)]
# for algo, refs in _libRelation.find(zip(*cellsTable)).items():
# for ref_idx, ref_off, ref_size, rels in refs:
# print "Relations(%s) with F%d:" % (algo, ref_idx)
# for rel_id, rel_conf in enumerate(rels):
# print " %d. F[%d][%d:%d]" % ((rel_id,) + rel_conf)
@typeCheck(AbstractField)
def executeOnSymbol(self, symbol):
"""Find exact relations between fields of the provided symbol.
"""
(attributeValues_headers, attributeValues) = self._generateAttributeValuesForSymbol(symbol)
results = []
for i, x_values in enumerate(attributeValues[:-1]):
for j, y_values in enumerate(attributeValues[:]):
if j <= i:
continue
isRelation = True
for k in range(len(x_values)):
if not (x_values[k] == y_values[k]):
isRelation = False
break
if isRelation:
# Do no keep relations where a field's values does not change
if len(set(x_values)) == 1 or len(set(y_values)) == 1:
continue
(x_fields, x_attribute) = attributeValues_headers[i]
(y_fields, y_attribute) = attributeValues_headers[j]
# The relation should not apply on the same field
if len(x_fields) == 1 and len(y_fields) == 1 and x_fields[0].id == y_fields[0].id:
continue
relation_type = self._findRelationType(x_attribute, y_attribute)
# We do not consider unqualified relation (for example, the size of a field is linked to the size of another field)
if relation_type == self.REL_UNKNOWN:
continue
# DataRelation should produce an empty intersection between related fields
if relation_type == self.REL_DATA and len(set(x_fields).intersection(set(y_fields))) > 0:
continue
self._logger.debug("Relation found between '" + str(x_fields) + ":" + x_attribute + "' and '" + str(y_fields) + ":" + y_attribute + "'")
id_relation = str(uuid.uuid4())
results.append({'id': id_relation,
"relation_type": relation_type,
'x_fields': x_fields,
'x_attribute': x_attribute,
'y_fields': y_fields,
'y_attribute': y_attribute})
return results
@typeCheck(AbstractField, AbstractField, str, str)
def executeOnFields(self, x_field, y_field, x_attribute=None, y_attribute=None):
"""Find exact relations between fields according to their
optional selected attributes.
"""
results = []
# Convert cells according to their interesting attribute (data, size or offset)
if x_attribute == self.ATTR_SIZE and y_attribute == self.ATTR_SIZE: # A relation between two size field is uncertain...
return results
x_values = x_field.getValues(encoded=False, styled=False)
y_values = y_field.getValues(encoded=False, styled=False)
# Select attributes for fields comparison
if x_attribute is None:
x_attributes = self.AVAILABLE_ATTRIBUTES
else:
x_attributes = [x_attribute]
if y_attribute is None:
y_attributes = self.AVAILABLE_ATTRIBUTES
else:
y_attributes = [y_attribute]
# Try to find a relation that matches each cell
relation_fcts = {}
relation_fcts[self.REL_SIZE] = self._sizeRelation
relation_fcts[self.REL_EQUALITY] = self._equalRelation
for x_attribute in x_attributes:
for y_attribute in y_attributes:
for (relation_name, relation_fct) in relation_fcts.items():
isRelation = True
for i in range(len(x_values)):
if not relation_fct(x_values[i], x_attribute, y_values[i], y_attribute):
isRelation = False
break
if isRelation:
self._logger.debug("Relation found between '" + x_attribute + ":" + str(x_field.name) + "' and '" + y_attribute + ":" + str(y_field.name) + "'")
self._logger.debug(" Relation: " + relation_name)
id_relation = str(uuid.uuid4())
results.append({'id': id_relation,
"relation_type": relation_name,
'x_field': x_field,
'x_attribute': x_attribute,
'y_field': y_field,
'y_attribute': y_attribute})
return results
def _findRelationType(self, x_attribute, y_attribute):
typeRelation = self.REL_UNKNOWN
if (x_attribute == self.ATTR_VALUE and y_attribute == self.ATTR_SIZE) or (x_attribute == self.ATTR_SIZE and y_attribute == self.ATTR_VALUE):
typeRelation = self.REL_SIZE
elif x_attribute == x_attribute == self.ATTR_VALUE:
typeRelation = self.REL_DATA
return typeRelation
def _equalRelation(self, x, x_attribute, y, y_attribute):
if x == y:
return True
else:
return False
def _sizeRelation(self, x, x_attribute, y, y_attribute):
if x_attribute == self.ATTR_SIZE:
if len(x) > 0:
x = len(x)
else:
if len(x) > 0:
x = TypeConverter.convert(x[:8], Raw, Decimal)
else:
x = 0
if y_attribute == self.ATTR_SIZE:
if len(y) > 0:
y = len(y)
else:
if len(y) > 0:
y = TypeConverter.convert(y[:8], Raw, Decimal)
else:
y = 0
if x == y:
return True
else:
return False
def _generateAttributeValuesForSymbol(self, symbol):
# First we compute the possible list of payloads
lines_data = []
line_header = []
# Compute the list of values for each field
(fields, fieldsValues) = self._getAllFieldsValues(symbol)
# Compute the table of concatenation of values
for i in range(len(fieldsValues[:])):
for j in range(i+1, len(fieldsValues)+1):
# We generate the data
concatCellsData = self._generateConcatData(fieldsValues[i:j])
# We generate lines and header for fields values
line_header.append((fields[i:j], self.ATTR_VALUE))
lines_data.append(self._generateDataValues(concatCellsData))
# We generate lines and header for fields values
line_header.append((fields[i:j], self.ATTR_SIZE))
lines_data.append(self._generateSizeValues(concatCellsData))
# # # Now we generate values for fields sizes
# # (multipleSize_Header, multipleSize_lines) = self._generateSizeFieldFromBeginingOfField(symbol)
# # line_header.extend(multipleSize_Header)
# # for i_line in range(0, len(lines)):
# # lines[i_line] = lines[i_line] + "," + multipleSize_lines[i_line]
# # # Now we generate values for CRC32
# # (crc32Header, crc32Lines) = self._generateCRC32(symbol)
# # line_header.extend(crc32Header)
# # for i_line in range(0, len(lines)):
# # line = lines[i_line]
# # lines[i_line] = line + "," + crc32Lines[i_line]
return (line_header, lines_data)
def _getAllFieldsValues(self, field):
# This recursive function returns a tuple containing
# (array of all fields, array of values of each field)
if len(field.fields) > 0:
fields = []
values = []
for f in field.fields:
(retFields, retValues) = self._getAllFieldsValues(f)
fields.extend(retFields)
values.extend(retValues)
return (fields, values)
else:
return ([field], [field.getValues(encoded=False, styled=False)])
def _generateConcatData(self, cellsDataList):
"""Generates the concatenation of each cell of each field.
Example:
cellsData_1 = ["a", "aa", "aaa"]
cellsData_2 = ["b", "bb", "bbb"]
res = ["ab", "aabb", "aaabbb"]
"""
if len(cellsDataList) < 1:
return []
result = ["" for cell in cellsDataList[0]]
for cellsData in cellsDataList:
for i, data in enumerate(cellsData):
result[i] += data
return result
    def _generateDataValues(self, cellsData):
        """Encode each cell's raw content as a big-endian decimal value.

        Only the first 8 bytes of each cell are considered; empty cells
        yield 0.  The unit size is rounded up to the nearest power of two
        # (presumably bits; UNITSIZE_8 is defined in AbstractType).
        """
        result = []
        for data in cellsData:
            if len(data) > 0:
                data = data[:8] # We take at most 8 bytes
                unitSize = int(AbstractType.UNITSIZE_8) * len(data)
                unitSize = int(pow(2, math.ceil(math.log(unitSize, 2)))) # Round to the nearest upper power of 2
                result.append(Decimal.encode(data, endianness=AbstractType.ENDIAN_BIG, unitSize=str(unitSize)))
            else:
                result.append(0)
        return result
def _generateSizeValues(self, cellsData):
result = []
for data in cellsData:
if len(data) > 0:
result.append(len(data)) # Size in octets
else:
result.append(0)
return result
| dasbruns/netzob | src/netzob/Inference/Vocabulary/RelationFinder.py | Python | gpl-3.0 | 15,650 |
from django.conf.urls import url
from .views import PollsListView, PollsDetailView, PollsResultsView, vote
urlpatterns = [
url(regex=r'^$', view=PollsListView.as_view(), name='list'),
url(regex=r'^(?P<pk>\d+)$', view=PollsDetailView.as_view(), name='detail'),
url(regex=r'^(?P<pk>\d+)/results/$', view=PollsResultsView.as_view(), name='results'),
url(regex=r'^(?P<poll_id>\d+)/vote/$', view=vote, name='vote'),
] | jhough/django-inform | inform/polls/urls.py | Python | bsd-3-clause | 430 |
import asyncio
import aioredis
async def main():
    """Demo entry point: create a pool, write a key, then read it back
    using both connection-acquisition styles before shutting down."""
    pool = await aioredis.create_pool(
        'redis://localhost')
    # async with pool.get() as conn:
    await pool.execute('set', 'my-key', 'value')
    await async_with(pool)
    await with_await(pool)
    # Graceful shutdown: stop accepting acquisitions, then wait for all
    # connections to be released and closed.
    pool.close()
    await pool.wait_closed()
async def async_with(pool):
    """Acquire a connection with the ``async with pool.get()`` idiom."""
    async with pool.get() as conn:
        value = await conn.execute('get', 'my-key')
        print('raw value:', value)
async def with_await(pool):
    """Acquire a connection with the older ``with (await pool)`` idiom."""
    # This is exactly the same as:
    # with (yield from pool) as conn:
    with (await pool) as conn:
        value = await conn.execute('get', 'my-key')
        print('raw value:', value)
# Run the demo on the default event loop when executed as a script.
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| ymap/aioredis | examples/pool2.py | Python | mit | 784 |
# -*- coding: utf-8 -*-
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
""" TODO Docstring. """
import argparse
import logging
import sys
import h5py
import numpy as np
from PIL import Image
from matplotlib import colors
from matplotlib import cm
from openradar import calc
from openradar import config
logger = logging.getLogger(__name__)
def get_parser():
    """ Return argument parser. """
    parser = argparse.ArgumentParser(description=__doc__)
    # One or more HDF5 scan files to combine into the elevation image.
    parser.add_argument('paths', nargs='+', metavar='FILES')
    return parser
def command(paths):
    """ Show a lowest elevation image.

    Reads range / elevation / antenna-height datasets for each radar from
    the given HDF5 files, computes the per-pixel beam altitude, and colors
    each pixel by the radar whose beam is lowest (hue) and by that
    altitude (saturation).  The result is written to 'elevation_image.png'.
    """
    # order: fixed hue slot per radar code
    index = dict(
        NL60=0,
        NL61=1,
        nhb=2,
        ess=3,
        emd=4,
        JAB=5,
    )
    # load the first occurrence of each radar; 'ase' is skipped
    d_rang, d_elev, d_anth = {}, {}, {}
    for p in paths:
        with h5py.File(p, 'r') as h5:
            for r in h5:
                if r in d_rang or r == 'ase':
                    continue
                d_rang[r] = h5[r]['range'][:]
                d_elev[r] = h5[r]['elevation'][:]
                d_anth[r] = h5[r].attrs['antenna_height']
    # Materialize the keys: dict.keys() is a non-indexable view on
    # Python 3, and radars[0] is indexed below to get the grid shape.
    radars = list(d_anth)
    elev = np.ma.empty((len(radars),) + d_rang[radars[0]].shape)
    rang = np.ma.empty((len(radars),) + d_rang[radars[0]].shape)
    anth = np.empty((len(radars), 1, 1))
    for r in radars:
        elev[index[r]] = np.ma.masked_equal(d_elev[r], config.NODATAVALUE)
        rang[index[r]] = np.ma.masked_equal(d_rang[r], config.NODATAVALUE)
        # divided by 1000 -- presumably metres to km; TODO confirm units
        anth[index[r]] = float(d_anth[r]) / 1000
    # calculate beam altitude per radar
    theta = calc.calculate_theta(
        rang=rang, elev=np.radians(elev), anth=anth,
    )
    alt = calc.calculate_height(
        theta=theta, elev=np.radians(elev), anth=anth,
    )
    # index of the radar with the lowest beam for every pixel
    which = np.ma.where(
        alt == alt.min(0),
        np.indices(alt.shape)[0],
        -1,
    ).max(0)
    what = alt.min(0)
    # colors: hue encodes the winning radar; saturation drops with
    # increasing minimum altitude (clipped at vmax=5); masked pixels white
    hue = cm.hsv(colors.Normalize(vmax=len(radars))(which), bytes=True)
    sat = 1 - colors.Normalize(vmax=5, clip=True)(what)[..., np.newaxis]
    hue[..., :3] *= sat
    hue[sat.mask[..., 0]] = 255
    Image.fromarray(hue).save('elevation_image.png')
    return 0
def main():
    """ Call command with args from parser. """
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    cli_kwargs = vars(get_parser().parse_args())
    return command(**cli_kwargs)
# Use main()'s return value as the process exit status.
if __name__ == '__main__':
    exit(main())
| nens/openradar | openradar/scripts/elevation_image.py | Python | gpl-3.0 | 2,413 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import pandas as pd
import numpy as np
import csv
def savePerturbedData(df, filename):
    """Write the taxi data frame to *filename* in NuPIC-style CSV format.

    Three header rows are emitted (column names, field types, special
    flags), followed by one row per record.  The file handle is managed
    with a context manager so it is closed even if writing fails.
    """
    with open(filename, "w") as outputFile:
        csvWriter = csv.writer(outputFile)
        csvWriter.writerow(['timestamp', 'passenger_count', 'timeofday', 'dayofweek'])
        csvWriter.writerow(['datetime', 'int', 'int', 'string'])
        csvWriter.writerow(['T', '', '', ''])
        for i in range(len(df)):
            csvWriter.writerow([df.time[i], df.data[i], df.timeofday[i], df.dayofweek[i]])
# Load the original NYC taxi series (skipping the two NuPIC type/flag rows).
dataSet = 'nyc_taxi'
filePath = dataSet+'.csv'
df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data', 'timeofday', 'dayofweek'])
# create a new daily profile
dailyTime = np.sort(df['timeofday'].unique())
dailyHour = dailyTime/60
profile = np.ones((len(dailyTime),))
# decrease 7am-11am traffic by 20%
profile[np.where(np.all([dailyHour >= 7.0, dailyHour < 11.0], axis=0))[0]] = 0.8
# increase 21:00 - 24:00 traffic by 20%
profile[np.where(np.all([dailyHour >= 21.0, dailyHour <= 23.0], axis=0))[0]] = 1.2
# Map each time-of-day value to its scaling factor.
dailyProfile = {}
for i in range(len(dailyTime)):
  dailyProfile[dailyTime[i]] = profile[i]
# apply the new daily pattern to weekday traffic
# (dayofweek < 5 means Monday-Friday; weekends are left unchanged)
old_data = df['data']
new_data = np.zeros(old_data.shape)
for i in xrange(len(old_data)):
  if df['dayofweek'][i] < 5:
    new_data[i] = old_data[i] * dailyProfile[df['timeofday'][i]]
  else:
    new_data[i] = old_data[i]
# Perturbed set: inject the new pattern only from row 13152 onward
# (presumably the intended change point -- confirm against experiment docs).
filename = 'nyc_taxi_perturb.csv'
df.loc[13152:, 'data'] = new_data[13152:]
savePerturbedData(df, filename)
# Baseline set: the new pattern applied over the whole series.
filename = 'nyc_taxi_perturb_baseline.csv'
df.loc[:, 'data'] = new_data[:]
savePerturbedData(df, filename) | BoltzmannBrain/nupic.research | projects/sequence_prediction/continuous_sequence/data/generatePerturbedNYCtaxiData.py | Python | agpl-3.0 | 2,563 |
"""
Python module defining a class for creating movies of matplotlib figures.
This code and information is provided 'as is' without warranty of any kind,
either express or implied, including, but not limited to, the implied
warranties of non-infringement, merchantability or fitness for a particular
purpose.
"""
from functools import partial
import shutil
import subprocess
import tempfile
import matplotlib as mpl
import matplotlib.pyplot as plt
def invert_color(color):
    """ Returns the inverted value of a matplotlib color """
    # resolve the color to an (r, g, b, a) tuple
    r, g, b, a = invert_color.cc.to_rgba(color)
    # invert only the RGB channels; keep alpha value intact!
    return (1 - r, 1 - g, 1 - b, a)
# initialize the color converter and keep it as a static variable
invert_color.cc = mpl.colors.ColorConverter()
def invert_colors(fig):
    """ Changes the colors of a figure to their inverted values.

    The function is its own inverse: applying it twice restores the
    original colors (used by Movie.add_figure).
    """
    # keep track of the object that have been changed
    # (so an object exposing both facecolor and color is not inverted twice)
    visited = set()
    def get_filter(name):
        """ construct a specific filter for `findobj` """
        return lambda x: hasattr(x, 'set_%s'%name) and hasattr(x, 'get_%s'%name)
    for o in fig.findobj(get_filter('facecolor')):
        if o not in visited:
            o.set_facecolor(invert_color(o.get_facecolor()))
            if hasattr(o, 'set_edgecolor') and hasattr(o, 'get_edgecolor'):
                o.set_edgecolor(invert_color(o.get_edgecolor()))
            visited.add(o)
    for o in fig.findobj(get_filter('color')):
        if o not in visited:
            o.set_color(invert_color(o.get_color()))
            visited.add(o)
class Movie(object):
    """ Class for creating movies from matplotlib figures using ffmpeg.

    Frames are collected as PNG files in a temporary directory and encoded
    on `save()`.  Usable as a context manager: on exit the movie is written
    to `filename` (if given) and the temporary files are removed.
    """

    def __init__(self,
                 width=None, filename=None, inverted=False, verbose=True,
                 framerate=None
                 ):
        self.width = width          # pixel width of the movie
        self.filename = filename    # filename used to save the movie
        self.inverted = inverted    # colors inverted?
        self.verbose = verbose      # verbose encoding information?
        self.framerate = framerate  # framerate of the movie

        # internal variables
        self.recording = False
        self.tempdir = None
        self.frame = 0
        self._start()

    def __del__(self):
        self._end()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # write the movie before cleanup, unless no filename was supplied
        if self.filename is not None:
            self.save(self.filename)
        self._end()
        return False  # do not suppress exceptions

    def _start(self):
        """ initializes the video recording """
        # create temporary directory for the image files of the movie
        self.tempdir = tempfile.mkdtemp(prefix='movie_')
        self.frame = 0
        self.recording = True

    def _end(self):
        """ clear up temporary things if necessary """
        if self.recording:
            shutil.rmtree(self.tempdir)
            self.recording = False

    def clear(self):
        """ delete current status and start from scratch """
        self._end()
        self._start()

    def _add_file(self, save_function):
        """ Adds a file to the current movie.

        `save_function` is called with the path of the next frame image.
        """
        if not self.recording:
            raise ValueError('Movie is not initialized.')
        save_function("%s/frame_%09d.png" % (self.tempdir, self.frame))
        self.frame += 1

    def add_image(self, image):
        """ Adds the data of a PIL image as a frame to the current movie. """
        if self.inverted:
            try:
                import ImageOps
            except ImportError:
                from PIL import ImageOps
            image_inv = ImageOps.invert(image)
            self._add_file(image_inv.save)
        else:
            self._add_file(image.save)

    def add_array(self, data, colormap=None):
        """ Adds the data from the array as a frame to the current movie.

        The array is assumed to be scaled to [0, 1].
        (0, 0) lies in the upper left corner of the image.
        The first axis extends toward the right, the second toward the bottom.
        """
        # get colormap
        if colormap is None:
            import matplotlib.cm as cm
            colormap = cm.gray
        # produce image
        try:
            import Image
        except ImportError:
            from PIL import Image
        import numpy as np
        grey_data = colormap(np.clip(data.T, 0, 1))
        im = Image.fromarray(np.uint8(grey_data*255))
        # save image
        self.add_image(im)

    def add_figure(self, fig=None):
        """ adds the figure `fig` as a frame to the current movie """
        if fig is None:
            fig = plt.gcf()

        if self.width is None:
            dpi = None
        else:
            dpi = self.width/fig.get_figwidth()

        # save image
        if self.inverted:
            # invert, save, then invert again to restore the original figure
            invert_colors(fig)
            save_function = partial(
                fig.savefig,
                dpi=dpi, edgecolor='none',
                facecolor=invert_color(fig.get_facecolor())
            )
            self._add_file(save_function)
            invert_colors(fig)
        else:
            save_function = partial(fig.savefig, dpi=dpi)
            self._add_file(save_function)

    def save_frames(self, filename_pattern='./frame_%09d.png', frames='all'):
        """ saves the given `frames` as images using the `filename_pattern` """
        if not self.recording:
            raise ValueError('Movie is not initialized.')
        if 'all' == frames:
            frames = range(self.frame)
        for f in frames:
            shutil.copy(
                "%s/frame_%09d.png" % (self.tempdir, f),
                filename_pattern % f
            )

    def save(self, filename, extra_args=None):
        """ convert the recorded images to a movie using ffmpeg """
        if not self.recording:
            raise ValueError('Movie is not initialized.')

        # set parameters; copy so a caller-supplied list is not mutated
        extra_args = [] if extra_args is None else list(extra_args)
        if self.framerate is not None:
            # subprocess arguments must be strings; allow numeric framerates
            extra_args += ["-r", str(self.framerate)]
        if filename is None:
            filename = self.filename

        # construct the call to ffmpeg
        # add the `-pix_fmt yuv420p` switch for compatibility reasons
        #    -> http://ffmpeg.org/trac/ffmpeg/wiki/x264EncodingGuide
        args = ["ffmpeg"]
        if extra_args:
            args += extra_args
        args += [
            "-y",            # don't ask questions
            "-f", "image2",  # input format
            "-i", "%s/frame_%%09d.png" % self.tempdir,  # input data
            "-pix_fmt", "yuv420p",                      # pixel format for compatibility
            "-b:v", "1024k",                            # high bit rate for good quality
            filename         # output file
        ]

        # spawn the subprocess and capture its output; communicate() avoids
        # the deadlock that sequential stdout/stderr reads can cause when a
        # pipe buffer fills up
        p = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()

        # check if error occurred
        if p.returncode:
            print(out)
            print(err)
            raise RuntimeError('An error occurred while producing the movie.')

        # do output anyway, when verbosity is requested
        if self.verbose:
            print(out)
            print(err)
def test_movie_making():
    """ Simple interactive test code for movie making. """
    try:
        # try python2 version
        filename = raw_input('Choose a file name: ')
    except NameError:
        # python3 fallback
        filename = input('Choose a file name: ')

    import numpy as np

    # prepare data
    x = np.linspace(0, 10, 100)
    lines, = plt.plot(x, np.sin(x))
    plt.ylim(-1, 1)

    with Movie(filename=filename) as movie:
        for k in range(30):
            lines.set_ydata(np.sin(x + 0.1*k))
            # Movie has no `add_frame` method; `add_figure` records the
            # current state of the active figure as the next frame.
            movie.add_figure()
# Run the interactive smoke test when executed directly.
if __name__ == "__main__":
    print('This file is intended to be used as a module.')
    print('This code serves as a test for the defined methods.')
    test_movie_making()
| david-zwicker/python-functions | movie_making.py | Python | gpl-2.0 | 8,083 |
from django.conf.urls import url
from django.views.generic.base import RedirectView
from states.views import state_action_confirm, state_action, state_action_ajax, report, state_history
urlpatterns = [
    # Confirmation pages before performing a state action (with and
    # without extra parameters).
    url(regex=r'^action/confirm/(?P<state_pk>\d+)/(?P<action>.*)/noparam/$', state_action_confirm, {'next': 'state_action_noparam'}, name='state_action_confirm_noparam'),
    url(regex=r'^action/confirm/(?P<state_pk>\d+)/(?P<action>.*)/$', state_action_confirm, name='state_action_confirm'),
    # Execute a state action directly.
    url(regex=r'^action/(?P<state_pk>\d+)/(?P<action>.*)/noparam/$', state_action, {'no_param': True}, name='state_action_noparam'),
    url(regex=r'^action/(?P<state_pk>\d+)/(?P<action>.*)/$', state_action, name='state_action'),
    # AJAX variant and history listing.
    url(regex=r'^ajax/action/$', state_action_ajax, name='state_action_ajax'),
    url(regex=r'^history/$', state_history, name='states_state_history'),
    # Reports per content type; the overview redirects to report id -1.
    url(regex=r'^(?P<content_type>[a-z\._\d+]+)/reports/$', RedirectView.as_view(url='-1/'), name='states_overview'),
    url(regex=r'^(?P<content_type>[a-z\._\d+]+)/reports/(?P<report_id>-?\d+)/$', report, name='states_report'),
    url(regex=r'^(?P<content_type>[\d+]+)/reports/(?P<report_id>-?\d+)/order_by/(?P<order_column>-?\d+)/(?P<order_direction>[AD])/$', report, name='states_report_order_by'),
] | vikingco/django-states | src/states/urls.py | Python | bsd-3-clause | 1,232 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from marionette_driver import By, expected, Wait
from marionette_driver.errors import MarionetteException
from firefox_ui_harness.testcases import FirefoxTestCase
class TestSSLDisabledErrorPage(FirefoxTestCase):
    """Verify the 'Secure Connection Failed' page for a TLS 1.0-only host
    when the browser is restricted to TLS 1.2, and that the page's
    'Restore' button resets the preferences so the page then loads."""

    def setUp(self):
        FirefoxTestCase.setUp(self)
        self.url = 'https://tlsv1-0.mozqa.com'
        self.utils.sanitize({"sessions": True})
        # Disable SSL 3.0, TLS 1.0 and TLS 1.1 for secure connections
        # by forcing the use of TLS 1.2
        # see: http://kb.mozillazine.org/Security.tls.version.*#Possible_values_and_their_effects
        self.prefs.set_pref('security.tls.version.min', 3)
        self.prefs.set_pref('security.tls.version.max', 3)

    def test_ssl_disabled_error_page(self):
        with self.marionette.using_context('content'):
            # Open the test page; navigation is expected to fail because
            # the server cannot satisfy the forced TLS 1.2 requirement.
            self.assertRaises(MarionetteException, self.marionette.navigate, self.url)
            # Wait for the DOM to receive events
            # NOTE(review): fixed sleep is fragile; an explicit Wait on the
            # error page element would be more reliable.
            time.sleep(1)
            # Verify "Secure Connection Failed" error page title
            title = self.marionette.find_element(By.CLASS_NAME, 'title-text')
            nss_failure2title = self.browser.get_entity('nssFailure2.title')
            self.assertEquals(title.get_property('textContent'), nss_failure2title)
            # Verify the error message is correct
            short_description = self.marionette.find_element(By.ID, 'errorShortDescText')
            self.assertIn('SSL_ERROR_UNSUPPORTED_VERSION',
                          short_description.get_property('textContent'))
            self.assertIn('mozqa.com', short_description.get_property('textContent'))
            # Verify that the "Restore" button appears and works
            reset_button = self.marionette.find_element(By.ID, 'prefResetButton')
            reset_button.click()
            # With the preferences reset, the page has to load correctly
            Wait(self.marionette).until(expected.element_present(By.LINK_TEXT,
                                                                 'http://quality.mozilla.org'))
| cstipkovic/spidermonkey-research | testing/firefox-ui/tests/functional/security/test_ssl_disabled_error_page.py | Python | mpl-2.0 | 2,300 |
# -*- coding: utf-8 -*-
"""
news_data.pipeline.metric_writer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[Pipeline Step 4 of 4]
This module subscribes to the 'analyzed_articles' queue and listens
for jobs. When it receives a job, it reads the analyzed article
results and creates metric data. This is then written to the DB.
:license: MIT, see LICENSE for more details.
"""
import argparse
from datetime import datetime
from datetime import timedelta
import date_util
from db import mongo
import queue
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Temp -- used for monitoring performance
# Daily write/read/create time
dw_time = timedelta(days=0)
dr_time = timedelta(days=0)
dc_time = timedelta(days=0)
# Monthly write/read/create time
mw_time = timedelta(days=0)
mr_time = timedelta(days=0)
mc_time = timedelta(days=0)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
verbose = False
updt_freq = 1000
metricized_articles = 0
terms_processed = 0
docs_created_daily = 0
docs_created_monthly = 0
connection = None
consume_channel = None
db_analyzed_articles = None
db_metric_data_daily = None
db_metric_data_monthly = None
def init_db_and_queue():
    """Initialize the module-level Mongo collections and the RabbitMQ
    connection/channel used by the consumer loop."""
    global db_analyzed_articles, db_metric_data_daily, db_metric_data_monthly,\
        connection, consume_channel
    # Init DB
    db_analyzed_articles = mongo.get_analyzed_articles()
    db_metric_data_daily = mongo.get_metric_data_daily()
    db_metric_data_monthly = mongo.get_metric_data_monthly()
    # Init connection and channels to RabbitMQ
    connection, consume_channel = queue.init_connection()
    queue.init_analyzed_articles(consume_channel)
def analyzed_articles_consumer(channel, method, header, body):
    """Queue callback: *body* carries an analyzed-article id to metricize.

    The message is acked only after metric creation completes, so a
    failure leaves the message unacknowledged.
    """
    create_metrics_for_article(body)
    channel.basic_ack(delivery_tag = method.delivery_tag)
def start_consuming_analyzed_articles():
    """Block on the 'analyzed_articles' queue, processing jobs until
    interrupted; the connection is always closed on exit."""
    try:
        print " Metric Writer Started..."
        queue.consume_analyzed_articles(consume_channel,
            analyzed_articles_consumer)
    finally:
        queue.close_connection(connection)
def create_metrics_for_article(article_id, preview=False):
    """Read the analyzed article *article_id* and upsert daily/monthly
    metric documents for every term in its histogram.

    With preview=True nothing is written; progress is printed instead.
    """
    global metricized_articles, terms_processed
    # Get article from the DB...
    analyzed_article = read_analyzed_article_from_db(article_id)
    # Create metrics...
    if analyzed_article:
        # There are a few different approaches to consider when writing
        # metric data.
        # (1) Ensure documents are allocated, and then upsert data for
        # the change
        # (2) Upsert an entire doc each time, where all values are zero
        # except one.
        # (3) Upsert daily term docs one at a time, then aggregate into
        # higher level data later
        #
        # >> Currently selecting to do the former approach. Results in
        # more small reads to the DB, but smaller writes.
        # Get needed date values
        published = analyzed_article["published"]
        yyyy = published.year
        mm = published.month
        dd = published.day
        first_of_month = datetime(yyyy, mm, 1)
        days_in_curr_month = date_util.get_days_in_month(yyyy, published.month)
        # Iterate over each term in the term histogram
        term_histogram = analyzed_article["term_histogram"]
        for term in term_histogram:
            terms_processed += 1
            if not preview:
                update_daily_metrics(term, yyyy, mm, dd, first_of_month,
                    days_in_curr_month, term_histogram[term])
                update_monthly_metrics(term, yyyy, mm, term_histogram[term])
        # Increase count and update status after each article...
        metricized_articles += 1
        if preview or metricized_articles % updt_freq == 0:
            print " * Articles Metricized: %d..." % metricized_articles
            print " Terms: %d Daily Docs %d Monthly Docs %d" % \
                (terms_processed, docs_created_daily, docs_created_monthly)
            print " Monthly: Read: %s, Create: %s, Write: %s" % \
                (mr_time, mc_time, mw_time)
            print " Daily: Read: %s, Create: %s, Write: %s" % \
                (dr_time, dc_time, dw_time)
    else:
        print " ERROR: No document with id of '%s' in DB" % article_id
def read_analyzed_article_from_db(article_id):
    """Fetch the analyzed-article document with the given id from Mongo."""
    query = {"_id": article_id}
    return db_analyzed_articles.find_one(query)
def update_daily_metrics(term, yyyy, mm, dd, first_of_month,
    days_in_curr_month, term_count):
    """Upsert the (term, year, month) daily metric document, incrementing
    the counter for day *dd* by *term_count*.

    If the document does not exist yet, it is first allocated with a zeroed
    entry per day of the month.
    """
    global docs_created_daily, dr_time, dw_time, dc_time
    # Create the metric identifier
    id_daily_metric = {
        "_id" : {
            "term" : term,
            "yyyy" : yyyy,
            "mm" : date_util.pad_month_day_value(mm)
        }
    }
    # Check if a doc for this identifier already exists, if not
    # allocate the doc
    r_time = datetime.now()
    if (db_metric_data_daily.find(id_daily_metric).count() == 0):
        dr_time += (datetime.now() - r_time)
        c_time = datetime.now()
        docs_created_daily += 1
        metric_doc_daily = {
            "_id" : id_daily_metric["_id"],
            "term" : term,
            "date" : first_of_month,
            "daily": {}
        }
        # pre-allocate a zero counter for every day of this month
        for day in range(1, days_in_curr_month + 1):
            metric_doc_daily["daily"][str(day)] = 0
        db_metric_data_daily.insert(metric_doc_daily)
        dc_time += (datetime.now() - c_time)
    else:
        dr_time += (datetime.now() - r_time)
    # Update the daily metric data with this value
    w_time = datetime.now()
    metric_update_daily = {"$inc" : {"daily." + str(dd) : term_count}}
    db_metric_data_daily.update(id_daily_metric, metric_update_daily,
        True) # True for upsert
    dw_time += (datetime.now() - w_time)
def update_monthly_metrics(term, yyyy, mm, term_count):
    """Upsert the per-term monthly metric document, incrementing the
    counter for (*yyyy*, *mm*) by *term_count*.

    If the document does not exist yet, it is first allocated with a
    zeroed counter for every month of 2000-2013.
    """
    global docs_created_monthly, mr_time, mw_time, mc_time
    # Create the metric identifier
    id_monthly_metric = {
        "_id" : {
            "term" : term
        }
    }
    # Check if a doc for this identifier already exists, if not
    # allocate the doc
    r_time = datetime.now()
    if (db_metric_data_monthly.find(id_monthly_metric).count() == 0):
        mr_time += (datetime.now() - r_time)
        c_time = datetime.now()
        docs_created_monthly += 1
        metric_doc_monthly = {
            "_id" : id_monthly_metric["_id"],
            "term" : term
        }
        # BUGFIX: the allocation loops used to reuse `yyyy` and `mm` as
        # loop variables, clobbering the function parameters -- every
        # newly-created document was then incremented at "2013.12"
        # instead of the article's actual year/month.
        for alloc_yyyy in range(2000, 2014):
            metric_doc_monthly[str(alloc_yyyy)] = {}
            for alloc_mm in range(1, 13):
                metric_doc_monthly[str(alloc_yyyy)][str(alloc_mm)] = 0
        db_metric_data_monthly.insert(metric_doc_monthly)
        mc_time += (datetime.now() - c_time)
    else:
        mr_time += (datetime.now() - r_time)
    # Update the monthly metric data with this value
    w_time = datetime.now()
    metric_update_monthly = {"$inc" : {str(yyyy) + "." + str(mm) : term_count}}
    db_metric_data_monthly.update(id_monthly_metric,
        metric_update_monthly, True) # True for upsert
    mw_time += (datetime.now() - w_time)
def parse_args():
    """ Parse the command line arguments.

    Side effect: updates the module-level `verbose` and `updt_freq`
    settings from the parsed values.
    """
    global verbose, updt_freq
    # Use implicit string concatenation for the description: the original
    # backslash line continuations embedded the source indentation into
    # the help text.
    parser = argparse.ArgumentParser(description="Listens to queue for "
        "analyzed articles to create metrics for, or optionally "
        "create metrics for a given article id argument")
    parser.add_argument("-v", "--verbose", action='store_true',
        help="Make the operation talkative")
    parser.add_argument("-p", "--preview", action='store_true',
        help="Preview only, don't persist results.")
    parser.add_argument("-u", "--updt_freq", type=int, default=1000,
        help="Frequency to print an update")
    parser.add_argument("-i", "--id", help="Id of article to parse")
    args = parser.parse_args()
    verbose = args.verbose
    updt_freq = args.updt_freq
    return args
# Entry point: initialize DB/queue, then either process a single article
# id (from -i) or start the long-running queue consumer.
if __name__ == "__main__":
    args = parse_args()
    print "----------------------------------------------< metric_writer >----"
    init_db_and_queue()
    # If an article id is provided as an argument, create metrics for
    # it. Otherwise, start consuming msgs from the queue.
    if args.id:
        create_metrics_for_article(args.id.strip(), args.preview)
    else:
        start_consuming_analyzed_articles() | lbracken/news_data | pipeline/metric_writer.py | Python | mit | 8,432 |
# Copyright (C) 2017 Matteo Franchin
#
# This file is part of Pyrtist.
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
'''Implementation of Put functionality for DeepWindows.
This module allows passing Put objects to DeepWindow in order to place a
DeepWindow inside another DeepWindow, similarly to what is done for Windows
in the pyrtist.lib2d module.
'''
__all__ = ('SimplePut', 'Put', 'Near')
from ..lib2d import combination, SimplePut, Put, Near
from .core_types import Point, Point3, DeepMatrix
from .deep_window import DeepWindow
from .cmd_stream import DeepCmdArgFilter
@combination(SimplePut, DeepWindow, 'SimplePut')
def simple_put_at_deep_window(simple_put, deep_window):
    """Place the SimplePut's window inside *deep_window*, transforming its
    command stream by the put's matrix (identity when no matrix is set)."""
    flt = DeepCmdArgFilter.from_matrix(simple_put.matrix or DeepMatrix())
    deep_window.cmd_stream.add(simple_put.get_window().cmd_stream, flt)
@combination(DeepWindow, Put)
def deep_window_at_put(deep_window, put):
    """Register *deep_window* as the window the Put object will place."""
    put.window = deep_window
@combination(Put, DeepWindow, 'Put')
def put_at_deep_window(put, deep_window):
    """Resolve the Put's constraints to an xy transform and add the placed
    window's command stream to *deep_window* with that transform applied.

    Non-point constraint sources are looked up as named reference points in
    the placed window; a missing reference raises ValueError.
    """
    xy_constraints = []
    z_constraints = []
    for c in put.constraints:
        src, dst, weight = (c.src, Point3(c.dst), float(c.weight))
        if not isinstance(src, (Point, Point3)):
            reference_point = put.window.get(src)
            if reference_point is None:
                raise ValueError('Cannot find reference point {}'
                                 .format(repr(src)))
            src = reference_point
        src = Point3(src)
        xy_constraints.append(Near(src.get_xy(), dst.get_xy(), weight))
        z_constraints.append((src.z, dst.z, weight))
    # Calculate the xy part of the matrix.
    # NOTE(review): z_constraints is collected but never used -- possibly a
    # placeholder for a future z transform; confirm before removing.
    t = put.auto_transform.calculate(put.transform, xy_constraints)
    mx = t.get_matrix()
    flt = DeepCmdArgFilter.from_matrix(mx)
    deep_window.cmd_stream.add(put.window.cmd_stream, flt)
| mfnch/pyrtist | pyrtist/lib2deep/put.py | Python | lgpl-2.1 | 2,455 |
import os
import gzip
import numpy as np
import pandas as pd
import mdtraj
import ensembler
from ensembler.core import logger, get_most_advanced_ensembler_modeling_stage, default_project_dirnames, model_filenames_by_ensembler_stage, mpistate
from ensembler.refinement import remove_disulfide_bonds_from_topology, get_highest_seqid_existing_model
class MkTraj(object):
    """Build an mdtraj trajectory (plus topology PDB and data CSV) from the
    models of a single Ensembler target."""

    def __init__(self, targetid, ensembler_stage=None, traj_filepath=None, topol_filepath=None,
                 models_data_filepath=None, process_only_these_templates=None, loglevel=None,
                 run_main=True):
        """Makes a trajectory for a given target, using mdtraj. The trajectory can be used with other
        software, e.g. for visualization with PyMOL or VMD.

        Parameters
        ----------
        targetid : str
            e.g. 'EGFR_HUMAN_D0'
        ensembler_stage : str
            The Ensembler stage from which to build models, e.g. 'build_models' results in a trajectory
            built from the 'model.pdb.gz' files output by the build_models command.
            options: build_models|refine_implicit_md|refine_explicit_md
            default: most advanced stage for which model files are available
        traj_filepath : str
            default: models/[targetid]/traj-[ensembler_stage].xtc
        topol_filepath : str
            default: models/[targetid]/traj-[ensembler_stage]-topol.pdb
        models_data_filepath :
            default: models/[targetid]/traj-[ensembler_stage]-data.csv
        process_only_these_templates : list of str

        Returns
        -------
        traj : mdtraj.Trajectory
        df : pandas.DataFrame
            models data (e.g. sequence identities)
        """
        ensembler.utils.set_loglevel(loglevel)
        ensembler.core.check_project_toplevel_dir()
        self.models_target_dir = os.path.join(default_project_dirnames.models, targetid)

        logger.debug('Working on target %s' % targetid)

        if ensembler_stage is None:
            self.ensembler_stage = get_most_advanced_ensembler_modeling_stage(targetid)
        else:
            self.ensembler_stage = ensembler_stage

        if traj_filepath is None:
            self.traj_filepath = os.path.join(
                self.models_target_dir, 'traj-{0}.xtc'.format(self.ensembler_stage)
            )
        else:
            self.traj_filepath = traj_filepath

        if topol_filepath is None:
            self.topol_filepath = os.path.join(
                self.models_target_dir, 'traj-{0}-topol.pdb'.format(self.ensembler_stage)
            )
        else:
            self.topol_filepath = topol_filepath

        if models_data_filepath is None:
            self.models_data_filepath = os.path.join(
                self.models_target_dir, 'traj-{0}-data.csv'.format(self.ensembler_stage)
            )
        else:
            self.models_data_filepath = models_data_filepath

        if process_only_these_templates:
            self.templateids = process_only_these_templates
        else:
            # Template ids are the subdirectories of the target dir; only
            # the first os.walk entry is needed (the original materialized
            # the entire recursive walk just to read it).
            self.templateids = next(os.walk(self.models_target_dir))[1]

        if run_main:
            self._gen_df()
            self.df.to_csv(self.models_data_filepath, columns=['templateid', 'seqid'])
            self._construct_traj()
            self._superpose()
            self._write_traj()

    def _gen_df(self, model_filename=None):
        """Build self.df with a row per template that has a model file:
        template id, model filepath, and sequence identity (descending)."""
        if model_filename is None:
            model_filename = ensembler.core.model_filenames_by_ensembler_stage[self.ensembler_stage]
        valid_model_templateids = [
            templateid for templateid in self.templateids
            if os.path.exists(os.path.join(self.models_target_dir, templateid, model_filename))
        ]
        valid_model_filepaths = [
            os.path.join(self.models_target_dir, templateid, model_filename)
            for templateid in valid_model_templateids
        ]
        seqid_filepaths = [
            os.path.join(self.models_target_dir, templateid, 'sequence-identity.txt')
            for templateid in valid_model_templateids
        ]
        seqids = [
            float(open(seqid_filepath).read().strip()) if os.path.exists(seqid_filepath) else None
            for seqid_filepath in seqid_filepaths
        ]
        self.df = pd.DataFrame({
            'templateid': valid_model_templateids,
            'model_filepath': valid_model_filepaths,
            'seqid': seqids,
        })
        # DataFrame.sort() was removed in pandas 0.20; sort_values() is the
        # supported replacement (available since pandas 0.17).
        self.df.sort_values(by='seqid', inplace=True, ascending=False)
        self.df.reset_index(drop=True, inplace=True)

    def _construct_traj(self):
        """Load every model PDB (disulfide bonds stripped from the topology)
        and concatenate them, highest sequence identity first."""
        logger.debug('Loading Trajectory object for model {0} ({1}/{2})'.format(self.df.templateid.iloc[0], 0, len(self.df.model_filepath)))
        traj = mdtraj.load_pdb(self.df.model_filepath[0])
        remove_disulfide_bonds_from_topology(traj.topology)
        self.traj = traj
        for m, model_filepath in enumerate(self.df.model_filepath[1:]):
            logger.debug('Loading Trajectory object for model {0} ({1}/{2})'.format(self.df.templateid.iloc[m+1], m+1, len(self.df.model_filepath)))
            traj = mdtraj.load_pdb(model_filepath)
            remove_disulfide_bonds_from_topology(traj.topology)
            self.traj += traj

    def _superpose(self):
        """
        Superpose structured C-alphas
        """
        # Structured residues are those assigned helix ('H') or strand ('E')
        # by DSSP on the first frame.
        self.dssp = mdtraj.compute_dssp(self.traj[0])[0]
        structured_resis_bool = (self.dssp == 'H') + (self.dssp == 'E')
        alpha_indices = self.traj.topology.select_atom_indices('alpha')
        structured_alpha_indices = np.array([
            alpha_indices[x] for x in range(self.traj.n_residues) if structured_resis_bool[x]
        ])
        self.traj.superpose(reference=self.traj, frame=0, atom_indices=structured_alpha_indices)

    def _write_traj(self):
        """
        Write traj, and write first frame as pdb file
        """
        self.traj[0].save(self.topol_filepath)
        self.traj.save(self.traj_filepath)
class MkTrajImplicitStart(MkTraj):
    def __init__(self, targetid, traj_filepath=None, topol_filepath=None,
            models_data_filepath=None, process_only_these_templates=None, loglevel=None,
            run_main=True):
        """
        Makes trajectory of the model files with added hydrogens, but prior to any refinement.
        For the specified target, makes a single topology pdb file, a single trajectory xtc file,
        and individual pdb files for each model.
        See docs on `MkTraj` for further info on parameters.
        Examples
        --------
        MkTrajImplicitStart(targetid='EGFR_HUMAN_D0')
        """
        ensembler.utils.set_loglevel(loglevel)
        ensembler.core.check_project_toplevel_dir()
        self.models_target_dir = os.path.join(default_project_dirnames.models, targetid)
        logger.debug('Working on target %s' % targetid)
        self.ensembler_stage = 'implicit-start'
        self.model_filename = 'implicit-start.pdb.gz'
        # Default output paths live directly under the target's model dir.
        if traj_filepath is None:
            self.traj_filepath = os.path.join(
                self.models_target_dir, 'traj-{0}.xtc'.format(self.ensembler_stage)
            )
        else:
            self.traj_filepath = traj_filepath
        if topol_filepath is None:
            self.topol_filepath = os.path.join(
                self.models_target_dir, 'traj-{0}-topol.pdb'.format(self.ensembler_stage)
            )
        else:
            self.topol_filepath = topol_filepath
        if models_data_filepath is None:
            self.models_data_filepath = os.path.join(
                self.models_target_dir, 'traj-{0}-data.csv'.format(self.ensembler_stage)
            )
        else:
            self.models_data_filepath = models_data_filepath
        if process_only_these_templates:
            self.templateids = process_only_these_templates
        else:
            # os.walk yields (dirpath, dirnames, filenames) tuples; the first
            # tuple's dirnames are the per-template subdirectories.
            directories = [ directory for directory in os.walk(self.models_target_dir) ]
            self.templateids = directories[0][1]
        if run_main:
            self._gen_implicit_start_models()
            self._gen_df(model_filename=self.model_filename)
            self.df.to_csv(self.models_data_filepath, columns=['templateid', 'seqid'])
            self._construct_traj()
            self._superpose()
            self._write_traj()
    def _gen_implicit_start_models(
            self,
            ff='amber99sbildn.xml', implicit_water_model='amber99_obc.xml',
            ph=8.0):
        # Generate "implicit-start" models: the built models with hydrogens
        # added at the given pH, all using the protonation-state variants of a
        # single reference model so that every output shares one topology.
        self.ph = ph
        from simtk.openmm import app
        # Only templates that completed implicit-MD refinement are eligible.
        valid_model_templateids = [
            templateid for templateid in self.templateids
            if os.path.exists(
                os.path.join(
                    self.models_target_dir, templateid,
                    ensembler.core.model_filenames_by_ensembler_stage['refine_implicit_md']
                )
            )
        ]
        # Skip templates whose implicit-start model already exists on disk.
        gen_model_templateids = [
            templateid for templateid in valid_model_templateids
            if not os.path.exists(
                os.path.join(self.models_target_dir, templateid, self.model_filename)
            )
        ]
        # make reference model
        forcefield = app.ForceField(ff, implicit_water_model)
        reference_model_id = get_highest_seqid_existing_model(models_target_dir=self.models_target_dir)
        logger.debug('Using {0} as reference model'.format(reference_model_id))
        reference_model_path = os.path.join(self.models_target_dir, reference_model_id, model_filenames_by_ensembler_stage['build_models'])
        with gzip.open(reference_model_path) as reference_pdb_file:
            reference_pdb = app.PDBFile(reference_pdb_file)
        remove_disulfide_bonds_from_topology(reference_pdb.topology)
        reference_topology = reference_pdb.topology
        reference_modeller = app.Modeller(reference_pdb.topology, reference_pdb.positions)
        # The returned variants pin per-residue protonation states so that all
        # models below are hydrogenated identically to the reference.
        reference_variants = reference_modeller.addHydrogens(forcefield, pH=self.ph)
        # MPI round-robin: each rank handles every mpistate.size-th template.
        for template_index in range(mpistate.rank, len(gen_model_templateids), mpistate.size):
            templateid = gen_model_templateids[template_index]
            logger.debug('Generating implicit-start model for {0}'.format(templateid))
            try:
                input_model_filepath = os.path.join(self.models_target_dir, templateid, model_filenames_by_ensembler_stage['build_models'])
                output_model_filepath = os.path.join(self.models_target_dir, templateid, self.model_filename)
                with gzip.open(input_model_filepath) as pdb_file:
                    pdb = app.PDBFile(pdb_file)
                remove_disulfide_bonds_from_topology(pdb.topology)
                # NOTE(review): positions come from this template's model but
                # the topology is the reference model's -- this assumes all
                # built models share identical atom ordering; confirm that the
                # build stage guarantees this.
                modeller = app.Modeller(reference_topology, pdb.positions)
                modeller.addHydrogens(forcefield, pH=self.ph, variants=reference_variants)
                topology = modeller.getTopology()
                positions = modeller.getPositions()
                with gzip.open(output_model_filepath, 'wt') as output_model_file:
                    app.PDBFile.writeHeader(topology, file=output_model_file)
                    app.PDBFile.writeFile(topology, positions, file=output_model_file)
                    app.PDBFile.writeFooter(topology, file=output_model_file)
            except Exception as e:
                # Best-effort: report the failure and continue with the rest.
                print('Error for model {0}: {1}'.format(templateid, e))
                continue
| choderalab/ensembler | ensembler/tools/mktraj.py | Python | gpl-2.0 | 11,471 |
from lettuce import *
from base import *
import re
def find_pattern(pattern, process, timeout):
    """Scan process output for a regular expression.

    Reads lines from ``process.read_lines(timeout)`` until one matches
    *pattern* or the stream is exhausted. Returns ``(found, lines)`` where
    *lines* holds every line read, including the matching one.
    """
    collected = []
    for line in process.read_lines(timeout):
        collected.append(line)
        if re.search(pattern, line):
            return True, collected
    return False, collected
@step(u'an empty configuration')
def empty_configuration(step):
    """Start the scenario from a configuration with no settings at all."""
    world.airpnp_config = dict()
@step(u'Airpnp is started')
def start_airpnp(step):
    """Launch the Airpnp process under test."""
    world.start_airpnp()
@step('the log should contain the message "(.*)"')
def see_log_message(step, message):
pattern = ".*%s.*" % re.escape(message)
found, lines = find_pattern(pattern, world.airpnp, 10000)
assert found == True, "Got log lines: %r" % lines
@step(u'an? (.*) with UDN (.*) and name (.*) is running')
def media_renderer_is_running(step, device, udn, name):
    """Launch a fake UPnP device of the given type with the given UDN/name."""
    # Step arguments arrive as unicode; the spawned command expects plain str.
    device_type = str(device).replace(" ", "")
    world.start_process("python upnpclient.py %s %s %s" % (device_type, str(udn), str(name)))
@step(u'Then an AirPlay service is published with the name (.*)')
def airplay_service_published(step, name):
    """Browse mDNS for an AirPlay service advertised under *name*."""
    browser = world.start_process("avahi-browse -prk _airplay._tcp")
    wanted = "^=.*;%s;" % re.escape(name)
    matched, output = find_pattern(wanted, browser, 10000)
    assert matched, "Got log lines: %r" % output
    # Keep only resolved ("=") records for the follow-up assertion steps.
    world.airplay_service_lines = [line for line in output if line.startswith("=")]
@step(u'And the AirPlay service has features set to (.*)')
def and_the_airplay_service_has_features_set_to_0x77(step, features):
    """Check the advertised TXT records for the expected features value."""
    needle = "features=" + features
    assert any(needle in line for line in world.airplay_service_lines)
@step(u'And the AirPlay service has model set to (.*)')
def and_the_airplay_service_has_model_set_to_appletv2_1(step, model):
    """Check the advertised TXT records for the expected model string."""
    needle = "model=" + model
    assert any(needle in line for line in world.airplay_service_lines)
@step(u'Then (.*) AirPlay services with name prefix (.*) are published')
def then_2_airplay_services_are_published(step, count, prefix):
    """Count resolved IPv4 AirPlay services whose name starts with *prefix*."""
    browser = world.start_process("avahi-browse -prk _airplay._tcp")
    def is_wanted(line):
        fields = line.split(";")
        return fields[0] == "=" and fields[2] == "IPv4" and fields[3].startswith(prefix)
    world.airplay_service_lines = [line for line in browser.read_lines(10000) if is_wanted(line)]
    assert len(world.airplay_service_lines) == int(count)
@step(u'And the AirPlay services have different device IDs')
def and_the_airplay_services_have_different_device_ids(step):
    """Every published service must advertise a unique deviceid TXT entry."""
    device_ids = [re.search('"deviceid=([:a-zA-Z0-9]+)"', line).group(1)
                  for line in world.airplay_service_lines]
    assert len(set(device_ids)) == len(device_ids), "Found device IDs: " + str(device_ids)
| provegard/airpnp | features/steps.py | Python | bsd-3-clause | 2,828 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import weakref
import sip
from PyQt5.Qt import (
QLineEdit, QAbstractListModel, Qt, pyqtSignal, QObject, QKeySequence,
QApplication, QListView, QPoint, QModelIndex, QFont, QFontInfo,
QStyleOptionComboBox, QStyle, QComboBox, QTimer)
from calibre.constants import isosx, get_osx_version
from calibre.utils.icu import sort_key, primary_startswith, primary_contains
from calibre.gui2.widgets import EnComboBox, LineEditECM
from calibre.utils.config import tweaks
def containsq(x, prefix):
    # Adapter: swaps the argument order so matcher callables can be called
    # uniformly as func(candidate, prefix) alongside primary_startswith.
    return primary_contains(prefix, x)
class CompleteModel(QAbstractListModel): # {{{
    """List model backing the completion popup.

    Holds the full candidate set (``all_items``) and the subset matching the
    currently typed prefix (``current_items``). Matching is prefix- or
    substring-based depending on the ``completion_mode`` tweak.
    """
    def __init__(self, parent=None, sort_func=sort_key):
        QAbstractListModel.__init__(self, parent)
        self.sort_func = sort_func
        self.all_items = self.current_items = ()
        self.current_prefix = ''
    def set_items(self, items):
        # Replace the full candidate list; clears the current prefix filter.
        items = [unicode(x.strip()) for x in items]
        items = [x for x in items if x]
        items = tuple(sorted(items, key=self.sort_func))
        self.beginResetModel()
        self.all_items = self.current_items = items
        self.current_prefix = ''
        self.endResetModel()
    def set_completion_prefix(self, prefix):
        # Filter the candidates down to those matching *prefix*.
        old_prefix = self.current_prefix
        self.current_prefix = prefix
        if prefix == old_prefix:
            return
        if not prefix:
            self.beginResetModel()
            self.current_items = self.all_items
            self.endResetModel()
            return
        # If the new prefix extends the old one, the new matches are a subset
        # of the current matches, so only those need re-testing.
        subset = prefix.startswith(old_prefix)
        universe = self.current_items if subset else self.all_items
        func = primary_startswith if tweaks['completion_mode'] == 'prefix' else containsq
        self.beginResetModel()
        self.current_items = tuple(x for x in universe if func(x, prefix))
        self.endResetModel()
    def rowCount(self, *args):
        return len(self.current_items)
    def data(self, index, role):
        # Only display text is provided; all other roles return None.
        if role == Qt.DisplayRole:
            try:
                return self.current_items[index.row()]
            except IndexError:
                pass
    def index_for_prefix(self, prefix):
        # Model index of the first (sorted) item starting with *prefix*,
        # or None when nothing matches.
        for i, item in enumerate(self.current_items):
            if primary_startswith(item, prefix):
                return self.index(i)
# }}}
class Completer(QListView): # {{{
    """Popup list view showing completion candidates.

    Shown as a frameless ``Qt.Popup`` window positioned under the widget
    being completed. Key presses received while the popup is open are
    redirected back to that widget via :meth:`eventFilter`, so typing
    continues to edit the line while the list narrows.
    """
    item_selected = pyqtSignal(object)
    relayout_needed = pyqtSignal()
    def __init__(self, completer_widget, max_visible_items=7, sort_func=sort_key):
        QListView.__init__(self)
        self.disable_popup = False
        # Weak reference: the popup must not keep the edited widget alive.
        self.completer_widget = weakref.ref(completer_widget)
        self.setWindowFlags(Qt.Popup)
        self.max_visible_items = max_visible_items
        self.setEditTriggers(self.NoEditTriggers)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setSelectionBehavior(self.SelectRows)
        self.setSelectionMode(self.SingleSelection)
        self.setAlternatingRowColors(True)
        self.setModel(CompleteModel(self, sort_func=sort_func))
        self.setMouseTracking(True)
        self.entered.connect(self.item_entered)
        self.activated.connect(self.item_chosen)
        self.pressed.connect(self.item_chosen)
        self.installEventFilter(self)
    def hide(self):
        # Clear the selection so a stale highlight is not shown next time.
        self.setCurrentIndex(QModelIndex())
        QListView.hide(self)
    def item_chosen(self, index):
        # Emit the chosen completion text and close the popup.
        if not self.isVisible():
            return
        self.hide()
        text = self.model().data(index, Qt.DisplayRole)
        self.item_selected.emit(unicode(text))
    def set_items(self, items):
        self.model().set_items(items)
        if self.isVisible():
            self.relayout_needed.emit()
    def set_completion_prefix(self, prefix):
        self.model().set_completion_prefix(prefix)
        if self.isVisible():
            self.relayout_needed.emit()
    def item_entered(self, idx):
        if self.visualRect(idx).top() < self.viewport().rect().bottom() - 5:
            # Prevent any bottom item in the list that is only partially
            # visible from triggering setCurrentIndex()
            self.entered.disconnect()
            try:
                self.setCurrentIndex(idx)
            finally:
                self.entered.connect(self.item_entered)
    def next_match(self, previous=False):
        # Move the selection down (or up) one row, wrapping around.
        c = self.currentIndex()
        if c.isValid():
            r = c.row()
        else:
            r = self.model().rowCount() if previous else -1
        r = r + (-1 if previous else 1)
        index = self.model().index(r % self.model().rowCount())
        self.setCurrentIndex(index)
    def scroll_to(self, orig):
        # Select the first item matching the prefix *orig*, if any.
        if orig:
            index = self.model().index_for_prefix(orig)
            if index is not None and index.isValid():
                self.setCurrentIndex(index)
    def popup(self, select_first=True):
        # Size and place the popup under (or, if space is short, above) the
        # completed widget, clamped to the available screen geometry.
        if self.disable_popup:
            return
        p = self
        m = p.model()
        widget = self.completer_widget()
        if widget is None:
            return
        screen = QApplication.desktop().availableGeometry(widget)
        h = (p.sizeHintForRow(0) * min(self.max_visible_items, m.rowCount()) +
                3) + 3
        hsb = p.horizontalScrollBar()
        if hsb and hsb.isVisible():
            h += hsb.sizeHint().height()
        rh = widget.height()
        pos = widget.mapToGlobal(QPoint(0, widget.height() - 2))
        w = min(widget.width(), screen.width())
        if (pos.x() + w) > (screen.x() + screen.width()):
            pos.setX(screen.x() + screen.width() - w)
        if pos.x() < screen.x():
            pos.setX(screen.x())
        top = pos.y() - rh - screen.top() + 2
        bottom = screen.bottom() - pos.y()
        h = max(h, p.minimumHeight())
        if h > bottom:
            h = min(max(top, bottom), h)
            if top > bottom:
                # Not enough room below the widget: open the popup above it.
                pos.setY(pos.y() - h - rh + 2)
        p.setGeometry(pos.x(), pos.y(), w, h)
        if (tweaks['preselect_first_completion'] and select_first and not
                self.currentIndex().isValid() and self.model().rowCount() > 0):
            self.setCurrentIndex(self.model().index(0))
        if not p.isVisible():
            if isosx and get_osx_version() >= (10, 9, 0):
                # On mavericks the popup menu seems to use a font smaller than
                # the widgets font, see for example:
                # https://bugs.launchpad.net/bugs/1243761
                fp = QFontInfo(widget.font())
                f = QFont()
                f.setPixelSize(fp.pixelSize())
                self.setFont(f)
            p.show()
    def debug_event(self, ev):
        # Development aid: print incoming events (see commented call below).
        from calibre.gui2 import event_type_name
        print ('Event:', event_type_name(ev))
        if ev.type() in (ev.KeyPress, ev.ShortcutOverride, ev.KeyRelease):
            print ('\tkey:', QKeySequence(ev.key()).toString())
    def eventFilter(self, obj, e):
        'Redirect key presses from the popup to the widget'
        widget = self.completer_widget()
        if widget is None or sip.isdeleted(widget):
            return False
        etype = e.type()
        if obj is not self:
            return QObject.eventFilter(self, obj, e)
        # self.debug_event(e)
        if etype == e.KeyPress:
            try:
                key = e.key()
            except AttributeError:
                return QObject.eventFilter(self, obj, e)
            if key == Qt.Key_Escape:
                self.hide()
                e.accept()
                return True
            if key == Qt.Key_F4 and e.modifiers() & Qt.AltModifier:
                self.hide()
                e.accept()
                return True
            if key in (Qt.Key_Enter, Qt.Key_Return):
                # We handle this explicitly because on OS X activated() is
                # not emitted on pressing Enter.
                idx = self.currentIndex()
                if idx.isValid():
                    self.item_chosen(idx)
                self.hide()
                e.accept()
                return True
            if key == Qt.Key_Tab:
                # Tab accepts the current item, or cycles when none selected.
                idx = self.currentIndex()
                if idx.isValid():
                    self.item_chosen(idx)
                    self.hide()
                elif self.model().rowCount() > 0:
                    self.next_match()
                e.accept()
                return True
            if key in (Qt.Key_PageUp, Qt.Key_PageDown):
                # Let the list view handle these keys
                return False
            if key in (Qt.Key_Up, Qt.Key_Down):
                self.next_match(previous=key == Qt.Key_Up)
                e.accept()
                return True
            # Send to widget
            widget.eat_focus_out = False
            widget.keyPressEvent(e)
            widget.eat_focus_out = True
            if not widget.hasFocus():
                # Widget lost focus hide the popup
                self.hide()
            if e.isAccepted():
                return True
        elif isosx and etype == e.InputMethodQuery and e.queries() == (Qt.ImHints | Qt.ImEnabled) and self.isVisible():
            # In Qt 5 the Esc key causes this event and the line edit does not
            # handle it, which causes the parent dialog to be closed
            # See https://bugreports.qt-project.org/browse/QTBUG-41806
            e.accept()
            return True
        elif etype == e.MouseButtonPress and hasattr(e, 'globalPos') and not self.rect().contains(self.mapFromGlobal(e.globalPos())):
            # A click outside the popup, close it
            if isinstance(widget, QComboBox):
                # This workaround is needed to ensure clicking on the drop down
                # arrow of the combobox closes the popup
                opt = QStyleOptionComboBox()
                widget.initStyleOption(opt)
                sc = widget.style().hitTestComplexControl(QStyle.CC_ComboBox, opt, widget.mapFromGlobal(e.globalPos()), widget)
                if sc == QStyle.SC_ComboBoxArrow:
                    QTimer.singleShot(0, self.hide)
                    e.accept()
                    return True
            self.hide()
            e.accept()
            return True
        elif etype in (e.InputMethod, e.ShortcutOverride):
            # Forward input-method and shortcut-override events to the widget.
            QApplication.sendEvent(widget, e)
        return False
# }}}
class LineEdit(QLineEdit, LineEditECM):
    '''
    A line edit that completes on multiple items separated by a
    separator. Use the :meth:`update_items_cache` to set the list of
    all possible completions. Separator can be controlled with the
    :meth:`set_separator` and :meth:`set_space_before_sep` methods.
    A call to self.set_separator(None) will allow this widget to be used
    to complete non multiple fields as well.
    '''
    item_selected = pyqtSignal(object)
    def __init__(self, parent=None, completer_widget=None, sort_func=sort_key):
        QLineEdit.__init__(self, parent)
        self.sep = ','
        self.space_before_sep = False
        self.add_separator = True
        # Cursor position captured when completions were last updated; used
        # by get_completed_text() to split the text around the edited token.
        self.original_cursor_pos = None
        completer_widget = (self if completer_widget is None else
                completer_widget)
        self.mcompleter = Completer(completer_widget, sort_func=sort_func)
        self.mcompleter.item_selected.connect(self.completion_selected,
                type=Qt.QueuedConnection)
        self.mcompleter.relayout_needed.connect(self.relayout)
        self.mcompleter.setFocusProxy(completer_widget)
        self.textEdited.connect(self.text_edited)
        # When True, programmatic setText() does not trigger the popup.
        self.no_popup = False
    # Interface {{{
    def update_items_cache(self, complete_items):
        self.all_items = complete_items
    def set_separator(self, sep):
        self.sep = sep
    def set_space_before_sep(self, space_before):
        self.space_before_sep = space_before
    def set_add_separator(self, what):
        self.add_separator = bool(what)
    @dynamic_property
    def all_items(self):
        # Proxies the completion model's candidate list.
        def fget(self):
            return self.mcompleter.model().all_items
        def fset(self, items):
            self.mcompleter.model().set_items(items)
        return property(fget=fget, fset=fset)
    @dynamic_property
    def disable_popup(self):
        def fget(self):
            return self.mcompleter.disable_popup
        def fset(self, val):
            self.mcompleter.disable_popup = bool(val)
        return property(fget=fget, fset=fset)
    # }}}
    def event(self, ev):
        # See https://bugreports.qt.io/browse/QTBUG-46911
        if ev.type() == ev.ShortcutOverride and (
                ev.key() in (Qt.Key_Left, Qt.Key_Right) and (ev.modifiers() & ~Qt.KeypadModifier) == Qt.ControlModifier):
            ev.accept()
        return QLineEdit.event(self, ev)
    def complete(self, show_all=False, select_first=True):
        # Show the completion popup; with show_all, temporarily clear the
        # prefix filter and scroll to the previously typed prefix instead.
        orig = None
        if show_all:
            orig = self.mcompleter.model().current_prefix
            self.mcompleter.set_completion_prefix('')
        if not self.mcompleter.model().current_items:
            self.mcompleter.hide()
            return
        self.mcompleter.popup(select_first=select_first)
        self.setFocus(Qt.OtherFocusReason)
        self.mcompleter.scroll_to(orig)
    def relayout(self):
        self.mcompleter.popup()
        self.setFocus(Qt.OtherFocusReason)
    def text_edited(self, *args):
        # User typed: refresh the candidate filter and (re)show the popup.
        if self.no_popup:
            return
        self.update_completions()
        select_first = len(self.mcompleter.model().current_prefix) > 0
        if not select_first:
            self.mcompleter.setCurrentIndex(QModelIndex())
        self.complete(select_first=select_first)
    def update_completions(self):
        ' Update the list of completions '
        self.original_cursor_pos = cpos = self.cursorPosition()
        text = unicode(self.text())
        prefix = text[:cpos]
        complete_prefix = prefix.lstrip()
        if self.sep:
            # Only the token currently being typed (after the last separator)
            # is used as the completion prefix.
            complete_prefix = prefix.split(self.sep)[-1].lstrip()
        self.mcompleter.set_completion_prefix(complete_prefix)
    def get_completed_text(self, text):
        'Get completed text in before and after parts'
        if self.sep is None:
            return text, ''
        else:
            cursor_pos = self.original_cursor_pos
            if cursor_pos is None:
                cursor_pos = self.cursorPosition()
            self.original_cursor_pos = None
            # Split text
            curtext = unicode(self.text())
            before_text = curtext[:cursor_pos]
            after_text = curtext[cursor_pos:].rstrip()
            # Remove the completion prefix from the before text
            before_text = self.sep.join(before_text.split(self.sep)[:-1]).rstrip()
            if before_text:
                # Add the separator to the end of before_text
                if self.space_before_sep:
                    before_text += ' '
                before_text += self.sep + ' '
            if self.add_separator or after_text:
                # Add separator to the end of completed text
                if self.space_before_sep:
                    text = text.rstrip() + ' '
                completed_text = text + self.sep + ' '
            else:
                completed_text = text
            return before_text + completed_text, after_text
    def completion_selected(self, text):
        # Splice the chosen completion into the edited token and place the
        # cursor right after it.
        before_text, after_text = self.get_completed_text(unicode(text))
        self.setText(before_text + after_text)
        self.setCursorPosition(len(before_text))
        self.item_selected.emit(text)
class EditWithComplete(EnComboBox):
    """Editable combo box whose line edit is the completing LineEdit above.

    Most of the public API simply delegates to the embedded LineEdit; the
    drop-down arrow is repurposed to show the full completion popup.
    """
    item_selected = pyqtSignal(object)
    def __init__(self, *args, **kwargs):
        EnComboBox.__init__(self, *args)
        self.setLineEdit(LineEdit(self, completer_widget=self, sort_func=kwargs.get('sort_func', sort_key)))
        self.lineEdit().item_selected.connect(self.item_selected)
        # Disable the QComboBox built-in completer; LineEdit handles it.
        self.setCompleter(None)
        self.eat_focus_out = True
        self.installEventFilter(self)
    # Interface {{{
    def showPopup(self):
        # Drop-down arrow shows all completions, even if popups are disabled.
        orig = self.disable_popup
        self.disable_popup = False
        try:
            self.lineEdit().complete(show_all=True)
        finally:
            self.disable_popup = orig
    def update_items_cache(self, complete_items):
        self.lineEdit().update_items_cache(complete_items)
    def set_separator(self, sep):
        self.lineEdit().set_separator(sep)
    def set_space_before_sep(self, space_before):
        self.lineEdit().set_space_before_sep(space_before)
    def set_add_separator(self, what):
        self.lineEdit().set_add_separator(what)
    def show_initial_value(self, what):
        what = unicode(what) if what else u''
        self.setText(what)
        self.lineEdit().selectAll()
    @dynamic_property
    def all_items(self):
        def fget(self):
            return self.lineEdit().all_items
        def fset(self, val):
            self.lineEdit().all_items = val
        return property(fget=fget, fset=fset)
    @dynamic_property
    def disable_popup(self):
        def fget(self):
            return self.lineEdit().disable_popup
        def fset(self, val):
            self.lineEdit().disable_popup = bool(val)
        return property(fget=fget, fset=fset)
    # }}}
    def text(self):
        return unicode(self.lineEdit().text())
    def selectAll(self):
        self.lineEdit().selectAll()
    def setText(self, val):
        # Programmatic text changes must not open the completion popup.
        le = self.lineEdit()
        le.no_popup = True
        le.setText(val)
        le.no_popup = False
    def setCursorPosition(self, *args):
        self.lineEdit().setCursorPosition(*args)
    @property
    def textChanged(self):
        return self.lineEdit().textChanged
    def clear(self):
        self.lineEdit().clear()
        EnComboBox.clear(self)
    def eventFilter(self, obj, e):
        # Swallow FocusOut while the completion popup is open, so that
        # clicking in the popup does not dismiss the editor state.
        try:
            c = self.lineEdit().mcompleter
        except AttributeError:
            return False
        etype = e.type()
        if self.eat_focus_out and self is obj and etype == e.FocusOut:
            if c.isVisible():
                return True
        return EnComboBox.eventFilter(self, obj, e)
# Interactive smoke test: shows a dialog with a single EditWithComplete
# widget pre-loaded with sample items (including accented strings to
# exercise ICU-based matching). Run this module directly to try it.
if __name__ == '__main__':
    from PyQt5.Qt import QDialog, QVBoxLayout
    app = QApplication([])
    d = QDialog()
    d.setLayout(QVBoxLayout())
    le = EditWithComplete(d)
    d.layout().addWidget(le)
    items = ['one', 'otwo', 'othree', 'ooone', 'ootwo',
        'oothree', 'a1', 'a2',u'Edgas', u'Èdgar', u'Édgaq', u'Edgar', u'Édgar']
    le.update_items_cache(items)
    le.show_initial_value('')
    d.exec_()
| jelly/calibre | src/calibre/gui2/complete2.py | Python | gpl-3.0 | 18,843 |
import matplotlib.pyplot as plt
import numpy as np

# Geometric Brownian motion: dS = mu*S*dt + sigma*S*dW, simulated via the
# exact solution S(t) = S0 * exp((mu - sigma^2/2) * t + sigma * W(t)).
T = 2         # time horizon
mu = 0.1      # drift
sigma = 0.01  # volatility
S0 = 20       # initial value
dt = 0.01     # time step
# int() guards against round() yielding a float (Python 2) and keeps
# np.linspace happy on modern NumPy, which requires an integer num.
N = int(round(T/dt))
t = np.linspace(0, T, N)
W = np.random.standard_normal(size = N)
W = np.cumsum(W)*np.sqrt(dt) ### standard brownian motion ###
X = (mu-0.5*sigma**2)*t + sigma*W
S = S0*np.exp(X) ### geometric brownian motion ###
plt.plot(t, S)
plt.show()
| algoix/blog | GBM_simulation.py | Python | mit | 354 |
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import winreg as winreg
import os
from cerbero.config import Architecture
from cerbero.utils import fix_winpath, shell
class MSBuild(object):
    """Drive msbuild.exe to build a Visual Studio solution.

    Collects the msbuild property set (Platform, Configuration,
    PlatformToolset plus any extra keyword properties) and resolves tool
    locations from the Windows registry, so this class only works on
    Windows hosts.
    """
    def __init__(self, solution, arch=Architecture.X86, config='Release',
                 sdk='Windows7.1SDK', **properties):
        self.properties = {}
        if arch == Architecture.X86:
            self.properties['Platform'] = 'Win32'
        elif arch == Architecture.X86_64:
            self.properties['Platform'] = 'x64'
        self.properties['Configuration'] = config
        self.properties['PlatformToolset'] = sdk
        self.properties.update(properties)
        self.solution = solution
    def build(self):
        # Invoke the 'build' msbuild target on the solution.
        self._call('build')
    @staticmethod
    def get_msbuild_tools_path():
        # Directory containing msbuild.exe for MSBuild 4.0, per the registry.
        reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
        key = winreg.OpenKey(reg,
                r"SOFTWARE\Microsoft\MSBuild\ToolsVersions\4.0")
        path = winreg.QueryValueEx(key, 'MSBuildToolsPath')[0]
        return fix_winpath(path)
    @staticmethod
    def get_vs_path():
        # The registry records the Visual Studio 10.0 VC directory; rewrite
        # it into the IDE directory (Common7\IDE) next to it.
        reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
        key = winreg.OpenKey(reg,
                r"SOFTWARE\Microsoft\VisualStudio\SxS\VC7")
        path = winreg.QueryValueEx(key, '10.0')[0]
        path = str(path)
        path = path.replace('\\VC', '\\Common7\\IDE')
        return path
    def _call(self, command):
        """Run msbuild with the accumulated properties and the given target."""
        properties = self._format_properties()
        msbuildpath = self.get_msbuild_tools_path()
        vs_path = self.get_vs_path()
        old_path = os.environ['PATH']
        if self.properties['Platform'] == 'Win32':
            # NOTE(review): 32-bit builds appear to need the VS IDE dir on
            # PATH (presumably for dependent DLLs) -- confirm.
            os.environ['PATH'] = '%s;%s' % (os.environ['PATH'], vs_path)
        try:
            shell.new_call(['msbuild.exe', self.solution, *properties, '/target:%s' %
                (command,)], msbuildpath)
        finally:
            # Always restore PATH, even when the build fails.
            os.environ['PATH'] = old_path
    def _format_properties(self):
        # ['/property:Name=Value', ...] for the msbuild command line.
        return ['/property:%s=%s' % (k, v) for k, v in
                self.properties.items()]
| nirbheek/cerbero | cerbero/utils/msbuild.py | Python | lgpl-2.1 | 2,909 |
import os
from kokki import Environment, Execute
def module(name, enable=True, conf=False):
    """Enable or disable an Apache module via a2enmod/a2dismod.

    When *conf* is true, the apache2 cookbook's config template for the
    module is rendered first. The apache2 service is notified to restart
    whenever the module's enabled state actually changes (guarded by the
    presence of the mods-enabled .load symlink).
    """
    env = Environment.get_instance()
    if conf:
        env.cookbooks.apache2.config(name)
    load_link_exists = lambda: os.path.exists(
        "%s/mods-enabled/%s.load" % (env.config.apache.dir, name))
    restart_apache = [("restart", env.resources["Service"]["apache2"])]
    if enable:
        Execute("a2enmod %s" % name,
            command = "/usr/sbin/a2enmod %s" % name,
            notifies = restart_apache,
            not_if = load_link_exists)
    else:
        Execute("a2dismod %s" % name,
            command = "/usr/sbin/a2dismod %s" % name,
            notifies = restart_apache,
            only_if = load_link_exists)
| samuel/kokki | kokki/cookbooks/apache2/libraries/module.py | Python | bsd-3-clause | 753 |
def First_Part(prog):
    """Run the day-5 part-1 Intcode machine (opcodes 1, 2, 3, 4 and 99).

    *prog* is a list of strings and is mutated in place. Returns 0 when the
    halt opcode (99) is reached; unrecognized opcodes are skipped one cell
    at a time, matching the original behavior.
    """
    ip = 0
    while ip < len(prog):
        instruction = prog[ip]
        opcode = int(instruction[-2:])
        modes = instruction[:-2]

        def read_param(mode, index):
            # Mode "1" = immediate value, anything else = position mode.
            if mode == "1":
                return int(prog[index])
            return int(prog[int(prog[index])])

        if opcode in (1, 2):
            operands = []
            for offset in (1, 2):
                mode = modes[-1] if modes else "0"
                operands.append(read_param(mode, ip + offset))
                modes = modes[:-1]
            result = operands[0] + operands[1] if opcode == 1 else operands[0] * operands[1]
            # Destination parameter is always positional.
            prog[int(prog[ip + 3])] = str(result)
            ip += 4
        elif opcode == 3:
            prog[int(prog[ip + 1])] = input()
            ip += 2
        elif opcode == 4:
            mode = modes[-1] if modes else "0"
            print("Output:", read_param(mode, ip + 1))
            ip += 2
        elif opcode == 99:
            return 0
        else:
            ip += 1
def Second_Part(prog):
    """Run the day-5 part-2 Intcode machine (opcodes 1-8 and 99).

    *prog* is a list of strings and is mutated in place. Returns 0 on halt
    (opcode 99); unrecognized opcodes are skipped one cell at a time.

    BUG FIXES vs the original:
    - opcodes 7 (less-than) and 8 (equals) never advanced the instruction
      pointer, so any program using them looped forever; they now advance
      by 4 (opcode + 3 parameters).
    - opcodes 5/6 advanced by 2 instead of 3 when the jump was not taken;
      they now correctly skip the opcode and both parameters.
    """
    ip = 0
    while ip < len(prog):
        instruction = prog[ip]
        opcode = int(instruction[-2:])
        modes = instruction[:-2]

        def read_param(n):
            # Value of the n-th parameter (1-based). The mode digits run
            # right-to-left, so modes[-n] belongs to parameter n; missing
            # digits default to position mode ("0"). "1" = immediate.
            mode = modes[-n] if len(modes) >= n else "0"
            if mode == "1":
                return int(prog[ip + n])
            return int(prog[int(prog[ip + n])])

        if opcode in (1, 2):
            a, b = read_param(1), read_param(2)
            # Destination parameter is always positional.
            prog[int(prog[ip + 3])] = str(a + b if opcode == 1 else a * b)
            ip += 4
        elif opcode == 3:
            prog[int(prog[ip + 1])] = input()
            ip += 2
        elif opcode == 4:
            print("Output:", read_param(1))
            ip += 2
        elif opcode in (5, 6):
            # 5 = jump-if-true, 6 = jump-if-false.
            test = read_param(1)
            if (opcode == 5) == bool(test):
                ip = read_param(2)
            else:
                ip += 3  # FIX: skip opcode + 2 parameters (was += 2)
        elif opcode in (7, 8):
            # 7 = less-than, 8 = equals: store 1/0 at the third parameter.
            a, b = read_param(1), read_param(2)
            cond = a < b if opcode == 7 else a == b
            prog[int(prog[ip + 3])] = "1" if cond else "0"
            ip += 4  # FIX: the original never advanced past opcodes 7/8
        elif opcode == 99:
            return 0
        else:
            ip += 1
Input = open("Inputs/Day_05.txt", "r").read().split(",")
# Input = "1002,4,3,4,33".split(",")
# First_Part(Input)
Input = "3,9,8,9,10,9,4,9,99,-1,8".split(",")
Second_Part(Input)
| ImpregnableProgrammer/Advent-of-Code | 2019/Day_05.py | Python | gpl-3.0 | 3,813 |
from fabric.api import hide, run, env
import time
import json
def run_cmd(cmd):
    """Run *cmd* on the remote host, hiding fabric's console chatter."""
    with hide('output', 'running', 'warnings'):
        result = run(cmd, timeout=1200)
    return result
def check(**kwargs):
    ''' Login over SSH and execute shell command '''
    jdata = kwargs['jdata']
    logger = kwargs['logger']
    data = jdata['data']
    # Configure fabric's (module-global) connection environment.
    env.gateway = data['gateway']
    env.host_string = data['host_string']
    env.user = data['username']
    env.key = data['sshkey']
    env.shell = "/bin/sh -c"
    env.disable_known_hosts = True
    env.warn_only = True
    env.abort_on_prompts = True
    sudo_prefix = "sudo " if data['use_sudo'] == "true" else ""
    cmd = sudo_prefix + "docker inspect {0}".format(data['container_name'])
    try:
        results = run_cmd(cmd)
    # NOTE(review): bare except kept to preserve behavior; connection or
    # command failure yields None ("unknown") rather than False.
    except:
        return None
    logger.debug("docker-container-running: requested command" +
                 " returned with exit code {0}".format(results.return_code))
    if not results.succeeded:
        return False
    container_data = json.loads(results)
    if "State" not in container_data[0]:
        return False
    state = container_data[0]['State']
    logger.debug("docker-container-running: container state" +
                 " returned running {0}".format(state['Running']))
    if state['Running']:
        return True
    return False
| Runbook/runbook | src/monitors/checks/docker-container-running/__init__.py | Python | apache-2.0 | 1,409 |
"""
Form for:
- Select geospatial data
- Select column 1
- Select column 2 (if lat/lng)
- clean
- column 2 optional
- column 1 cannot be same as column 2
"""
from django import forms
from gc_apps.gis_tabular.models import TabularFileInfo
#from gc_apps.worldmap_connect.jointarget_formatter import JoinTargetFormatter
# Geospatial-data type key used when mapping latitude/longitude columns.
GEO_TYPE_LATITUDE_LONGITUDE = 'latitude-longitude'
# Placeholder entry prepended to every ChoiceField's options; the empty
# value ensures the user must make an explicit selection.
SELECT_LABEL = 'Select...'
INITIAL_SELECT_CHOICE = ('', SELECT_LABEL)
class TabularFileInfoForm(forms.ModelForm):
    """ModelForm exposing all user-editable TabularFileInfo fields.

    The auto-managed timestamp columns are excluded from the form.
    """
    class Meta:
        model = TabularFileInfo
        exclude = ('created', 'modified')
class ChooseSingleColumnForm(forms.Form):
    """
    Basic form for capturing chosen column name
    """
    tabular_file_info_id = forms.IntegerField(widget=forms.HiddenInput())
    chosen_layer = forms.ChoiceField(label="WorldMap Layer", choices=())
    chosen_column = forms.ChoiceField(label="Column Name", choices=())

    def __init__(self, tabular_file_info_id, layer_choices, column_names, *args, **kwargs):
        """Populate the layer/column choice fields at construction time."""
        super(ChooseSingleColumnForm, self).__init__(*args, **kwargs)

        assert column_names is not None, "You must initiate this form with column names"

        # Prepend the "Select..." placeholder; drop empty column names.
        colname_choices = [INITIAL_SELECT_CHOICE] + [(c, c) for c in column_names if c]

        self.fields['tabular_file_info_id'].initial = tabular_file_info_id

        self.fields['chosen_layer'].choices = layer_choices
        self.fields['chosen_layer'].widget.attrs.update({'class' : 'form-control'})

        self.fields['chosen_column'].choices = colname_choices
        self.fields['chosen_column'].widget.attrs.update({'class' : 'form-control'})

    def clean_chosen_layer(self):
        """Validate the chosen layer and return its id as an int.

        BUG FIX: the original constructed ValidationError objects without
        raising them, and referenced names (ValidationError, the ``_``
        gettext alias) that were never imported -- so invalid input crashed
        with NameError/TypeError instead of producing a form error.
        """
        chosen_layer_id = self.cleaned_data.get('chosen_layer', None)
        if chosen_layer_id is None:
            raise forms.ValidationError('You must choose a layer')
        try:
            return int(chosen_layer_id)
        except (TypeError, ValueError):
            raise forms.ValidationError(
                'The layer does not have a valid id. (talk to the admin)')
class LatLngColumnsForm(forms.Form):
    """
    Simple form for capturing latitude and longitude column names
    """
    # Last cross-field error message, kept for display in the template.
    err_msg_for_web = None
    tabular_file_info_id = forms.IntegerField(widget=forms.HiddenInput())
    latitude = forms.ChoiceField(label='Column Name (Latitude)', choices=())
    longitude = forms.ChoiceField(label='Column Name (Longitude)', choices=())
    def __init__(self, tabular_file_info_id, column_names, *args, **kwargs):
        # Both choice fields are populated from the tabular file's columns,
        # with the "Select..." placeholder prepended.
        super(LatLngColumnsForm, self).__init__(*args, **kwargs)
        assert column_names is not None, "You must initiate this form with column names"
        colname_choices = [INITIAL_SELECT_CHOICE] + [(c, c) for c in column_names if c]
        #print 'colname_choices', colname_choices
        self.fields['tabular_file_info_id'].initial = tabular_file_info_id
        self.fields['latitude'].choices = colname_choices
        self.fields['longitude'].choices = colname_choices
        self.fields['latitude'].widget.attrs.update({'class' : 'form-control'})
        self.fields['longitude'].widget.attrs.update({'class' : 'form-control'})
    def get_latitude_colname(self):
        # Accessor for the validated latitude column; requires is_valid().
        assert self.cleaned_data is not None, "Do not call this unless .is_valid() is True"
        return self.cleaned_data.get('latitude')
    def get_longitude_colname(self):
        # Accessor for the validated longitude column; requires is_valid().
        assert self.cleaned_data is not None, "Do not call this unless .is_valid() is True"
        return self.cleaned_data.get('longitude')
    def clean_latitude(self):
        data = self.cleaned_data.get('latitude')
        if not data:
            raise forms.ValidationError("Please select a Latitude column")
        return data
    def clean_longitude(self):
        data = self.cleaned_data.get('longitude')
        if not data:
            raise forms.ValidationError("Please select a Longitude column")
        return data
    def clean(self):
        """
        Check to make sure the lat and lng columns aren't the same
        """
        cleaned_data = super(LatLngColumnsForm, self).clean()
        latitude = self.clean_latitude() #cleaned_data.get('latitude')
        longitude = self.clean_longitude() # cleaned_data.get('longitude')
        if latitude == longitude:
            err_msg = 'The Longitude column cannot be the same as the Latitude column.'
            # Saved so the template can show the message outside the form.
            self.err_msg_for_web = err_msg
            # django 1.6
            # NOTE(review): manual _errors/cleaned_data manipulation is the
            # Django 1.6-era equivalent of add_error(); the raise below makes
            # the subsequent return unreachable in the error case.
            self._errors["longitude"] = self.error_class([err_msg])
            del self.cleaned_data["longitude"]
            #self.add_error('longitude', err_msg) # django 1.8
            raise forms.ValidationError(err_msg)
        return self.cleaned_data
"""
from gc_apps.gis_tabular.forms import LatLngColumnsForm
f = LatLngColumnsForm()
"""
| IQSS/geoconnect | gc_apps/gis_tabular/forms.py | Python | apache-2.0 | 4,797 |
import binascii
import logging
import os
from .. import concretization_strategies
from ..engines.soot.values import (SimSootValue_ArrayRef,
SimSootValue_InstanceFieldRef,
SimSootValue_Local, SimSootValue_ParamRef,
SimSootValue_StaticFieldRef,
SimSootValue_StringRef)
from ..errors import SimMemoryAddressError, SimUnsatError
from ..sim_state import SimState
from ..storage.memory import SimMemory
from .keyvalue_memory import SimKeyValueMemory
from .plugin import SimStatePlugin
l = logging.getLogger("angr.state_plugins.javavm_memory")
MAX_ARRAY_SIZE = 1000 # FIXME arbitrarily chosen limit
class SimJavaVmMemory(SimMemory):
    def __init__(self, memory_id="mem", stack=None, heap=None, vm_static_table=None,
                 load_strategies=None, store_strategies=None):
        """Memory model for the Java VM.

        :param memory_id: identifier for this memory plugin.
        :param stack: list of per-frame key-value stores (one per call frame).
        :param heap: key-value store for instance fields, arrays, and strings.
        :param vm_static_table: key-value store for static (class) fields.
        :param load_strategies: index concretization strategies for loads.
        :param store_strategies: index concretization strategies for stores.
        """
        super(SimJavaVmMemory, self).__init__()
        self.id = memory_id
        self._stack = [] if stack is None else stack
        self.heap = SimKeyValueMemory("mem") if heap is None else heap
        self.vm_static_table = SimKeyValueMemory("mem") if vm_static_table is None else vm_static_table
        # Heap helper
        # TODO: ask someone how we want to manage this
        # TODO: Manage out of memory allocation
        # self._heap_allocation_id = 0
        self.max_array_size = MAX_ARRAY_SIZE
        # concretizing strategies
        self.load_strategies = load_strategies if load_strategies else []
        self.store_strategies = store_strategies if store_strategies else []
@staticmethod
def get_new_uuid():
"""
Generate a unique id within the scope of the JavaVM memory. This, for
example, is used for distinguishing memory objects of the same type
(e.g. multiple instances of the same class).
"""
# self._heap_allocation_id += 1
# return str(self._heap_allocation_id)
return binascii.hexlify(os.urandom(4))
    def store(self, addr, data, frame=0): # pylint: disable=arguments-differ
        """Store ``data`` at the location described by ``addr``.

        Dispatches on the Soot value type: locals/params go to the selected
        stack frame, array refs to the array helper, static fields to the
        static table, and instance fields/strings to the heap.

        :param frame: 0 = current frame; higher values select older frames.
        """
        if type(addr) is SimSootValue_Local:
            cstack = self._stack[-1+(-1*frame)]
            cstack.store(addr.id, data, type_=addr.type)
        elif type(addr) is SimSootValue_ParamRef:
            cstack = self._stack[-1+(-1*frame)]
            cstack.store(addr.id, data, type_=addr.type)
        elif type(addr) is SimSootValue_ArrayRef:
            self.store_array_element(addr.base, addr.index, data)
        elif type(addr) is SimSootValue_StaticFieldRef:
            self.vm_static_table.store(addr.id, data, type_=addr.type)
        elif type(addr) is SimSootValue_InstanceFieldRef:
            self.heap.store(addr.id, data, type_=addr.type)
        elif type(addr) is SimSootValue_StringRef:
            self.heap.store(addr.id, data, type_=addr.type)
        else:
            # Unknown reference types are logged but not stored
            l.error("Unknown addr type %s", addr)
    def load(self, addr, frame=0, none_if_missing=False): # pylint: disable=arguments-differ
        """Load the value at the location described by ``addr``.

        Static and instance fields are lazily initialized with the type's
        default value on first access. Returns None for unknown addr types.

        :param frame: 0 = current frame; higher values select older frames.
        :param none_if_missing: forwarded to the key-value stores.
        """
        if type(addr) is SimSootValue_Local:
            cstack = self._stack[-1+(-1*frame)]
            return cstack.load(addr.id, none_if_missing=none_if_missing)
        elif type(addr) is SimSootValue_ArrayRef:
            return self.load_array_element(addr.base, addr.index)
        elif type(addr) is SimSootValue_ParamRef:
            cstack = self._stack[-1+(-1*frame)]
            return cstack.load(addr.id, none_if_missing=none_if_missing)
        elif type(addr) is SimSootValue_StaticFieldRef:
            value = self.vm_static_table.load(addr.id, none_if_missing=none_if_missing)
            if value is None:
                # initialize field
                value = self.state.project.simos.get_default_value_by_type(addr.type, state=self.state)
                l.debug("Initializing static field %s with %s.", addr, value)
                self.store(addr, value)
            return value
        elif type(addr) is SimSootValue_InstanceFieldRef:
            value = self.heap.load(addr.id, none_if_missing=none_if_missing)
            if value is None:
                # initialize field
                value = self.state.project.simos.get_default_value_by_type(addr.type, state=self.state)
                l.debug("Initializing field %s with %s.", addr, value)
                self.store(addr, value)
            return value
        elif type(addr) is SimSootValue_StringRef:
            return self.heap.load(addr.id, none_if_missing=none_if_missing)
        else:
            l.error("Unknown addr type %s", addr)
            return None
def push_stack_frame(self):
self._stack.append(SimKeyValueMemory("mem"))
    def pop_stack_frame(self):
        """Leave the current method: drop the top-most stack frame."""
        # Rebinds to a copy rather than mutating in place; no-op on empty stack.
        self._stack = self._stack[:-1]
    @property
    def stack(self):
        # Top-most (current) stack frame; raises IndexError if no frame exists.
        return self._stack[-1]
#
# Array // Store
#
    def store_array_element(self, array, idx, value):
        """Store a single ``value`` at index ``idx`` of ``array``."""
        self.store_array_elements(array, idx, value)
def store_array_elements(self, array, start_idx, data):
"""
Stores either a single element or a range of elements in the array.
:param array: Reference to the array.
:param start_idx: Starting index for the store.
:param data: Either a single value or a list of values.
"""
# we process data as a list of elements
# => if there is only a single element, wrap it in a list
data = data if isinstance(data, list) else [data]
# concretize start index
concrete_start_idxes = self.concretize_store_idx(start_idx)
if len(concrete_start_idxes) == 1:
# only one start index
# => concrete store
concrete_start_idx = concrete_start_idxes[0]
for i, value in enumerate(data):
self._store_array_element_on_heap(array=array,
idx=concrete_start_idx+i,
value=value,
value_type=array.element_type)
# if the index was symbolic before concretization, this
# constraint it to concrete start idx
self.state.solver.add(concrete_start_idx == start_idx)
else:
# multiple start indexes
# => symbolic store
start_idx_options = []
for concrete_start_idx in concrete_start_idxes:
start_idx_options.append(concrete_start_idx == start_idx)
# we store elements condtioned with the start index:
# => if concrete_start_idx == start_idx
# then store the value
# else keep the current value
for i, value in enumerate(data):
self._store_array_element_on_heap(array=array,
idx=concrete_start_idx+i,
value=value,
value_type=array.element_type,
store_condition=start_idx_options[-1])
# constraint start_idx, s.t. it evals to one of the concretized indexes
constraint_on_start_idx = self.state.solver.Or(*start_idx_options)
self.state.add_constraints(constraint_on_start_idx)
    def _store_array_element_on_heap(self, array, idx, value, value_type, store_condition=None):
        """Write one array element to the heap under the key '<array.id>[idx]'.

        If ``store_condition`` is given, the stored value is an ITE:
        the new value when the condition holds, otherwise the old value.
        """
        heap_elem_id = '%s[%d]' % (array.id, idx)
        l.debug("Set %s to %s with condition %s", heap_elem_id, value, store_condition)
        if store_condition is not None:
            current_value = self._load_array_element_from_heap(array, idx)
            new_value = value
            value = self.state.solver.If(store_condition, new_value, current_value)
        self.heap.store(heap_elem_id, value, value_type)
#
# Array // Load
#
    def load_array_element(self, array, idx):
        """Load a single element at index ``idx`` from ``array``."""
        return self.load_array_elements(array, idx, 1)[0]
    def load_array_elements(self, array, start_idx, no_of_elements):
        """
        Loads either a single element or a range of elements from the array.

        :param array:          Reference to the array.
        :param start_idx:      Starting index for the load.
        :param no_of_elements: Number of elements to load.
        :returns:              List of loaded values (symbolic ITEs if the
                               start index concretized to several candidates).
        """

        # concretize start index
        concrete_start_idxes = self.concretize_load_idx(start_idx)

        if len(concrete_start_idxes) == 1:
            # only one start index
            # => concrete load
            concrete_start_idx = concrete_start_idxes[0]
            load_values = [self._load_array_element_from_heap(array, idx)
                           for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)]
            # if the index was symbolic before concretization, this
            # constraint it to concrete start idx
            self.state.solver.add(start_idx == concrete_start_idx)

        else:
            # multiple start indexes
            # => symbolic load

            # start with load values for the first concrete index
            concrete_start_idx = concrete_start_idxes[0]
            load_values = [self._load_array_element_from_heap(array, idx)
                           for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)]
            start_idx_options = [concrete_start_idx == start_idx]

            # update load values with all remaining start indexes
            for concrete_start_idx in concrete_start_idxes[1:]:
                # load values for this start index
                values = [self._load_array_element_from_heap(array, idx)
                          for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)]
                # update load values with the new ones
                for i, value in enumerate(values):
                    # condition every value with the start idx
                    # => if concrete_start_idx == start_idx
                    #    then use new value
                    #    else use the current value
                    load_values[i] = self.state.solver.If(
                        concrete_start_idx == start_idx,
                        value,
                        load_values[i]
                    )
                start_idx_options.append(start_idx == concrete_start_idx)

            # constraint start_idx, s.t. it evals to one of the concretized indexes
            constraint_on_start_idx = self.state.solver.Or(*start_idx_options)
            self.state.add_constraints(constraint_on_start_idx)

        return load_values
    def _load_array_element_from_heap(self, array, idx):
        """Load one element '<array.id>[idx]' from the heap, lazily
        initializing it with the array's default value if absent."""
        # try to load the element
        heap_elem_id = '%s[%d]' % (array.id, idx)
        value = self.heap.load(heap_elem_id, none_if_missing=True)
        # if it's not available, initialize it
        if value is None:
            value = array.get_default_value(self.state)
            l.debug("Init %s with %s", heap_elem_id, value)
            # NOTE(review): element_type is read off the freshly created
            # *value*; presumably array.element_type was intended — confirm.
            element_type = value.element_type if hasattr(value, 'element_type') else None
            self.heap.store(heap_elem_id, value, type_=element_type)
        else:
            l.debug("Load %s from %s", heap_elem_id, value)
        return value
#
# Concretization strategies
#
def _apply_concretization_strategies(self, idx, strategies, action): # pylint: disable=unused-argument
"""
Applies concretization strategies on the index, until one of them succeeds.
"""
for s in strategies:
try:
idxes = s.concretize(self, idx)
except SimUnsatError:
idxes = None
if idxes:
return idxes
raise SimMemoryAddressError("Unable to concretize index %s" % idx)
    def concretize_store_idx(self, idx, strategies=None):
        """
        Concretizes a store index.

        Concrete indexes (plain ints or non-symbolic expressions) are
        returned directly; symbolic ones go through the store strategies.

        :param idx:        An expression for the index.
        :param strategies: A list of concretization strategies (to override the default).
        :returns:          A list of concrete indexes.
        """
        if isinstance(idx, int):
            return [idx]
        elif not self.state.solver.symbolic(idx):
            return [self.state.solver.eval(idx)]

        strategies = self.store_strategies if strategies is None else strategies
        return self._apply_concretization_strategies(idx, strategies, 'store')
    def concretize_load_idx(self, idx, strategies=None):
        """
        Concretizes a load index.

        Concrete indexes (plain ints or non-symbolic expressions) are
        returned directly; symbolic ones go through the load strategies.

        :param idx:        An expression for the index.
        :param strategies: A list of concretization strategies (to override the default).
        :returns:          A list of concrete indexes.
        """
        if isinstance(idx, int):
            return [idx]
        elif not self.state.solver.symbolic(idx):
            return [self.state.solver.eval(idx)]

        strategies = self.load_strategies if strategies is None else strategies
        return self._apply_concretization_strategies(idx, strategies, 'load')
def _create_default_load_strategies(self):
# reset dict
self.load_strategies = []
# symbolically read up to 1024 elements
s = concretization_strategies.SimConcretizationStrategyRange(1024)
self.load_strategies.append(s)
# if range is too big, fallback to load only one arbitrary element
s = concretization_strategies.SimConcretizationStrategyAny()
self.load_strategies.append(s)
def _create_default_store_strategies(self):
# reset dict
self.store_strategies = []
# symbolically write up to 256 elements
s = concretization_strategies.SimConcretizationStrategyRange(256)
self.store_strategies.append(s)
# if range is too big, fallback to store only the last element
s = concretization_strategies.SimConcretizationStrategyMax()
self.store_strategies.append(s)
#
# MISC
#
    def set_state(self, state):
        """Attach this plugin to ``state`` and lazily install the default
        concretization strategies if none were supplied."""
        super(SimJavaVmMemory, self).set_state(state)
        if not self.load_strategies:
            self._create_default_load_strategies()
        if not self.store_strategies:
            self._create_default_store_strategies()
    @SimStatePlugin.memo
    def copy(self, memo): # pylint: disable=unused-argument
        """Return a deep copy of this memory (frames, heap, static table,
        and strategies are copied; memoized via SimStatePlugin.memo)."""
        return SimJavaVmMemory(
            memory_id=self.id,
            stack=[stack_frame.copy() for stack_frame in self._stack],
            heap=self.heap.copy(),
            vm_static_table=self.vm_static_table.copy(),
            load_strategies=[s.copy() for s in self.load_strategies],
            store_strategies=[s.copy() for s in self.store_strategies]
        )
    def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
        """Merging is unsupported; warns and reports failure."""
        l.warning("Merging is not implemented for JavaVM memory!")
        return False
    def widen(self, others): # pylint: disable=unused-argument
        """Widening is unsupported; warns and reports failure."""
        l.warning("Widening is not implemented for JavaVM memory!")
        return False
    def _find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1): # pylint: disable=unused-argument
        """Byte-searching is unsupported in this memory model."""
        l.warning("Find is not implemented for JavaVM memory!")
        return None
    def _load(self, _addr, _size, condition=None, fallback=None, # pylint: disable=unused-argument
              inspect=True, events=True, ret_on_segv=False):
        # The public load() is overridden directly, so the SimMemory
        # internal hook must never be reached.
        raise NotImplementedError("JavaVM memory overwrites load function directly.")
    def _store(self, _request): # pylint: disable=unused-argument
        # The public store() is overridden directly, so the SimMemory
        # internal hook must never be reached.
        raise NotImplementedError("JavaVM memory overwrites store function directly.")
# Expose this plugin as ``state.javavm_memory`` on every SimState.
SimState.register_default('javavm_memory', SimJavaVmMemory)
| iamahuman/angr | angr/state_plugins/javavm_memory.py | Python | bsd-2-clause | 16,227 |
# $Id: 8066a5bbef6962141ae539bef06493250cbeab57 $
"""
SQLite3 extended database driver.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import os
import sys
import re
from grizzled.db.base import (DBDriver, Error, Warning, TableMetadata,
IndexMetadata, RDBMSMetadata)
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class SQLite3Driver(DBDriver):
    """DB Driver for SQLite, using the pysqlite DB API module."""

    def get_import(self):
        """Return the underlying DB API module (stdlib sqlite3)."""
        import sqlite3
        return sqlite3

    def get_display_name(self):
        """Return a human-readable name for this driver."""
        return "SQLite3"

    def do_connect(self,
                   host=None,
                   port=None,
                   user='',
                   password='',
                   database='default'):
        """Open a connection. ``database`` is the path to the database file;
        host/port/user/password are ignored for SQLite."""
        dbi = self.get_import()
        return dbi.connect(database=database, isolation_level=None)

    def get_rdbms_metadata(self, cursor):
        """Return vendor/product/version metadata for the RDBMS."""
        import sqlite3
        return RDBMSMetadata('SQLite', 'SQLite 3', sqlite3.sqlite_version)

    def get_tables(self, cursor):
        """Return the names of all tables in the database."""
        cursor.execute("select name from sqlite_master where type = 'table'")
        table_names = []
        rs = cursor.fetchone()
        while rs is not None:
            table_names.append(rs[0])
            rs = cursor.fetchone()
        return table_names

    def get_table_metadata(self, table, cursor):
        """Return a list of TableMetadata objects for ``table``'s columns.

        Uses ``PRAGMA table_info``, whose rows look like:
        (cid, name, type, notnull, dflt_value, pk). char/varchar sizes are
        parsed out of the declared type.
        """
        self._ensure_valid_table(cursor, table)

        cursor.execute('PRAGMA table_info(%s)' % table)
        rs = cursor.fetchone()
        result = []
        char_field_re = re.compile(r'(varchar|char)\((\d+)\)')
        while rs is not None:
            # Renamed locals: 'id' and 'type' previously shadowed builtins.
            (col_id, col_name, col_type, not_null, default_value, is_primary) = rs
            m = char_field_re.match(col_type)
            if m:
                col_type = m.group(1)
                try:
                    max_char_size = int(m.group(2))
                except ValueError:
                    # Bug fix: this branch previously called ``log.error``,
                    # but no ``log`` name exists in this module, which would
                    # raise NameError. Fall back to "unknown size" instead.
                    max_char_size = 0
            else:
                max_char_size = 0

            data = TableMetadata(col_name, col_type, max_char_size, 0, 0,
                                 not not_null)
            result.append(data)
            rs = cursor.fetchone()
        return result

    def get_index_metadata(self, table, cursor):
        """Return a list of IndexMetadata objects for ``table``'s indexes.

        Uses ``PRAGMA index_list`` (seq, name, unique) to enumerate indexes,
        then ``PRAGMA index_info`` (seqno, cid, name) for each one's columns.
        """
        self._ensure_valid_table(cursor, table)

        result = []
        cursor.execute("PRAGMA index_list('%s')" % table)
        indexes = []
        rs = cursor.fetchone()
        while rs is not None:
            indexes.append((rs[1], rs[2]))
            rs = cursor.fetchone()

        for name, unique in indexes:
            cursor.execute("PRAGMA index_info('%s')" % name)
            rs = cursor.fetchone()
            columns = []
            while rs is not None:
                columns.append(rs[2])
                rs = cursor.fetchone()

            description = 'UNIQUE' if unique else ''
            result.append(IndexMetadata(name, columns, description))

        return result
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/grizzled/grizzled/db/sqlite.py | Python | bsd-3-clause | 4,689 |
# -*- coding: ascii -*-
#
# Util/_number_new.py : utility functions
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
## NOTE: Do not import this module directly. Import these functions from Cryptodome.Util.number.
__all__ = ['ceil_shift', 'ceil_div', 'floor_div', 'exact_log2', 'exact_div']
def ceil_shift(n, b):
    """Return ceil(n / 2**b) without performing any floating-point or division operations.

    This is done by right-shifting n by b bits and incrementing the result by 1
    if any '1' bits were shifted out.
    """
    if not isinstance(n, int) or not isinstance(b, int):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(n).__name__, type(b).__name__))

    assert n >= 0 and b >= 0    # negative operands are unsupported
    lost_bits = n & ((1 << b) - 1)
    shifted = n >> b
    if lost_bits:
        shifted += 1
    return shifted
def ceil_div(a, b):
    """Return ceil(a / b) without performing any floating-point operations."""
    if not isinstance(a, int) or not isinstance(b, int):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(a).__name__, type(b).__name__))

    quotient, remainder = divmod(a, b)
    # Round toward positive infinity whenever the division was inexact.
    return quotient + 1 if remainder else quotient
def floor_div(a, b):
    """Return floor(a / b) for integer operands, rejecting any other types."""
    if not isinstance(a, int) or not isinstance(b, int):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(a).__name__, type(b).__name__))

    # Python's integer // already floors toward negative infinity.
    return a // b
def exact_log2(num):
    """Find and return an integer i >= 0 such that num == 2**i.

    If no such integer exists, this function raises ValueError.
    """
    if not isinstance(num, int):
        raise TypeError("unsupported operand type: %r" % (type(num).__name__,))

    n = int(num)
    if n <= 0:
        raise ValueError("cannot compute logarithm of non-positive number")

    # A positive power of two has exactly one bit set, so n & (n - 1) == 0.
    # This replaces the previous O(bits) shift loop with an O(1) check.
    if n & (n - 1):
        raise ValueError("No solution could be found")

    i = n.bit_length() - 1
    assert num == (1 << i)
    return i
def exact_div(p, d, allow_divzero=False):
    """Find and return an integer n such that p == n * d

    If no such integer exists, this function raises ValueError.

    Both operands must be integers.

    If the second operand is zero, this function will raise ZeroDivisionError
    unless allow_divzero is true (default: False).
    """
    if not isinstance(p, int) or not isinstance(d, int):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(p).__name__, type(d).__name__))

    if d == 0 and allow_divzero:
        # 0 * 0 == 0 is the only solvable case when dividing by zero.
        quotient = 0
        if p != 0:
            raise ValueError("No solution could be found")
    else:
        quotient, remainder = divmod(p, d)
        if remainder:
            raise ValueError("No solution could be found")

    assert p == quotient * d
    return quotient
# vim:set ts=4 sw=4 sts=4 expandtab:
| Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/Cryptodome/Util/_number_new.py | Python | gpl-2.0 | 3,847 |
#!/usr/bin/env python
import sys

# Make the local androguard checkout importable ahead of site-packages.
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)

from androguard.core.bytecodes import apk
from xml.dom import minidom

# Demo: decode a binary AndroidManifest (AXML) and pretty-print it as XML.
# NOTE: Python 2 syntax (print statement).
ap = apk.AXMLPrinter( open("examples/axml/AndroidManifest2.xml", "r").read() )

print minidom.parseString( ap.getBuff() ).toxml()
| uTest/Androguard | demos/axml_format_1.py | Python | apache-2.0 | 289 |
# ====================================================================
# Copyright (c) 2014 Open Source Applications Foundation.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ====================================================================
#
import sys, os
from unittest import TestCase, main
from icu import *
class TestListFormatter(TestCase):
    """Tests for icu.ListFormatter list-joining behavior."""

    def testDefault(self):
        """A default-locale formatter uses the US-English joining pattern."""
        Locale.setDefault(Locale.getUS())
        formatter = ListFormatter.createInstance()
        text = formatter.format(('a', 'b', 'c'))
        # assertEqual reports both values on failure, unlike the previous
        # assertTrue(text == ...) which only says "False is not true".
        self.assertEqual(text, u'a, b, and c')

    def testLocale(self):
        """An explicit French locale uses the French joining pattern."""
        formatter = ListFormatter.createInstance(Locale.getFrance())
        text = formatter.format(('a', 'b', 'c'))
        self.assertEqual(text, u'a, b et c')
# Allow running this test module directly via unittest.main().
if __name__ == "__main__":
    main()
| sciyoshi/pyicu | test/test_ListFormatter.py | Python | mit | 1,821 |
import logging
import subprocess
from flexmock import flexmock
import borgmatic.hooks.command
from borgmatic.commands import borgmatic as module
def test_run_configuration_runs_actions_for_each_repository():
    '''run_configuration() yields run_actions() results once per configured repository.'''
    flexmock(module.borg_environment).should_receive('initialize')
    expected_results = [flexmock(), flexmock()]
    flexmock(module).should_receive('run_actions').and_return(expected_results[:1]).and_return(
        expected_results[1:]
    )
    config = {'location': {'repositories': ['foo', 'bar']}}
    arguments = {'global': flexmock(monitoring_verbosity=1)}
    results = list(module.run_configuration('test.yaml', config, arguments))
    assert results == expected_results
def test_run_configuration_calls_hooks_for_prune_action():
    '''The prune action triggers both command hooks and monitoring hooks.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook').twice()
    flexmock(module.dispatch).should_receive('call_hooks').at_least().twice()
    flexmock(module).should_receive('run_actions').and_return([])
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'prune': flexmock()}
    list(module.run_configuration('test.yaml', config, arguments))
def test_run_configuration_executes_and_calls_hooks_for_create_action():
    '''The create action triggers both command hooks and monitoring hooks.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook').twice()
    flexmock(module.dispatch).should_receive('call_hooks').at_least().twice()
    flexmock(module).should_receive('run_actions').and_return([])
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
    list(module.run_configuration('test.yaml', config, arguments))
def test_run_configuration_calls_hooks_for_check_action():
    '''The check action triggers both command hooks and monitoring hooks.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook').twice()
    flexmock(module.dispatch).should_receive('call_hooks').at_least().twice()
    flexmock(module).should_receive('run_actions').and_return([])
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'check': flexmock()}
    list(module.run_configuration('test.yaml', config, arguments))
def test_run_configuration_calls_hooks_for_extract_action():
    '''The extract action triggers command hooks but no monitoring hooks.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook').twice()
    flexmock(module.dispatch).should_receive('call_hooks').never()
    flexmock(module).should_receive('run_actions').and_return([])
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'extract': flexmock()}
    list(module.run_configuration('test.yaml', config, arguments))
def test_run_configuration_does_not_trigger_hooks_for_list_action():
    '''The read-only list action triggers no hooks at all.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook').never()
    flexmock(module.dispatch).should_receive('call_hooks').never()
    flexmock(module).should_receive('run_actions').and_return([])
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'list': flexmock()}
    list(module.run_configuration('test.yaml', config, arguments))
def test_run_configuration_logs_actions_error():
    '''An error raised by run_actions() is converted into error log records.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook')
    flexmock(module.dispatch).should_receive('call_hooks')
    expected_results = [flexmock()]
    flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
    flexmock(module).should_receive('run_actions').and_raise(OSError)
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False)}
    results = list(module.run_configuration('test.yaml', config, arguments))
    assert results == expected_results
def test_run_configuration_logs_pre_hook_error():
    '''A failing pre-hook produces error logs and skips the actions entirely.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook').and_raise(OSError).and_return(None)
    expected_results = [flexmock()]
    flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
    flexmock(module).should_receive('run_actions').never()
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
    results = list(module.run_configuration('test.yaml', config, arguments))
    assert results == expected_results
def test_run_configuration_bails_for_pre_hook_soft_failure():
    '''A pre-hook soft failure (special exit code) skips actions without logging errors.'''
    flexmock(module.borg_environment).should_receive('initialize')
    error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
    flexmock(module.command).should_receive('execute_hook').and_raise(error).and_return(None)
    flexmock(module).should_receive('make_error_log_records').never()
    flexmock(module).should_receive('run_actions').never()
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
    results = list(module.run_configuration('test.yaml', config, arguments))
    assert results == []
def test_run_configuration_logs_post_hook_error():
    '''A failing post-hook (after successful actions) still produces error logs.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(
        OSError
    ).and_return(None)
    flexmock(module.dispatch).should_receive('call_hooks')
    expected_results = [flexmock()]
    flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
    flexmock(module).should_receive('run_actions').and_return([])
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
    results = list(module.run_configuration('test.yaml', config, arguments))
    assert results == expected_results
def test_run_configuration_bails_for_post_hook_soft_failure():
    '''A post-hook soft failure (special exit code) does not produce error logs.'''
    flexmock(module.borg_environment).should_receive('initialize')
    error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
    flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(
        error
    ).and_return(None)
    flexmock(module.dispatch).should_receive('call_hooks')
    flexmock(module).should_receive('make_error_log_records').never()
    flexmock(module).should_receive('run_actions').and_return([])
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
    results = list(module.run_configuration('test.yaml', config, arguments))
    assert results == []
def test_run_configuration_logs_on_error_hook_error():
    '''A failing on-error hook adds its own error logs on top of the action error's.'''
    flexmock(module.borg_environment).should_receive('initialize')
    flexmock(module.command).should_receive('execute_hook').and_raise(OSError)
    expected_results = [flexmock(), flexmock()]
    flexmock(module).should_receive('make_error_log_records').and_return(
        expected_results[:1]
    ).and_return(expected_results[1:])
    flexmock(module).should_receive('run_actions').and_raise(OSError)
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
    results = list(module.run_configuration('test.yaml', config, arguments))
    assert results == expected_results
def test_run_configuration_bails_for_on_error_hook_soft_failure():
    '''An on-error hook soft failure yields only the original action error logs.'''
    flexmock(module.borg_environment).should_receive('initialize')
    error = subprocess.CalledProcessError(borgmatic.hooks.command.SOFT_FAIL_EXIT_CODE, 'try again')
    flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(error)
    expected_results = [flexmock()]
    flexmock(module).should_receive('make_error_log_records').and_return(expected_results)
    flexmock(module).should_receive('run_actions').and_raise(OSError)
    config = {'location': {'repositories': ['foo']}}
    arguments = {'global': flexmock(monitoring_verbosity=1, dry_run=False), 'create': flexmock()}
    results = list(module.run_configuration('test.yaml', config, arguments))
    assert results == expected_results
def test_load_configurations_collects_parsed_configurations():
    # Each given path should be parsed and keyed by its filename, producing no logs.
    configuration = flexmock()
    other_configuration = flexmock()
    flexmock(module.validate).should_receive('parse_configuration').and_return(
        configuration
    ).and_return(other_configuration)
    configs, logs = tuple(module.load_configurations(('test.yaml', 'other.yaml')))
    assert configs == {'test.yaml': configuration, 'other.yaml': other_configuration}
    assert logs == []
def test_load_configurations_logs_critical_for_parse_error():
flexmock(module.validate).should_receive('parse_configuration').and_raise(ValueError)
configs, logs = tuple(module.load_configurations(('test.yaml',)))
assert configs == {}
assert {log.levelno for log in logs} == {logging.CRITICAL}
def test_log_record_does_not_raise():
module.log_record(levelno=1, foo='bar', baz='quux')
def test_log_record_with_suppress_does_not_raise():
module.log_record(levelno=1, foo='bar', baz='quux', suppress_log=True)
def test_make_error_log_records_generates_output_logs_for_message_only():
flexmock(module).should_receive('log_record').replace_with(dict)
logs = tuple(module.make_error_log_records('Error'))
assert {log['levelno'] for log in logs} == {logging.CRITICAL}
def test_make_error_log_records_generates_output_logs_for_called_process_error():
flexmock(module).should_receive('log_record').replace_with(dict)
flexmock(module.logger).should_receive('getEffectiveLevel').and_return(logging.WARNING)
logs = tuple(
module.make_error_log_records(
'Error', subprocess.CalledProcessError(1, 'ls', 'error output')
)
)
assert {log['levelno'] for log in logs} == {logging.CRITICAL}
assert any(log for log in logs if 'error output' in str(log))
def test_make_error_log_records_generates_logs_for_value_error():
flexmock(module).should_receive('log_record').replace_with(dict)
logs = tuple(module.make_error_log_records('Error', ValueError()))
assert {log['levelno'] for log in logs} == {logging.CRITICAL}
def test_make_error_log_records_generates_logs_for_os_error():
flexmock(module).should_receive('log_record').replace_with(dict)
logs = tuple(module.make_error_log_records('Error', OSError()))
assert {log['levelno'] for log in logs} == {logging.CRITICAL}
def test_make_error_log_records_generates_nothing_for_other_error():
flexmock(module).should_receive('log_record').replace_with(dict)
logs = tuple(module.make_error_log_records('Error', KeyError()))
assert logs == ()
def test_get_local_path_uses_configuration_value():
assert module.get_local_path({'test.yaml': {'location': {'local_path': 'borg1'}}}) == 'borg1'
def test_get_local_path_without_location_defaults_to_borg():
assert module.get_local_path({'test.yaml': {}}) == 'borg'
def test_get_local_path_without_local_path_defaults_to_borg():
assert module.get_local_path({'test.yaml': {'location': {}}}) == 'borg'
def test_collect_configuration_run_summary_logs_info_for_success():
flexmock(module.command).should_receive('execute_hook').never()
flexmock(module).should_receive('run_configuration').and_return([])
arguments = {}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert {log.levelno for log in logs} == {logging.INFO}
def test_collect_configuration_run_summary_executes_hooks_for_create():
flexmock(module).should_receive('run_configuration').and_return([])
arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert {log.levelno for log in logs} == {logging.INFO}
def test_collect_configuration_run_summary_logs_info_for_success_with_extract():
flexmock(module.validate).should_receive('guard_configuration_contains_repository')
flexmock(module).should_receive('run_configuration').and_return([])
arguments = {'extract': flexmock(repository='repo')}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert {log.levelno for log in logs} == {logging.INFO}
def test_collect_configuration_run_summary_logs_extract_with_repository_error():
flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise(
ValueError
)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
arguments = {'extract': flexmock(repository='repo')}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert logs == expected_logs
def test_collect_configuration_run_summary_logs_info_for_success_with_mount():
flexmock(module.validate).should_receive('guard_configuration_contains_repository')
flexmock(module).should_receive('run_configuration').and_return([])
arguments = {'mount': flexmock(repository='repo')}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert {log.levelno for log in logs} == {logging.INFO}
def test_collect_configuration_run_summary_logs_mount_with_repository_error():
flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise(
ValueError
)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
arguments = {'mount': flexmock(repository='repo')}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert logs == expected_logs
def test_collect_configuration_run_summary_logs_missing_configs_error():
arguments = {'global': flexmock(config_paths=[])}
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
logs = tuple(module.collect_configuration_run_summary_logs({}, arguments=arguments))
assert logs == expected_logs
def test_collect_configuration_run_summary_logs_pre_hook_error():
flexmock(module.command).should_receive('execute_hook').and_raise(ValueError)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert logs == expected_logs
def test_collect_configuration_run_summary_logs_post_hook_error():
flexmock(module.command).should_receive('execute_hook').and_return(None).and_raise(ValueError)
flexmock(module).should_receive('run_configuration').and_return([])
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
arguments = {'create': flexmock(), 'global': flexmock(monitoring_verbosity=1, dry_run=False)}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert expected_logs[0] in logs
def test_collect_configuration_run_summary_logs_for_list_with_archive_and_repository_error():
flexmock(module.validate).should_receive('guard_configuration_contains_repository').and_raise(
ValueError
)
expected_logs = (flexmock(),)
flexmock(module).should_receive('make_error_log_records').and_return(expected_logs)
arguments = {'list': flexmock(repository='repo', archive='test')}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert logs == expected_logs
def test_collect_configuration_run_summary_logs_info_for_success_with_list():
flexmock(module).should_receive('run_configuration').and_return([])
arguments = {'list': flexmock(repository='repo', archive=None)}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert {log.levelno for log in logs} == {logging.INFO}
def test_collect_configuration_run_summary_logs_run_configuration_error():
flexmock(module.validate).should_receive('guard_configuration_contains_repository')
flexmock(module).should_receive('run_configuration').and_return(
[logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Error'))]
)
flexmock(module).should_receive('make_error_log_records').and_return([])
arguments = {}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert {log.levelno for log in logs} == {logging.CRITICAL}
def test_collect_configuration_run_summary_logs_run_umount_error():
flexmock(module.validate).should_receive('guard_configuration_contains_repository')
flexmock(module).should_receive('run_configuration').and_return([])
flexmock(module.borg_umount).should_receive('unmount_archive').and_raise(OSError)
flexmock(module).should_receive('make_error_log_records').and_return(
[logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg='Error'))]
)
arguments = {'umount': flexmock(mount_point='/mnt')}
logs = tuple(
module.collect_configuration_run_summary_logs({'test.yaml': {}}, arguments=arguments)
)
assert {log.levelno for log in logs} == {logging.INFO, logging.CRITICAL}
def test_collect_configuration_run_summary_logs_outputs_merged_json_results():
flexmock(module).should_receive('run_configuration').and_return(['foo', 'bar']).and_return(
['baz']
)
stdout = flexmock()
stdout.should_receive('write').with_args('["foo", "bar", "baz"]').once()
flexmock(module.sys).stdout = stdout
arguments = {}
tuple(
module.collect_configuration_run_summary_logs(
{'test.yaml': {}, 'test2.yaml': {}}, arguments=arguments
)
)
| witten/borgmatic | tests/unit/commands/test_borgmatic.py | Python | gpl-3.0 | 18,872 |
from article import ArticleForm | liyigerry/caixiang | mysite/forms/__init__.py | Python | mit | 31 |
import glob
import logging
import os
import subprocess
from plugins import BaseAssembler
from yapsy.IPlugin import IPlugin
class A6Assembler(BaseAssembler, IPlugin):
    """Assembly plugin driving the A6 assembler via an a5-style library file."""
    # TODO: update quast logic. For now output scaffolds as contigs.
    # OUTPUT = 'scaffolds'
    def run(self):
        reads = self.data.readsets
        cmd_args = [self.executable]
        num_pe = 0
        # Describe each readset in an a5-format library file passed to the assembler.
        libfile = open(os.path.join(self.outpath, 'a5lib.out'), 'w')
        for d in reads:
            libfile.write('[LIB]\n')
            if d.type == 'paired':
                num_pe += 1
                # One file means interleaved (shuffled) pairs; two means split mates.
                if len(d.files) == 1:
                    libfile.write('shuf={}\n'.format(d.files[0]))
                elif len(d.files) == 2:
                    libfile.write('p1={}\n'.format(d.files[0]))
                    libfile.write('p2={}\n'.format(d.files[1]))
            elif d.type == 'single':
                for up in d.files:
                    libfile.write('up={}\n'.format(up))
            try:
                # NOTE(review): the bare except also swallows AttributeError, so this
                # tolerates readsets without an 'insert' attribute entirely —
                # presumably intentional best-effort; confirm.
                assert d.insert is not None
                libfile.write('ins={}\n'.format(d.insert))
            except:
                logging.info('No Insert Info Given')
        cmd_args.append(libfile.name)
        libfile.close()
        if not num_pe:
            # A6 requires at least one paired-end library; bail out before running.
            logging.error('a6 expect at least one paired-end library')
            return
        cmd_args.append('a6')
        self.arast_popen(cmd_args, cwd=self.outpath)
        contigs = glob.glob(self.outpath + '/*.contigs.fasta')
        scaffolds = glob.glob(self.outpath + '/*.final.scaffolds.fasta')
        output = {}
        if contigs:
            # Scaffolds are deliberately reported as contigs too (see TODO above).
            # output['contigs'] = contigs
            output['contigs'] = scaffolds
        if scaffolds:
            output['scaffolds'] = scaffolds
        return output
| sebhtml/assembly | lib/assembly/plugins/a6.py | Python | mit | 1,774 |
# coding: utf-8
import requests
import credential
def swift_auth():
    """Authenticate against the SoftLayer Keystone v3 identity endpoint.

    Returns:
        [token, endpoint]: the X-Subject-Token header value and the object
        storage URL extracted from the service catalog.
    """
    import json  # local import: keeps this script's top-level imports unchanged

    cred = credential.credential()
    userId, password, projectId = cred.get()
    url = 'https://lon-identity.open.softlayer.com/v3/auth/tokens'
    headers = {'Content-Type': 'application/json'}
    # Build the payload as a dict and serialize with json.dumps so credentials
    # containing quotes or backslashes are escaped instead of corrupting the
    # JSON (the previous ``%``-interpolated template did no escaping).
    payload = {
        'auth': {
            'identity': {
                'methods': ['password'],
                'password': {
                    'user': {
                        'id': userId,
                        'password': password,
                    }
                }
            },
            'scope': {
                'project': {
                    'id': projectId,
                }
            }
        }
    }
    response = requests.post(url, data=json.dumps(payload), headers=headers)
    token = response.headers['X-Subject-Token']
    # NOTE(review): hard-coded catalog indices ([7][3]) assume a fixed service
    # catalog ordering — searching by service type/region would be robust; confirm.
    endpoint = response.json()['token']['catalog'][7]['endpoints'][3]['url']
    return [token, endpoint]
def swift_get(token, endpoint):
    # GET the 'test' container listing and dump the raw response for inspection.
    container = endpoint + '/test'
    headers = {'X-Auth-Token': token}
    response = requests.get(container, headers=headers)
    print (response.status_code)
    print (response.headers)
    print (response.text)
def swift_put(token, endpoint):
    # Upload the local python-test.txt into the 'test' container.
    put_filepath = endpoint + '/test/python-test.txt'
    headers = {'X-Auth-Token': token, 'Content-Type': 'text/html; charset=UTF-8'}
    with open('./python-test.txt') as myfile:
        mydata = myfile.read()
    response = requests.put(put_filepath, headers=headers, data=mydata)
    print (response.status_code)
    print (response.headers)
    print (response.text)
if __name__ == "__main__":
token, endpoint = swift_auth()
swift_get(token, endpoint)
swift_put(token, endpoint)
| tomoyuki-nakabayashi/bluemix-object-storage-test | src/swift-post.py | Python | mit | 1,778 |
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
# We use /people/[query] to automatically load the people list view
# and search for query. There are tree reserved urls, r'/people/me/?',
# r'/people/invite/?' and r'/people/alumni/?' which we want to point to
# 'profiles_view_my_profile, 'profiles_invite' and 'profiles_list_alumni'
# accordingly. If a user hits these urls without a trailing / we redirect him
# permanently to the url with the trailing slash, to avoid reaching
# the people list view with 'me' or 'invite' as a search term.
urlpatterns = patterns(
'',
url(r'^invite/$', 'remo.profiles.views.invite', name='profiles_invite'),
url(r'^invite$', RedirectView.as_view(url='invite/', permanent=True)),
url(r'^alumni$', RedirectView.as_view(url='alumni/', permanent=True)),
url(r'^alumni/$', 'remo.profiles.views.list_alumni',
name='profiles_alumni'),
url(r'^$', 'remo.profiles.views.list_profiles',
name='profiles_list_profiles'),
# This url is intentionally left without a $
url(r'^', 'remo.profiles.views.redirect_list_profiles',
name='profiles_list_profiles_redirect'),
)
| chirilo/remo | remo/profiles/people_urls.py | Python | bsd-3-clause | 1,185 |
def add_str_num(num_a: str, num_b: str) -> str:
    """Add two non-negative decimal integers given as strings, digit by digit."""
    width = max(len(num_a), len(num_b))
    # Right-align both operands with zero padding so digits line up by place value.
    a = num_a.rjust(width, '0')
    b = num_b.rjust(width, '0')
    digits = []
    carry = 0
    # Scan least-significant digit first, propagating the carry.
    for i in range(width - 1, -1, -1):
        carry, digit = divmod(int(a[i]) + int(b[i]) + carry, 10)
        digits.append(str(digit))
    # Spill any remaining carry as extra high-order digits.
    while carry:
        carry, digit = divmod(carry, 10)
        digits.append(str(digit))
    return ''.join(reversed(digits))
def _check(num, begin, l1, l2):
    """Verify one additive step starting at ``begin`` with operand widths l1 and l2.

    Returns (ok, result_length): ok is True when the sum of the two operands
    appears immediately after them in ``num``; result_length is the digit
    count of that sum (None when a leading-zero operand invalidates the step).
    """
    p = begin + l1
    q = p + l2
    x = num[begin:p]
    y = num[p:q]
    # Multi-digit operands must not carry leading zeros.
    if l1 > 1 and x.startswith('0'):
        return False, None
    if l2 > 1 and y.startswith('0'):
        return False, None
    res = add_str_num(x, y)
    end = q + len(res)
    # The sum must fit within the remaining characters of num.
    if end > len(num):
        return False, len(res)
    return num[q:end] == res, len(res)
def check_additive(num: str, w1: int, w2: int) -> bool:
    """Return True if num is an additive sequence whose first two numbers have widths w1, w2."""
    size = len(num)
    flag = True
    begin, l1, l2 = 0, w1, w2
    while flag:
        flag, next_len = _check(num, begin, l1, l2)
        if not flag:
            break
        # Slide the window: the second operand and the sum become the next pair.
        begin += l1
        l1 = l2
        l2 = next_len
        # Sequence is valid when the final pair plus its sum consumes the string.
        if begin + l1 + l2 == size:
            break
    return flag
class Solution:
    def isAdditiveNumber(self, num: str) -> bool:
        """LeetCode 306: can num be split into an additive sequence of 3+ numbers?"""
        length = len(num)
        # At least three numbers are required, so fewer than 3 digits cannot work.
        if length < 3:
            return False
        # Try every width pair for the first two numbers; each operand can take
        # at most half the string, leaving room for the sums.
        for w1 in range(1, length // 2 + 1):
            for w2 in range(1, length // 2 + 1):
                if check_additive(num, w1, w2):
                    return True
        return False
if __name__ == "__main__":
sol = Solution()
x = '112358'
print(sol.isAdditiveNumber(x))
x = '199100199'
print(sol.isAdditiveNumber(x))
x = ''
print(sol.isAdditiveNumber(x))
x = '101235'
print(sol.isAdditiveNumber(x))
x = "198019823962"
print(sol.isAdditiveNumber(x))
| shenfei/oj_codes | leetcode/python/n306_Additive_Number.py | Python | mit | 1,960 |
#!/usr/bin/python2.6
#
# Copyright (C) Christian Thurau, 2010.
# Licensed under the GNU General Public License (GPL).
# http://www.gnu.org/licenses/gpl.txt
"""
PyMF functions for computing matrix/simplex volumes
cmdet(): Cayley-Menger Determinant
simplex_volume(): Ordinary simplex volume
"""
import numpy as np
# ``factorial`` has moved between scipy releases:
#   scipy < 0.17:  scipy.misc.common.factorial
#   scipy < 1.3:   scipy.misc.factorial
#   scipy >= 1.3:  scipy.special.factorial
# Try locations from oldest to newest so any installed version works, and
# catch only ImportError rather than masking unrelated failures.
try:
    from scipy.misc.common import factorial
except ImportError:
    try:
        from scipy.misc import factorial
    except ImportError:
        from scipy.special import factorial
__all__ = ["cmdet", "simplex"]
def cmdet(d):
    """Simplex volume from the Cayley-Menger determinant.

    ``d`` is the pairwise Euclidean distance matrix of the simplex vertices
    (plain distances — NOT squared; squaring happens here).
    """
    n = d.shape[0]
    # Bordered matrix: zero corner, first row/column of ones, squared distances inside.
    bordered = np.ones((n + 1, n + 1))
    bordered[0, 0] = 0.0
    bordered[1:, 1:] = np.square(d)
    dim = np.float32(n - 1)
    prefactor = ((-1.0) ** (dim + 1)) / ((2 ** dim) * (factorial(dim) ** 2))
    volume_sq = prefactor * np.linalg.det(bordered)
    # Rounding error can push tiny results slightly negative; clamp via abs().
    return np.sqrt(np.abs(volume_sq))
def simplex(d):
    """Volume of the simplex whose vertices are the columns of coordinate matrix ``d``."""
    num_vertices = d.shape[1]
    # Homogeneous coordinates: prepend a row of ones above the vertex matrix.
    homogeneous = np.vstack((np.ones((1, num_vertices)), d))
    return np.abs(np.linalg.det(homogeneous)) / factorial(num_vertices - 1)
| urinieto/SegmenterMIREX2014 | pymf/vol.py | Python | bsd-3-clause | 1,024 |
import click
from chakin.cli import pass_context, json_loads
from chakin.decorators import custom_exception, dict_output
@click.command('load_fasta')
@click.argument("fasta", type=str)
@click.argument("organism_id", type=int)
@click.option(
"--sequence_type",
help="Sequence type",
default="contig",
show_default=True,
type=str
)
@click.option(
"--analysis_id",
help="Analysis ID",
type=int
)
@click.option(
"--re_name",
help="Regular expression to extract the feature name from the fasta sequence id (first capturing group will be used).",
type=str
)
@click.option(
"--re_uniquename",
help="Regular expression to extract the feature name from the fasta sequence id (first capturing group will be used).",
type=str
)
@click.option(
"--match_on_name",
help="Match existing features using their name instead of their uniquename",
is_flag=True
)
@click.option(
"--update",
help="Update existing feature with new sequence instead of throwing an error",
is_flag=True
)
@click.option(
"--db",
help="External database to cross reference to.",
type=int
)
@click.option(
"--re_db_accession",
help="Regular expression to extract an external database accession from the fasta sequence id (first capturing group will be used).",
type=str
)
@click.option(
"--rel_type",
help="Relation type to parent feature ('part_of' or 'derives_from').",
type=str
)
@click.option(
"--re_parent",
help="Regular expression to extract parent uniquename from the fasta sequence id (first capturing group will be used).",
type=str
)
@click.option(
"--parent_type",
help="Sequence type of the parent feature",
type=str
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, fasta, organism_id, sequence_type="contig", analysis_id="", re_name="", re_uniquename="", match_on_name=False, update=False, db="", re_db_accession="", rel_type="", re_parent="", parent_type=""):
"""Load features from a fasta file
Output:
Number of inserted sequences
"""
return ctx.gi.feature.load_fasta(fasta, organism_id, sequence_type=sequence_type, analysis_id=analysis_id, re_name=re_name, re_uniquename=re_uniquename, match_on_name=match_on_name, update=update, db=db, re_db_accession=re_db_accession, rel_type=rel_type, re_parent=re_parent, parent_type=parent_type)
| abretaud/python-chado | chakin/commands/feature/load_fasta.py | Python | mit | 2,380 |
# -*- Mode: Python; coding: utf-8 -*-
##
## Copyright (C) 2010 Mandriva S.A. <http://www.mandriva.com>
## All rights reserved
##
## This program is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by the Free
## Software Foundation, either version 3 of the License, or any later version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
## FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
## more details.
##
## You should have received a copy of the GNU General Public License along with
## this program. If not, see <http://www.gnu.org/licenses/>.
##
## Contributor(s): J. Victor Duarte Martins <jvictor@mandriva.com>
##
"""
A synthesis.hdlist.cz parser
"""
import gzip
import re
import sys
from backports import lzma
__all__ = ['parse']
def _parse_rpm_name(name):
"""Returns (name, version, release, arch) tuple from a rpm
package name.
"""
# TODO This could be put in a package for general use in other
# scripts.
parts = name.split('-')
release_arch = parts[-1].split('.')
arch = release_arch[-1]
dist = '.'.join(release_arch[:-1])
release = parts[-2]
version = parts[-3]
name = '-'.join(parts[0:-3])
return (name, version, release, dist, arch)
def _parse_rpm_capability_list(cap_str_list):
"""Parse a list of capabilities specifications strings and
their restrictions. Returns a list of dictionaries for each
capability."""
cap_list = []
for cap_str in cap_str_list:
m = re.match('^(?P<name>[^[]+)(?:\[\*])*(?P<restriction>\[.*])?',
cap_str)
if m is None:
continue # ignore malformed names
cap = {'name': m.group('name')}
restriction = m.group('restriction')
if restriction is not None:
# TODO This will accept restrictions with only
# conditions, or invalid conditions (like =, or >=<):
r = re.match('\[(?P<condition>[<>=]*) *(?P<version>.*)]',
restriction)
if r is not None:
cap['restriction'] = {'condition': r.group('condition'),
'version': r.group('version')}
cap_list.append(cap)
return tuple(cap_list)
def handleline(pkg, line, add_raw):
    """Merge one synthesis hdlist line ('@'-separated fields) into the pkg dict."""
    if add_raw:
        # Keep the verbatim input when the caller asked for it.
        if 'raw' not in pkg:
            pkg['raw'] = ''
        pkg['raw'] += line
    # Lines look like "@<type>@field1@field2..."; drop the empty leading field.
    fields = line.rstrip('\n').split('@')[1:]
    ltype = fields.pop(0)
    if ltype == 'info':
        (pkg['name'],
         pkg['version'],
         pkg['release'],
         pkg['dist'],
         pkg['arch']) = _parse_rpm_name(fields.pop(0))
        for field in ('epoch', 'size', 'group'):
            pkg[field] = fields.pop(0)
    elif ltype == 'summary':
        pkg['summary'] = fields.pop(0)
    elif ltype in ('requires', 'provides', 'conflict', 'obsoletes'):
        pkg[ltype] = _parse_rpm_capability_list(fields)
def parse(hdlist, add_raw=False):
    """Create a generator of packages parsed from synthesis hdlist
    file."""
    pkg = {}
    try:
        # First assume a gzip-compressed hdlist.
        for line in gzip.open(hdlist, 'rb'):
            handleline(pkg, line, add_raw)
            if 'name' in pkg:
                # An @info line completes a package record; emit it and start fresh.
                yield pkg
                pkg = {}
    except IOError:
        # Not gzip: retry the whole file as xz/lzma.  NOTE(review): packages
        # already yielded from a partially-read gzip stream are not rolled
        # back — presumably the failure occurs on the first read; confirm.
        for line in lzma.open(hdlist, 'rb'):
            handleline(pkg, line, add_raw)
            if 'name' in pkg:
                yield pkg
                pkg = {}
if __name__ == '__main__':
hdlist = sys.argv[1]
pkgs = sys.argv[2:]
found = []
metadata = []
for p in parse(hdlist, True):
metadata.append(p)
# do half-assed backwards search
for m in metadata:
if m['name'] in pkgs:
found.append(m)
# check if we didn't get all the packages
if len(pkgs) != len(found): # we've missed something
missing = []
for i in pkgs:
ok = False
for f in found:
if f['name'] == i:
ok = True
break
if ok == False:
missing.append(i)
print "could not find some packages in repo: " + ' '.join(missing)
sys.exit(1)
# we have all packages! now do basic dep resolution
for pkg in found:
if 'requires' in pkg:
for c in pkg['requires']:
lookingFor = c['name']
# search through all packages
satisfied = False
for m in metadata:
if m['name'] == lookingFor:
if m not in found:
found.append(m)
satisfied = True
break
for prov in m['provides']:
if prov['name'] == lookingFor:
if m not in found:
found.append(m)
satisfied = True
break
if satisfied == True:
break
if satisfied == False:
print "package %s requires unsatisfied dep %s" % (pkg['name'], lookingFor)
sys.exit(1)
# yay okay let's print package lists and exit
for pkg in found:
#if pkg['epoch'] == '0':
print "%s-%s-%s-%s.%s" % (pkg['name'],pkg['version'],pkg['release'],pkg['dist'],pkg['arch'])
#else:
# print "%s-%s:%s-%s-%s.%s" % (pkg['name'],pkg['epoch'],pkg['version'],pkg['release'],pkg['dist'],pkg['arch'])
# for p in parse(hdlist, True):
# print "-" * 70
# print p['raw']
# print ("name = %s\n"
# "version = %s\n"
# "release = %s\n"
# "arch = %s\n"
# "epoch = %s\n"
# "size = %s (%sK)\n"
# "group = %s\n"
# "summary:\n"
# "%s") % (p['name'], p['version'], p['release'], p['arch'],
# p['epoch'], p['size'], int(p['size']) / 1024.0, p['group'],
# p['summary'])
#
# for cap in ('requires', 'provides', 'conflict', 'obsoletes'):
# if cap in p:
# print cap
# for c in p[cap]:
# print "- name: %s" % c['name'],
# if 'restriction' in c:
# print "%s %s" % (c['restriction']['condition'],
# c['restriction']['version']),
# print
#raw_input()
| robxu9/omvbootstrap | parse_synthesis.py | Python | gpl-3.0 | 6,620 |
import datetime
import calendar
import logging
import operator
from collections import OrderedDict
import httpagentparser
from google.appengine.ext import ndb
class Url (ndb.Model):
    """Tracked link target, attributed to a list/campaign pair; clicks carry its tags."""
    url = ndb.StringProperty()
    # HTML element the link was rendered as (defaults to an anchor tag).
    html_tag = ndb.StringProperty(default='a')
    list_id = ndb.StringProperty()
    campaign_id = ndb.StringProperty()
    tags = ndb.StringProperty(repeated=True, required=False)
    created = ndb.DateTimeProperty(auto_now_add=True)
class Stats (ndb.Model):
list_id = ndb.StringProperty()
campaign_id = ndb.StringProperty()
total_clicks = ndb.IntegerProperty(default=0)
total_opens = ndb.IntegerProperty(default=0)
total_sends = ndb.IntegerProperty(default=0)
clicks = ndb.JsonProperty(compressed=True)
tags = ndb.JsonProperty(compressed=True, required=False)
opens = ndb.JsonProperty(compressed=True)
clients = ndb.JsonProperty(compressed=True, required=False)
urls = ndb.JsonProperty(compressed=True, required=False)
created = ndb.DateTimeProperty(auto_now_add=True)
last_compiled = ndb.DateTimeProperty(auto_now=True)
def open_rate (self):
try:
rate = (self.total_opens / float(self.total_sends)) * 100
return round(rate)
except:
return 0
def clients_sorted (self):
return reversed(sorted(self.clients.iteritems(), key=operator.itemgetter(1)))
def tags_sorted (self):
tags = reversed(sorted(self.tags.iteritems(), key=operator.itemgetter(1)))
grouped = OrderedDict()
totals = {}
for tag in tags:
temp = tag[0].split(':')
if len(temp) > 1:
g = temp[0]
t = temp[1]
else:
g = 'Ungrouped'
t = temp[0]
if grouped.has_key(g):
grouped[g].append([t, None, tag[1]])
totals[g] += tag[1]
else:
grouped[g] = [[t, None, tag[1]]]
totals[g] = tag[1]
for key in grouped.keys():
if key != 'Ungrouped':
total = 0
for t in grouped[key]:
total += t[2]
for t in grouped[key]:
t[1] = round((float(t[2]) / total) * 100, 1)
return grouped.items()
def urls_sorted (self):
return reversed(sorted(self.urls.iteritems(), key=operator.itemgetter(1)))
def opens_pc (self, count):
pc = (float(count) / self.total_opens) * 100
return round(pc, 1)
def clicks_pc (self, count):
pc = (float(count) / self.total_clicks) * 100
return round(pc, 1)
def process_track (self, t, ptype):
if t.created.minute % 10 >= 5:
m = t.created.minute + (10 - (t.created.minute % 10))
else:
m = t.created.minute - (t.created.minute % 10)
if m == 60:
time = t.created.replace(minute=0, second=0, microsecond=0)
time = time + datetime.timedelta(hours=1)
else:
time = t.created.replace(minute=m, second=0, microsecond=0)
key = calendar.timegm(time.timetuple())
if self.temp.has_key(key):
self.temp[key] += 1
else:
self.temp[key] = 1
if ptype == 'clicks':
if t.tags:
for tag in t.tags:
if tag in self.tags:
self.tags[tag] += 1
else:
self.tags[tag] = 1
url = t.url.get().url
if url in self.urls:
self.urls[url] += 1
else:
self.urls[url] = 1
elif ptype == 'opens':
key = 'Other'
if t.email_client:
key = t.email_client
elif t.browser_os:
key = t.browser_os
if key in self.clients:
self.clients[key] += 1
else:
self.clients[key] = 1
def sort_data (self, ptype):
keys = self.temp.keys()
keys.sort()
perm = []
for k in keys:
perm.append((k, self.temp[k]))
setattr(self, ptype, perm)
def process (self, ptype):
ttype = ptype[:-1]
cursor = None
total = 0
self.temp = {}
if ptype == 'clicks':
self.tags = {}
self.urls = {}
if ptype == 'opens':
self.clients = {}
self.total_sends = 0
from bulkmail.api.models import Campaign
c = Campaign.query(Campaign.campaign_id == self.campaign_id, Campaign.list_id == self.list_id).get()
for key in c.send_data:
sd = key.get()
self.total_sends += len(sd.data)
while 1:
tracks, cursor, more = Track.query(
Track.list_id == self.list_id,
Track.campaign_id == self.campaign_id,
Track.ttype == ttype,
).fetch_page(100, start_cursor=cursor)
for t in tracks:
self.process_track(t, ptype)
total += 1
if more and cursor:
continue
else:
break
setattr(self, 'total_' + ptype, total)
self.sort_data(ptype)
WEB_CLIENTS = (
('google.com', 'GMail'),
('yahoo.com', 'Yahoo'),
('live.com', 'Outlook.com'),
)
EMAIL_CLIENTS = (
('Outlook', 'Outlook'),
)
class Track (ndb.Model):
ttype = ndb.StringProperty() #open, click, image
list_id = ndb.StringProperty()
campaign_id = ndb.StringProperty()
user_agent = ndb.StringProperty(required=False)
referer = ndb.StringProperty(required=False)
browser_os = ndb.StringProperty(required=False)
browser_name = ndb.StringProperty(required=False)
browser_version = ndb.IntegerProperty(required=False)
email_client = ndb.StringProperty(required=False)
email = ndb.StringProperty(required=False)
url = ndb.KeyProperty(kind=Url, required=False)
tags = ndb.StringProperty(repeated=True, required=False)
created = ndb.DateTimeProperty(auto_now_add=True)
def detect_browser (self):
if self.user_agent:
for client in EMAIL_CLIENTS:
if client[0] in self.user_agent:
self.email_client = client[1]
break
b = httpagentparser.detect(self.user_agent)
if 'dist' in b and 'name' in b['dist']:
self.browser_os = b['dist']['name']
elif 'os' in b and 'name' in b['os']:
self.browser_os = b['os']['name']
if 'browser' in b:
if 'name' in b['browser']:
self.browser_name = b['browser']['name']
if 'version' in b['browser']:
try:
self.browser_version = int(b['browser']['version'].split('.')[0])
except:
pass
if self.referer:
for client in WEB_CLIENTS:
if client[0] in self.referer:
self.email_client = client[1]
break
| pizzapanther/GAE-Bulk-Mailer | bulkmail/tracking/models.py | Python | bsd-2-clause | 6,668 |
#!/usr/bin/env python
# May God Bless Us All
import struct
import sys
import subprocess
import re
import os
import json
# Constants
COMMENT_STR = "\n\tProblem Name = {0}\n\tProblem Link = {1}\n\tUser = {2}\n"
PY_COMMENT_START = "'''"
PY_COMMENT_END = "'''"
CPP_JAVA_START = "/*"
CPP_JAVA_END = "*/"
IDE = {
'c' : 'C_IDE',
'cpp' : 'CPP_IDE',
'java' : 'JAVA_IDE',
'py': 'PYTHON_IDE'
}
def construct_heading_comment(prob):
    '''
    This function constructs the heading comment on the solution file depending
    on the problem parameters
    Args:
        prob: Problem JSON object
    Return:
        A properly formatted comment string
    '''
    # Adding problem parameters to comment
    comment = COMMENT_STR.format(
        prob['problem_name'],
        prob['problem_url'],
        prob['user_name'],
    )
    # Enclosing comments using appropriate syntax depending on language
    if prob['lang'] == 'py':
        comment = PY_COMMENT_START + comment + PY_COMMENT_END + '\n\n'
    else:
        comment = CPP_JAVA_START + comment + CPP_JAVA_END + '\n\n'
    # NOTE: never print here — stdout carries the length-prefixed native
    # messaging protocol, and any stray write corrupts the stream Chrome reads
    # (the previous debug ``print comment`` did exactly that).
    return comment
# Helper function that sends a message to the chrome-plugin.
def send_message(message):
    '''
    This function writes a message to the stdout which can be read by Chrome
    Args:
        message: The message string to be written
    '''
    # json.dumps escapes quotes/backslashes in the message; the previous
    # '{"msg": "%s"}' interpolation produced invalid JSON for such input.
    message = json.dumps({'msg': message})
    # Native-messaging framing: 4-byte native-endian length prefix, then payload.
    sys.stdout.write(struct.pack('I', len(message)))
    sys.stdout.write(message)
    sys.stdout.flush()
# Function that reads messages from the chrome-plugin
def read_func():
    '''
    This function read the message sent by chrome and takes required action
    '''
    # Read Message from stdin
    # Native-messaging framing: the first 4 bytes give the payload length.
    text_length_bytes = sys.stdin.read(4)
    if len(text_length_bytes) == 0:
        # Chrome closed the pipe; nothing more to do.
        sys.exit(0)
    # Parse the message
    text_length = struct.unpack('i', text_length_bytes)[0]
    text = sys.stdin.read(text_length).decode('utf-8')
    prob = json.loads(text)
    # Format Problem Name so that it's suitable for file name
    prob['problem_name'] = re.sub('[ ]+', ' ', prob['problem_name'])
    filename = re.sub(' ', '_', prob['problem_name'])
    # NOTE(review): "DEFAULT_SOLUTION_PATH" is a literal placeholder directory,
    # presumably rewritten at install time — confirm.
    filename = "DEFAULT_SOLUTION_PATH" + os.sep + filename + "." + prob['lang']
    try:
        if not os.path.isfile(filename):
            # Create Solution File
            file_content = construct_heading_comment(prob)
            # Language-specific template, e.g. "py_template.py", next to this script.
            file_name = '{0}_template.{0}'.format(prob['lang'])
            file_dir = os.path.dirname(__file__)
            with open(os.path.join(file_dir, file_name), "r") as template_file:
                file_content = file_content + template_file.read()
            file_stream = open(filename, "w")
            file_stream.write(file_content)
            file_stream.close()
        # Open the file using the IDE
        subprocess.Popen([IDE[prob['lang']], filename])
        send_message("Subprocess started for %s with file %s" %
                     (IDE[prob['lang']], filename))
    except Exception, err: #pylint: disable=W0703
        send_message("Unable to start Subprocess!" + str(err))
if __name__ == '__main__':
read_func()
sys.exit(0)
| ashish1294/code-now-CodeChef | host-program/prog.py | Python | apache-2.0 | 2,976 |
"""taskbuster URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from .views import home, home_files
from .views import home
# urlpatterns = [
# url(r'^admin/', include(admin.site.urls)),
# url(r'^$', home, name='home'),
# ]
# Locale-independent URLs: robots.txt / humans.txt are served without a
# language prefix.
urlpatterns = [
    url(r'^(?P<filename>(robots.txt)|(humans.txt))$',
        home_files, name='home-files'),
]
# Locale-prefixed URLs (e.g. /en/, /es/) resolved via Django's i18n routing.
urlpatterns += i18n_patterns(
    url(r'^$', home, name='home'),
    url(r'^admin/', include(admin.site.urls)),
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
# Directory inside the source tarball where the build happens.
WorkDir = "bc-1.06"
def setup():
    # Configure GNU bc with readline support for interactive line editing.
    autotools.configure("--with-readline")
def build():
    autotools.make()
def install():
    # Install the bc and dc binaries plus their man/info documentation.
    pisitools.dobin("bc/bc")
    pisitools.dobin("dc/dc")
    pisitools.doman("man/*.1")
    pisitools.doinfo("doc/*.info")
    pisitools.dodoc("AUTHORS", "FAQ", "NEWS", "README", "ChangeLog")
| examachine/pisi | tests/stress/lang/ef/actions.py | Python | gpl-3.0 | 602 |
from __future__ import division, print_function, absolute_import
from .storage import LabeledDataStorage
| yandex/rep | rep/data/__init__.py | Python | apache-2.0 | 106 |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from functools import partial
from datashape import discover
from datashape import string, object_, datetime_, Option
import datashape
import pandas as pd
import numpy as np
from ..convert import convert
possibly_missing = frozenset({string, datetime_})
def dshape_from_pandas(dtype):
    """Map a numpy/pandas dtype to a datashape type.

    Object dtypes are treated as strings, and the types that pandas
    represents with a missing-value sentinel (strings and datetimes,
    per ``possibly_missing``) are wrapped in ``Option`` to mark them
    as nullable.
    """
    ds = datashape.CType.from_numpy_dtype(dtype)
    if ds == object_:
        ds = string
    if ds in possibly_missing:
        return Option(ds)
    return ds
@discover.register(pd.DataFrame)
def discover_dataframe(df):
    """Discover the datashape of a DataFrame.

    The shape is the frame's length times a Record of
    (column name, column datashape) pairs.
    """
    fields = [(name, dshape_from_pandas(dtype))
              for name, dtype in zip(df.columns, df.dtypes)]
    return len(df) * datashape.Record(fields)
@discover.register(pd.Series)
def discover_series(s):
    """Discover the datashape of a Series: length * element datashape."""
    element = dshape_from_pandas(s.dtype)
    return len(s) * element
def coerce_datetimes(df):
    """ Make object columns into datetimes if possible

    Warning: this operates inplace.

    Example
    -------
    >>> df = pd.DataFrame({'dt': ['2014-01-01'], 'name': ['Alice']})
    >>> df.dtypes  # note that these are strings/object
    dt      object
    name    object
    dtype: object

    >>> df2 = coerce_datetimes(df)
    >>> df2
              dt   name
    0 2014-01-01  Alice

    >>> df2.dtypes  # note that only the datetime-looking-one was transformed
    dt      datetime64[ns]
    name            object
    dtype: object
    """
    # Only object (string-like) columns are candidates for coercion.
    objects = df.select_dtypes(include=['object'])
    # NOTE: In pandas < 0.17, pd.to_datetime(' ') == datetime(...), which is
    # not what we want. So we have to remove columns with empty or
    # whitespace-only strings to prevent erroneous datetime coercion.
    # Columns containing any alphabetic value are also skipped outright.
    columns = [
        c for c in objects.columns
        if not np.any(objects[c].str.isspace() | objects[c].str.isalpha())
    ]
    # errors='ignore' leaves non-datetime-like columns untouched.
    # NOTE(review): errors='ignore' is deprecated in pandas >= 2.2 and
    # removed in 3.0 — this relies on the pandas version pinned here.
    df2 = objects[columns].apply(partial(pd.to_datetime, errors='ignore'))
    # Write the coerced columns back into the original frame (in place).
    for c in df2.columns:
        df[c] = df2[c]
    return df
@convert.register(pd.Timestamp, datetime)
def convert_datetime_to_timestamp(dt, **kwargs):
    # Promote a stdlib datetime to a pandas Timestamp for the convert graph.
    return pd.Timestamp(dt)
@convert.register(pd.Timestamp, float)
def nan_to_nat(fl, **kwargs):
    """Convert a float NaN to pandas' NaT; refuse any other float.

    Raising NotImplementedError signals the convert dispatcher that this
    edge does not apply, so it can try another conversion path.
    """
    try:
        is_missing = np.isnan(fl)
    except TypeError:
        # Not a numeric value at all — this edge does not apply.
        is_missing = False
    if is_missing:
        # Only nan->nat edge
        return pd.NaT
    raise NotImplementedError()
# NaT and None both normalize to pandas' NaT missing-timestamp sentinel.
# NOTE(review): pd.tslib was deprecated and later removed from pandas;
# this registration relies on the (old) pandas version pinned by odo.
@convert.register(pd.Timestamp, (pd.tslib.NaTType, type(None)))
def convert_null_or_nat_to_nat(n, **kwargs):
    return pd.NaT
| cpcloud/odo | odo/backends/pandas.py | Python | bsd-3-clause | 2,435 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from perfis.views import Perfil
# Create your views here.
def index(request):
    """Render the landing page."""
    # TODO: fetch the current profile from the database (not implemented yet)
    return render(request, 'index.html')
def localizacao(request):
    """Placeholder view for the location page."""
    # TODO: implement the location ("localização") page here
    pass
from django.apps import AppConfig
from django.conf import settings
from grouprise import __release__
class CoreConfig(AppConfig):
    """Django application config for the grouprise core app."""
    name = 'grouprise.core'
    def ready(self):
        # Run the app's autodiscovery hook once Django has loaded all apps.
        self.module.autodiscover()
        # configure sentry if DSN has been defined
        if settings.SENTRY_DSN:
            # Imported lazily so sentry_sdk is only required when enabled.
            import sentry_sdk
            from sentry_sdk.integrations.django import DjangoIntegration
            sentry_sdk.init(
                dsn=settings.SENTRY_DSN,
                integrations=[DjangoIntegration()],
                # send_default_pii attaches user data (id/email) to events
                send_default_pii=True,
                environment=settings.ENVIRONMENT,
                release=f'grouprise@{__release__}',
            )
| stadtgestalten/stadtgestalten | grouprise/core/apps.py | Python | agpl-3.0 | 682 |
# encoding: utf-8
"""nanotube.py - Window for setting up Carbon nanotubes and similar tubes.
"""
import gtk
from ase.gui.widgets import pack, cancel_apply_ok, oops
from ase.gui.setupwindow import SetupWindow
from ase.gui.pybutton import PyButton
from ase.structure import nanotube
import ase
import numpy as np
# Help text shown at the top of the Nanotube setup window.
introtext = """\
Set up a Carbon nanotube by specifying the (n,m) roll-up vector.
Please note that m <= n.
Nanotubes of other elements can be made by specifying the element
and bond length.\
"""
# Python snippet emitted by the "Python" button, parameterized with the
# chosen roll-up vector (n, m), tube length, bond length and element.
py_template = """
from ase.structure import nanotube
atoms = nanotube(%(n)i, %(m)i, length=%(length)i, bond=%(bl).3f, symbol='%(symb)s')
"""
class SetupNanotube(SetupWindow):
    "Window for setting up a (Carbon) nanotube."
    def __init__(self, gui):
        SetupWindow.__init__(self)
        self.set_title("Nanotube")
        vbox = gtk.VBox()
        # Introductory text
        self.packtext(vbox, introtext)
        # Choose the element and bond length
        label1 = gtk.Label("Element: ")
        #label.set_alignment(0.0, 0.2)
        self.element = gtk.Entry(max=3)
        self.element.set_text("C")
        self.element.connect('activate', self.update_element)
        self.bondlength = gtk.Adjustment(1.42, 0.0, 1000.0, 0.01)
        label2 = gtk.Label(" Bond length: ")
        label3 = gtk.Label("Å")
        bond_box = gtk.SpinButton(self.bondlength, 10.0, 3)
        pack(vbox, [label1, self.element, label2, bond_box, label3])
        # Red label used to report element-validation errors.
        self.elementinfo = gtk.Label("")
        self.elementinfo.modify_fg(gtk.STATE_NORMAL,
                                   gtk.gdk.color_parse('#FF0000'))
        pack(vbox, [self.elementinfo])
        pack(vbox, gtk.Label(""))
        # Choose the structure.
        pack(vbox, [gtk.Label("Select roll-up vector (n,m) and tube length:")])
        label1 = gtk.Label("n: ")
        label2 = gtk.Label(" m: ")
        self.n = gtk.Adjustment(5, 1, 100, 1)
        self.m = gtk.Adjustment(5, 0, 100, 1)
        spinn = gtk.SpinButton(self.n, 0, 0)
        spinm = gtk.SpinButton(self.m, 0, 0)
        label3 = gtk.Label(" Length: ")
        self.length = gtk.Adjustment(1, 1, 100, 1)
        spinl = gtk.SpinButton(self.length, 0, 0)
        pack(vbox, [label1, spinn, label2, spinm, label3, spinl])
        # Red label used to report (n, m) consistency corrections.
        self.err = gtk.Label("")
        self.err.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse('#FF0000'))
        pack(vbox, [self.err])
        pack(vbox, gtk.Label(""))
        self.n.connect('value-changed', self.update_n)
        self.m.connect('value-changed', self.update_m)
        # Buttons
        # NOTE(review): this label says "nanoparticle" — looks copied from
        # the nanoparticle window; probably meant "nanotube" (confirm).
        self.pybut = PyButton("Creating a nanoparticle.")
        self.pybut.connect('clicked', self.makeatoms)
        buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
                               apply=self.apply,
                               ok=self.ok)
        pack(vbox, [self.pybut, buts], end=True, bottom=True)
        # Finalize setup
        self.add(vbox)
        vbox.show()
        self.show()
        self.gui = gui
    def update_n(self, *args):
        # Keep the invariant m <= n by lowering m when n shrinks below it.
        if self.m.value > self.n.value:
            self.m.value = self.n.value
            self.err.set_text("m decreased!  (m may not be larger than n.)")
        else:
            self.err.set_text("")
    def update_m(self, *args):
        # Keep the invariant m <= n by raising n when m grows above it.
        if self.m.value > self.n.value:
            self.n.value = self.m.value
            self.err.set_text("n increased!  (m may not be larger than n.)")
        else:
            self.err.set_text("")
    def update_element(self, *args):
        "Called when a new element may have been entered."
        # Assumes the element widget is self.element and that a label
        # for errors is self.elementinfo.  The chemical symbol is
        # placed in self.legalelement - or None if the element is
        # invalid.
        elem = self.element.get_text()
        if not elem:
            self.invalid_element("  No element specified!")
            return False
        try:
            z = int(elem)
        except ValueError:
            # Probably a symbol
            try:
                z = ase.data.atomic_numbers[elem]
            except KeyError:
                self.invalid_element()
                return False
        try:
            symb = ase.data.chemical_symbols[z]
        except KeyError:
            self.invalid_element()
            return False
        self.elementinfo.set_text("")
        self.legal_element = symb
        return True
    def makeatoms(self, *args):
        # Build the Atoms object from the current widget state; also
        # regenerates the Python snippet shown by the Python button.
        self.update_element()
        if self.legal_element is None:
            self.atoms = None
            self.pybut.python = None
        else:
            n = int(self.n.value)
            m = int(self.m.value)
            symb = self.legal_element
            length = int(self.length.value)
            bl = self.bondlength.value
            self.atoms = nanotube(n, m, length=length, bond=bl, symbol=symb)
            self.pybut.python = py_template % {'n': n, 'm':m, 'length':length,
                                               'symb':symb, 'bl':bl}
    def apply(self, *args):
        # Push the generated structure into the GUI; report if invalid.
        self.makeatoms()
        if self.atoms is not None:
            self.gui.new_atoms(self.atoms)
            return True
        else:
            oops("No valid atoms.",
                 "You have not (yet) specified a consistent set of parameters.")
            return False
    def ok(self, *args):
        # Apply and close the window on success.
        if self.apply():
            self.destroy()
"""Test songpal media_player."""
from datetime import timedelta
import logging
from unittest.mock import AsyncMock, MagicMock, call, patch
from songpal import (
ConnectChange,
ContentChange,
PowerChange,
SongpalException,
VolumeChange,
)
from homeassistant.components import media_player, songpal
from homeassistant.components.songpal.const import SET_SOUND_SETTING
from homeassistant.components.songpal.media_player import SUPPORT_SONGPAL
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from . import (
CONF_DATA,
CONF_ENDPOINT,
CONF_NAME,
ENDPOINT,
ENTITY_ID,
FRIENDLY_NAME,
MAC,
MODEL,
SW_VERSION,
_create_mocked_device,
_patch_media_player_device,
)
from tests.common import MockConfigEntry, async_fire_time_changed
def _get_attributes(hass):
    """Return the attributes dict of the songpal entity's current state."""
    return hass.states.get(ENTITY_ID).as_dict()["attributes"]
async def test_setup_platform(hass):
    """Test the legacy setup platform."""
    mocked_device = _create_mocked_device(throw_exception=True)
    # songpal is config-entry only: YAML platform setup must be a no-op.
    with _patch_media_player_device(mocked_device):
        await async_setup_component(
            hass,
            media_player.DOMAIN,
            {
                media_player.DOMAIN: [
                    {
                        "platform": songpal.DOMAIN,
                        CONF_NAME: FRIENDLY_NAME,
                        CONF_ENDPOINT: ENDPOINT,
                    }
                ],
            },
        )
        await hass.async_block_till_done()
    # No device is set up
    mocked_device.assert_not_called()
    all_states = hass.states.async_all()
    assert len(all_states) == 0
async def test_setup_failed(hass, caplog):
    """Test failed to set up the entity."""
    # Device raises on connect: setup retries and logs warnings only.
    mocked_device = _create_mocked_device(throw_exception=True)
    entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
    entry.add_to_hass(hass)
    with _patch_media_player_device(mocked_device):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    all_states = hass.states.async_all()
    assert len(all_states) == 0
    warning_records = [x for x in caplog.records if x.levelno == logging.WARNING]
    assert len(warning_records) == 2
    assert not any(x.levelno == logging.ERROR for x in caplog.records)
    caplog.clear()
    # Make the device reachable again and fire the retry timer: the
    # entity should now be created without further warnings.
    utcnow = dt_util.utcnow()
    type(mocked_device).get_supported_methods = AsyncMock()
    with _patch_media_player_device(mocked_device):
        async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
        await hass.async_block_till_done()
    all_states = hass.states.async_all()
    assert len(all_states) == 1
    assert not any(x.levelno == logging.WARNING for x in caplog.records)
    assert not any(x.levelno == logging.ERROR for x in caplog.records)
async def test_state(hass):
    """Test state of the entity."""
    mocked_device = _create_mocked_device()
    entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
    entry.add_to_hass(hass)
    with _patch_media_player_device(mocked_device):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # Entity state and attributes mirror the mocked device defaults.
    state = hass.states.get(ENTITY_ID)
    assert state.name == FRIENDLY_NAME
    assert state.state == STATE_ON
    attributes = state.as_dict()["attributes"]
    assert attributes["volume_level"] == 0.5
    assert attributes["is_volume_muted"] is False
    assert attributes["source_list"] == ["title1", "title2"]
    assert attributes["source"] == "title2"
    assert attributes["supported_features"] == SUPPORT_SONGPAL
    # Device registry entry is keyed on the device's MAC address.
    device_registry = dr.async_get(hass)
    device = device_registry.async_get_device(identifiers={(songpal.DOMAIN, MAC)})
    assert device.connections == {(dr.CONNECTION_NETWORK_MAC, MAC)}
    assert device.manufacturer == "Sony Corporation"
    assert device.name == FRIENDLY_NAME
    assert device.sw_version == SW_VERSION
    assert device.model == MODEL
    entity_registry = er.async_get(hass)
    entity = entity_registry.async_get(ENTITY_ID)
    assert entity.unique_id == MAC
async def test_services(hass):
    """Test services."""
    mocked_device = _create_mocked_device()
    entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
    entry.add_to_hass(hass)
    with _patch_media_player_device(mocked_device):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    async def _call(service, **argv):
        # Helper: call a media_player service against our entity.
        await hass.services.async_call(
            media_player.DOMAIN,
            service,
            {"entity_id": ENTITY_ID, **argv},
            blocking=True,
        )
    # Power: toggle turns off because the mocked device reports "on".
    await _call(media_player.SERVICE_TURN_ON)
    await _call(media_player.SERVICE_TURN_OFF)
    await _call(media_player.SERVICE_TOGGLE)
    assert mocked_device.set_power.call_count == 3
    mocked_device.set_power.assert_has_calls([call(True), call(False), call(False)])
    # Volume: HA 0..1 maps to device 0..100; up/down step from 50 by 1.
    await _call(media_player.SERVICE_VOLUME_SET, volume_level=0.6)
    await _call(media_player.SERVICE_VOLUME_UP)
    await _call(media_player.SERVICE_VOLUME_DOWN)
    assert mocked_device.volume1.set_volume.call_count == 3
    mocked_device.volume1.set_volume.assert_has_calls([call(60), call(51), call(49)])
    await _call(media_player.SERVICE_VOLUME_MUTE, is_volume_muted=True)
    mocked_device.volume1.set_mute.assert_called_once_with(True)
    # Source selection: unknown titles are ignored, known ones activate.
    await _call(media_player.SERVICE_SELECT_SOURCE, source="none")
    mocked_device.input1.activate.assert_not_called()
    await _call(media_player.SERVICE_SELECT_SOURCE, source="title1")
    mocked_device.input1.activate.assert_called_once()
    # Custom set_sound_setting service targeting a single entity.
    await hass.services.async_call(
        songpal.DOMAIN,
        SET_SOUND_SETTING,
        {"entity_id": ENTITY_ID, "name": "name", "value": "value"},
        blocking=True,
    )
    mocked_device.set_sound_settings.assert_called_once_with("name", "value")
    mocked_device.set_sound_settings.reset_mock()
    # Add a second device with a distinct MAC so "all" fans out to both.
    mocked_device2 = _create_mocked_device()
    sys_info = MagicMock()
    sys_info.macAddr = "mac2"
    sys_info.version = SW_VERSION
    type(mocked_device2).get_system_info = AsyncMock(return_value=sys_info)
    entry2 = MockConfigEntry(
        domain=songpal.DOMAIN, data={CONF_NAME: "d2", CONF_ENDPOINT: ENDPOINT}
    )
    entry2.add_to_hass(hass)
    with _patch_media_player_device(mocked_device2):
        await hass.config_entries.async_setup(entry2.entry_id)
        await hass.async_block_till_done()
    await hass.services.async_call(
        songpal.DOMAIN,
        SET_SOUND_SETTING,
        {"entity_id": "all", "name": "name", "value": "value"},
        blocking=True,
    )
    mocked_device.set_sound_settings.assert_called_once_with("name", "value")
    mocked_device2.set_sound_settings.assert_called_once_with("name", "value")
async def test_websocket_events(hass):
    """Test websocket events."""
    mocked_device = _create_mocked_device()
    entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
    entry.add_to_hass(hass)
    with _patch_media_player_device(mocked_device):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # Setup subscribes to the 4 notification types below.
    mocked_device.listen_notifications.assert_called_once()
    assert mocked_device.on_notification.call_count == 4
    notification_callbacks = mocked_device.notification_callbacks
    # Volume notification updates mute flag and 0..1 level.
    volume_change = MagicMock()
    volume_change.mute = True
    volume_change.volume = 20
    await notification_callbacks[VolumeChange](volume_change)
    attributes = _get_attributes(hass)
    assert attributes["is_volume_muted"] is True
    assert attributes["volume_level"] == 0.2
    # Content notification switches the active source by uri.
    content_change = MagicMock()
    content_change.is_input = False
    content_change.uri = "uri1"
    await notification_callbacks[ContentChange](content_change)
    assert _get_attributes(hass)["source"] == "title2"
    content_change.is_input = True
    await notification_callbacks[ContentChange](content_change)
    assert _get_attributes(hass)["source"] == "title1"
    # Power notification flips the entity state.
    power_change = MagicMock()
    power_change.status = False
    await notification_callbacks[PowerChange](power_change)
    assert hass.states.get(ENTITY_ID).state == STATE_OFF
async def test_disconnected(hass, caplog):
    """Test disconnected behavior."""
    mocked_device = _create_mocked_device()
    entry = MockConfigEntry(domain=songpal.DOMAIN, data=CONF_DATA)
    entry.add_to_hass(hass)
    with _patch_media_player_device(mocked_device):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    async def _assert_state():
        # Invoked as the third reconnect attempt: while reconnecting the
        # entity must be reported unavailable.
        state = hass.states.get(ENTITY_ID)
        assert state.state == STATE_UNAVAILABLE
    connect_change = MagicMock()
    connect_change.exception = "disconnected"
    # Fail twice, then succeed (the success path runs _assert_state).
    type(mocked_device).get_supported_methods = AsyncMock(
        side_effect=[SongpalException(""), SongpalException(""), _assert_state]
    )
    notification_callbacks = mocked_device.notification_callbacks
    with patch("homeassistant.components.songpal.media_player.INITIAL_RETRY_DELAY", 0):
        await notification_callbacks[ConnectChange](connect_change)
    warning_records = [x for x in caplog.records if x.levelno == logging.WARNING]
    assert len(warning_records) == 2
    assert warning_records[0].message.endswith("Got disconnected, trying to reconnect")
    assert warning_records[1].message.endswith("Connection reestablished")
    assert not any(x.levelno == logging.ERROR for x in caplog.records)
| lukas-hetzenecker/home-assistant | tests/components/songpal/test_media_player.py | Python | apache-2.0 | 9,705 |
# Generated by Django 2.0.2 on 2018-08-10 08:54
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    # Creates the RoundBaseReviewForm through-model that attaches an
    # ordered list of review forms to a funds round.
    dependencies = [
        ('review', '0006_remove_review_review'),
        ('funds', '0040_add_duration_stream_to_streamfield_definition'),
    ]
    operations = [
        migrations.CreateModel(
            name='RoundBaseReviewForm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('form', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='review.ReviewForm')),
                ('round', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_forms', to='funds.RoundBase')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
| OpenTechFund/WebApp | opentech/apply/funds/migrations/0041_roundbasereviewform.py | Python | gpl-2.0 | 1,079 |
""" The tests below don't use translation at all. They run the GCs by
instantiating them and asking them to allocate memory by calling their
methods directly. The tests need to maintain by hand what the GC should
see as the list of roots (stack and prebuilt objects).
"""
# XXX VERY INCOMPLETE, low coverage
import py
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.memory.gctypelayout import TypeLayoutBuilder
from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int
from rpython.memory.gc import minimark, incminimark
from rpython.memory.gctypelayout import zero_gc_pointers_inside, zero_gc_pointers
from rpython.rlib.debug import debug_print
import pdb
WORD = LONG_BIT // 8  # size of a machine word in bytes
# Low-level helper types used to build the test object graphs:
# S is a doubly-linked node, RAW a non-GC struct holding GC pointers,
# VAR a GC array of S pointers and VARNODE a GC wrapper around one.
ADDR_ARRAY = lltype.Array(llmemory.Address)
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('S',
                         ('x', lltype.Signed),
                         ('prev', lltype.Ptr(S)),
                         ('next', lltype.Ptr(S))))
RAW = lltype.Struct('RAW', ('p', lltype.Ptr(S)), ('q', lltype.Ptr(S)))
VAR = lltype.GcArray(lltype.Ptr(S))
VARNODE = lltype.GcStruct('VARNODE', ('a', lltype.Ptr(VAR)))
class DirectRootWalker(object):
    """Fake root walker: instead of scanning a real stack it reports the
    roots the test maintains by hand in ``tester.stackroots``, plus the
    prebuilt (static) roots recorded by the layout builder."""
    def __init__(self, tester):
        self.tester = tester
    def walk_roots(self, collect_stack_root,
                   collect_static_in_prebuilt_nongc,
                   collect_static_in_prebuilt_gc,
                   is_minor=False):
        gc = self.tester.gc
        layoutbuilder = self.tester.layoutbuilder
        if collect_static_in_prebuilt_gc:
            for addrofaddr in layoutbuilder.addresses_of_static_ptrs:
                if addrofaddr.address[0]:
                    collect_static_in_prebuilt_gc(gc, addrofaddr)
        if collect_static_in_prebuilt_nongc:
            for addrofaddr in layoutbuilder.addresses_of_static_ptrs_in_nongc:
                if addrofaddr.address[0]:
                    collect_static_in_prebuilt_nongc(gc, addrofaddr)
        if collect_stack_root:
            # Copy the stackroots list into a raw address array so the GC
            # callback can update entries in place (objects may move), then
            # write the possibly-moved pointers back into the list.
            stackroots = self.tester.stackroots
            a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw')
            for i in range(len(a)):
                a[i] = llmemory.cast_ptr_to_adr(stackroots[i])
            a_base = lltype.direct_arrayitems(a)
            for i in range(len(a)):
                ai = lltype.direct_ptradd(a_base, i)
                collect_stack_root(gc, llmemory.cast_ptr_to_adr(ai))
            for i in range(len(a)):
                PTRTYPE = lltype.typeOf(stackroots[i])
                stackroots[i] = llmemory.cast_adr_to_ptr(a[i], PTRTYPE)
            lltype.free(a, flavor='raw')
    def _walk_prebuilt_gc(self, callback):
        # Prebuilt GC objects are registered via consider_constant instead.
        pass
    def finished_minor_collection(self):
        pass
class BaseDirectGCTest(object):
    """Common scaffolding: builds a GC instance with the hand-driven root
    walker plus helpers to allocate and mutate objects while honouring the
    GC's write barriers, as translated code would."""
    GC_PARAMS = {}
    def setup_method(self, meth):
        from rpython.config.translationoption import get_combined_translation_config
        config = get_combined_translation_config(translating=True).translation
        self.stackroots = []
        # Per-test GC parameters may be attached to the test method itself.
        GC_PARAMS = self.GC_PARAMS.copy()
        if hasattr(meth, 'GC_PARAMS'):
            GC_PARAMS.update(meth.GC_PARAMS)
        GC_PARAMS['translated_to_c'] = False
        self.gc = self.GCClass(config, **GC_PARAMS)
        self.gc.DEBUG = True
        self.rootwalker = DirectRootWalker(self)
        self.gc.set_root_walker(self.rootwalker)
        self.layoutbuilder = TypeLayoutBuilder(self.GCClass)
        self.get_type_id = self.layoutbuilder.get_type_id
        self.layoutbuilder.initialize_gc_query_function(self.gc)
        self.gc.setup()
    def consider_constant(self, p):
        # Register an immortal/prebuilt object with the layout builder.
        obj = p._obj
        TYPE = lltype.typeOf(obj)
        self.layoutbuilder.consider_constant(TYPE, obj, self.gc)
    def write(self, p, fieldname, newvalue):
        # Field store preceded by the GC write barrier (when required).
        if self.gc.needs_write_barrier:
            addr_struct = llmemory.cast_ptr_to_adr(p)
            self.gc.write_barrier(addr_struct)
        setattr(p, fieldname, newvalue)
    def writearray(self, p, index, newvalue):
        # Array-item store; uses the card-marking array barrier if the
        # GC provides one, otherwise the plain write barrier.
        if self.gc.needs_write_barrier:
            addr_struct = llmemory.cast_ptr_to_adr(p)
            if hasattr(self.gc, 'write_barrier_from_array'):
                self.gc.write_barrier_from_array(addr_struct, index)
            else:
                self.gc.write_barrier(addr_struct)
        p[index] = newvalue
    def malloc(self, TYPE, n=None):
        # Allocate through the GC and zero embedded GC pointers when the
        # GC does not hand out zero-filled memory itself.
        addr = self.gc.malloc(self.get_type_id(TYPE), n)
        debug_print(self.gc)
        obj_ptr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE))
        if not self.gc.malloc_zero_filled:
            zero_gc_pointers_inside(obj_ptr, TYPE)
        return obj_ptr
class DirectGCTest(BaseDirectGCTest):
    """Generic GC behaviour tests, run directly against a GC instance
    (no translation): survival through collections, prebuilt objects,
    ids, identity hashes and alignment."""
    def test_simple(self):
        p = self.malloc(S)
        p.x = 5
        self.stackroots.append(p)
        self.gc.collect()
        p = self.stackroots[0]
        assert p.x == 5
    def test_missing_stack_root(self):
        p = self.malloc(S)
        p.x = 5
        self.gc.collect()    # 'p' should go away
        py.test.raises(RuntimeError, 'p.x')
    def test_prebuilt_gc(self):
        k = lltype.malloc(S, immortal=True)
        k.x = 42
        self.consider_constant(k)
        self.write(k, 'next', self.malloc(S))
        k.next.x = 43
        self.write(k.next, 'next', self.malloc(S))
        k.next.next.x = 44
        self.gc.collect()
        assert k.x == 42
        assert k.next.x == 43
        assert k.next.next.x == 44
    def test_prebuilt_nongc(self):
        raw = lltype.malloc(RAW, immortal=True)
        self.consider_constant(raw)
        raw.p = self.malloc(S)
        raw.p.x = 43
        raw.q = self.malloc(S)
        raw.q.x = 44
        self.gc.collect()
        assert raw.p.x == 43
        assert raw.q.x == 44
    def test_many_objects(self):
        # Grows three doubly-linked loops, rooted respectively in the
        # stack, a prebuilt GC object and a prebuilt non-GC struct.
        def alloc2(i):
            a1 = self.malloc(S)
            a1.x = i
            self.stackroots.append(a1)
            a2 = self.malloc(S)
            a1 = self.stackroots.pop()
            a2.x = i + 1000
            return a1, a2
        def growloop(loop, a1, a2):
            self.write(a1, 'prev', loop.prev)
            self.write(a1.prev, 'next', a1)
            self.write(a1, 'next', loop)
            self.write(loop, 'prev', a1)
            self.write(a2, 'prev', loop)
            self.write(a2, 'next', loop.next)
            self.write(a2.next, 'prev', a2)
            self.write(loop, 'next', a2)
        def newloop():
            p = self.malloc(S)
            p.next = p          # initializing stores, no write barrier
            p.prev = p
            return p
        # a loop attached to a stack root
        self.stackroots.append(newloop())
        # another loop attached to a prebuilt gc node
        k = lltype.malloc(S, immortal=True)
        k.next = k
        k.prev = k
        self.consider_constant(k)
        # a third loop attached to a prebuilt nongc
        raw = lltype.malloc(RAW, immortal=True)
        self.consider_constant(raw)
        raw.p = newloop()
        # run!
        for i in range(100):
            a1, a2 = alloc2(i)
            growloop(self.stackroots[0], a1, a2)
            a1, a2 = alloc2(i)
            growloop(k, a1, a2)
            a1, a2 = alloc2(i)
            growloop(raw.p, a1, a2)
    def test_varsized_from_stack(self):
        # Scatter S objects into stack-rooted var-sized arrays and check
        # the stored values survive collections.
        expected = {}
        def verify():
            for (index, index2), value in expected.items():
                assert self.stackroots[index][index2].x == value
        x = 0
        for i in range(40):
            assert 'DEAD' not in repr(self.stackroots)
            a = self.malloc(VAR, i)
            assert 'DEAD' not in repr(a)
            self.stackroots.append(a)
            print 'ADDED TO STACKROOTS:', llmemory.cast_adr_to_int(
                llmemory.cast_ptr_to_adr(a))
            assert 'DEAD' not in repr(self.stackroots)
            for j in range(5):
                assert 'DEAD' not in repr(self.stackroots)
                p = self.malloc(S)
                assert 'DEAD' not in repr(self.stackroots)
                p.x = x
                index = x % len(self.stackroots)
                if index > 0:
                    index2 = (x / len(self.stackroots)) % index
                    a = self.stackroots[index]
                    assert len(a) == index
                    self.writearray(a, index2, p)
                    expected[index, index2] = x
                x += 1291
        verify()
        self.gc.collect()
        verify()
        self.gc.collect()
        verify()
    def test_varsized_from_prebuilt_gc(self):
        # Same idea, but the arrays hang off prebuilt VARNODE objects.
        expected = {}
        def verify():
            for (index, index2), value in expected.items():
                assert prebuilt[index].a[index2].x == value
        x = 0
        prebuilt = [lltype.malloc(VARNODE, immortal=True, zero=True)
                    for i in range(40)]
        for node in prebuilt:
            self.consider_constant(node)
        for i in range(len(prebuilt)):
            self.write(prebuilt[i], 'a', self.malloc(VAR, i))
            for j in range(20):
                p = self.malloc(S)
                p.x = x
                index = x % (i+1)
                if index > 0:
                    index2 = (x / (i+1)) % index
                    a = prebuilt[index].a
                    assert len(a) == index
                    self.writearray(a, index2, p)
                    expected[index, index2] = x
                x += 1291
        verify()
        self.gc.collect()
        verify()
        self.gc.collect()
        verify()
    def test_id(self):
        # gc.id() results must stay stable across allocations/collections.
        ids = {}
        def allocate_bunch(count=50):
            base = len(self.stackroots)
            for i in range(count):
                p = self.malloc(S)
                self.stackroots.append(p)
            for i in range(count):
                j = base + (i*1291) % count
                pid = self.gc.id(self.stackroots[j])
                assert isinstance(pid, int)
                ids[j] = pid
        def verify():
            for j, expected in ids.items():
                assert self.gc.id(self.stackroots[j]) == expected
        allocate_bunch(5)
        verify()
        allocate_bunch(75)
        verify()
        allocate_bunch(5)
        verify()
        self.gc.collect()
        verify()
        self.gc.collect()
        verify()
    def test_identityhash(self):
        # a "does not crash" kind of test
        p_const = lltype.malloc(S, immortal=True)
        self.consider_constant(p_const)
        # (1) p is in the nursery
        self.gc.collect()
        p = self.malloc(S)
        hash = self.gc.identityhash(p)
        print hash
        assert is_valid_int(hash)
        assert hash == self.gc.identityhash(p)
        self.stackroots.append(p)
        for i in range(6):
            self.gc.collect()
            assert hash == self.gc.identityhash(self.stackroots[-1])
        self.stackroots.pop()
        # (2) p is an older object
        p = self.malloc(S)
        self.stackroots.append(p)
        self.gc.collect()
        hash = self.gc.identityhash(self.stackroots[-1])
        print hash
        assert is_valid_int(hash)
        for i in range(6):
            self.gc.collect()
            assert hash == self.gc.identityhash(self.stackroots[-1])
        self.stackroots.pop()
        # (3) p is a gen3 object (for hybrid)
        p = self.malloc(S)
        self.stackroots.append(p)
        for i in range(6):
            self.gc.collect()
        hash = self.gc.identityhash(self.stackroots[-1])
        print hash
        assert is_valid_int(hash)
        for i in range(2):
            self.gc.collect()
            assert hash == self.gc.identityhash(self.stackroots[-1])
        self.stackroots.pop()
        # (4) p is a prebuilt object
        hash = self.gc.identityhash(p_const)
        print hash
        assert is_valid_int(hash)
        assert hash == self.gc.identityhash(p_const)
        # (5) p is actually moving (for the markcompact gc only?)
        p0 = self.malloc(S)
        self.stackroots.append(p0)
        p = self.malloc(S)
        self.stackroots.append(p)
        hash = self.gc.identityhash(p)
        self.stackroots.pop(-2)
        self.gc.collect()     # p0 goes away, p shifts left
        assert hash == self.gc.identityhash(self.stackroots[-1])
        self.gc.collect()
        assert hash == self.gc.identityhash(self.stackroots[-1])
        self.stackroots.pop()
        # (6) ask for the hash of varsized objects, larger and larger
        for i in range(10):
            self.gc.collect()
            p = self.malloc(VAR, i)
            self.stackroots.append(p)
            hash = self.gc.identityhash(p)
            self.gc.collect()
            assert hash == self.gc.identityhash(self.stackroots[-1])
            self.stackroots.pop()
        # (7) the same, but the objects are dying young
        for i in range(10):
            self.gc.collect()
            p = self.malloc(VAR, i)
            self.stackroots.append(p)
            hash1 = self.gc.identityhash(p)
            hash2 = self.gc.identityhash(p)
            assert hash1 == hash2
            self.stackroots.pop()
    def test_memory_alignment(self):
        # Char arrays of every small size must stay readable after a
        # collection regardless of alignment/padding.
        A1 = lltype.GcArray(lltype.Char)
        for i in range(50):
            p1 = self.malloc(A1, i)
            if i:
                p1[i-1] = chr(i)
            self.stackroots.append(p1)
        self.gc.collect()
        for i in range(1, 50):
            p = self.stackroots[-50+i]
            assert p[i-1] == chr(i)
class TestSemiSpaceGC(DirectGCTest):
    # Runs the generic suite against the semispace copying GC.
    from rpython.memory.gc.semispace import SemiSpaceGC as GCClass
    def test_shrink_array(self):
        # In-place shrink of a var-sized object must keep the fixed header
        # and the surviving items intact.
        S1 = lltype.GcStruct('S1', ('h', lltype.Char),
                                   ('v', lltype.Array(lltype.Char)))
        p1 = self.malloc(S1, 2)
        p1.h = '?'
        for i in range(2):
            p1.v[i] = chr(50 + i)
        addr = llmemory.cast_ptr_to_adr(p1)
        ok = self.gc.shrink_array(addr, 1)
        assert ok
        assert p1.h == '?'
        assert len(p1.v) == 1
        for i in range(1):
            assert p1.v[i] == chr(50 + i)
class TestGenerationGC(TestSemiSpaceGC):
    # Same suite against the two-generation GC, plus checks of which
    # collector runs for each requested generation number.
    from rpython.memory.gc.generation import GenerationGC as GCClass
    def test_collect_gen(self):
        # gc.collect(0) must run only the nursery collection; any other
        # value triggers a full semispace collection.
        gc = self.gc
        old_semispace_collect = gc.semispace_collect
        old_collect_nursery = gc.collect_nursery
        calls = []
        def semispace_collect():
            calls.append('semispace_collect')
            return old_semispace_collect()
        def collect_nursery():
            calls.append('collect_nursery')
            return old_collect_nursery()
        gc.collect_nursery = collect_nursery
        gc.semispace_collect = semispace_collect
        gc.collect()
        assert calls == ['semispace_collect']
        calls = []
        gc.collect(0)
        assert calls == ['collect_nursery']
        calls = []
        gc.collect(1)
        assert calls == ['semispace_collect']
        calls = []
        gc.collect(9)
        assert calls == ['semispace_collect']
        calls = []
    def test_write_barrier_direct(self):
        # A store into a prebuilt object followed by the write barrier must
        # keep the young target alive across a nursery collection.
        s0 = lltype.malloc(S, immortal=True)
        self.consider_constant(s0)
        s = self.malloc(S)
        s.x = 1
        s0.next = s
        self.gc.write_barrier(llmemory.cast_ptr_to_adr(s0))
        self.gc.collect(0)
        assert s0.next.x == 1
class TestHybridGC(TestGenerationGC):
    # Same suite against the three-generation hybrid GC, configured with
    # tiny spaces so generation boundaries are exercised quickly.
    from rpython.memory.gc.hybrid import HybridGC as GCClass
    GC_PARAMS = {'space_size': 48*WORD,
                 'min_nursery_size': 12*WORD,
                 'nursery_size': 12*WORD,
                 'large_object': 3*WORD,
                 'large_object_gcptrs': 3*WORD,
                 'generation3_collect_threshold': 5,
                 }
    def test_collect_gen(self):
        # generation >= 2 (or default) must also collect gen3 objects.
        gc = self.gc
        old_semispace_collect = gc.semispace_collect
        old_collect_nursery = gc.collect_nursery
        calls = []
        def semispace_collect():
            gen3 = gc.is_collecting_gen3()
            calls.append(('semispace_collect', gen3))
            return old_semispace_collect()
        def collect_nursery():
            calls.append('collect_nursery')
            return old_collect_nursery()
        gc.collect_nursery = collect_nursery
        gc.semispace_collect = semispace_collect
        gc.collect()
        assert calls == [('semispace_collect', True)]
        calls = []
        gc.collect(0)
        assert calls == ['collect_nursery']
        calls = []
        gc.collect(1)
        assert calls == [('semispace_collect', False)]
        calls = []
        gc.collect(2)
        assert calls == [('semispace_collect', True)]
        calls = []
        gc.collect(9)
        assert calls == [('semispace_collect', True)]
        calls = []
    def test_identityhash(self):
        py.test.skip("does not support raw_mallocs(sizeof(S)+sizeof(hash))")
class TestMiniMarkGCSimple(DirectGCTest):
    """Direct tests for MiniMarkGC using the simple (test-only)
    ArenaCollection implementation."""
    from rpython.memory.gc.minimark import MiniMarkGC as GCClass
    from rpython.memory.gc.minimarktest import SimpleArenaCollection
    # test the GC itself, providing a simple class for ArenaCollection
    GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection}
    def test_card_marker(self):
        # Write young pointers into an (old) array at scattered indices and
        # check that the values survive; exercises card marking for several
        # array lengths (and hence card-byte counts).
        for arraylength in (range(4, 17)
                            + [69]      # 3 bytes
                            + [300]):   # 10 bytes
            print 'array length:', arraylength
            nums = {}
            a = self.malloc(VAR, arraylength)
            self.stackroots.append(a)
            for i in range(50):
                p = self.malloc(S)
                p.x = -i
                # reload from the stackroots: 'a' may have been moved by
                # the malloc() just above
                a = self.stackroots[-1]
                index = (i*i) % arraylength
                self.writearray(a, index, p)
                nums[index] = p.x
                #
                for index, expected_x in nums.items():
                    assert a[index].x == expected_x
            self.stackroots.pop()
    test_card_marker.GC_PARAMS = {"card_page_indices": 4}
    def test_writebarrier_before_copy(self):
        # writebarrier_before_copy() is consulted before a raw array copy;
        # a True result means the copy may proceed without per-item barriers.
        largeobj_size = self.gc.nonlarge_max + 1
        self.gc.next_major_collection_threshold = 99999.0
        p_src = self.malloc(VAR, largeobj_size)
        p_dst = self.malloc(VAR, largeobj_size)
        # make them old
        self.stackroots.append(p_src)
        self.stackroots.append(p_dst)
        self.gc.collect()
        p_dst = self.stackroots.pop()
        p_src = self.stackroots.pop()
        #
        addr_src = llmemory.cast_ptr_to_adr(p_src)
        addr_dst = llmemory.cast_ptr_to_adr(p_dst)
        hdr_src = self.gc.header(addr_src)
        hdr_dst = self.gc.header(addr_dst)
        #
        assert hdr_src.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
        #
        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
        assert res
        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
        #
        hdr_src.tid &= ~minimark.GCFLAG_TRACK_YOUNG_PTRS # pretend we have young ptrs
        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
        assert res # we optimized it
        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS == 0 # and we copied the flag
        #
        hdr_src.tid |= minimark.GCFLAG_TRACK_YOUNG_PTRS
        hdr_dst.tid |= minimark.GCFLAG_TRACK_YOUNG_PTRS
        hdr_src.tid |= minimark.GCFLAG_HAS_CARDS
        hdr_src.tid |= minimark.GCFLAG_CARDS_SET
        # hdr_dst.tid does not have minimark.GCFLAG_HAS_CARDS
        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
        assert not res # there might be young ptrs, let ll_arraycopy to find them
    def test_writebarrier_before_copy_preserving_cards(self):
        # When both arrays have cards, the copy must transfer the card bits
        # from the source range to the destination range.
        from rpython.rtyper.lltypesystem import llarena
        tid = self.get_type_id(VAR)
        largeobj_size = self.gc.nonlarge_max + 1
        self.gc.next_major_collection_threshold = 99999.0
        addr_src = self.gc.external_malloc(tid, largeobj_size, alloc_young=True)
        addr_dst = self.gc.external_malloc(tid, largeobj_size, alloc_young=True)
        hdr_src = self.gc.header(addr_src)
        hdr_dst = self.gc.header(addr_dst)
        #
        assert hdr_src.tid & minimark.GCFLAG_HAS_CARDS
        assert hdr_dst.tid & minimark.GCFLAG_HAS_CARDS
        #
        # Mark cards 0 and 2 of the source via the array write barrier.
        self.gc.write_barrier_from_array(addr_src, 0)
        index_in_third_page = int(2.5 * self.gc.card_page_indices)
        assert index_in_third_page < largeobj_size
        self.gc.write_barrier_from_array(addr_src, index_in_third_page)
        #
        assert hdr_src.tid & minimark.GCFLAG_CARDS_SET
        addr_byte = self.gc.get_card(addr_src, 0)
        assert ord(addr_byte.char[0]) == 0x01 | 0x04 # bits 0 and 2
        #
        res = self.gc.writebarrier_before_copy(addr_src, addr_dst,
                                             0, 0, 2*self.gc.card_page_indices)
        assert res
        #
        # The destination must now carry the same card bits.
        assert hdr_dst.tid & minimark.GCFLAG_CARDS_SET
        addr_byte = self.gc.get_card(addr_dst, 0)
        assert ord(addr_byte.char[0]) == 0x01 | 0x04 # bits 0 and 2
    test_writebarrier_before_copy_preserving_cards.GC_PARAMS = {
        "card_page_indices": 4}
class TestMiniMarkGCFull(DirectGCTest):
    # Re-run all the generic direct GC tests against the real MiniMarkGC
    # (with its default ArenaCollection, unlike TestMiniMarkGCSimple).
    from rpython.memory.gc.minimark import MiniMarkGC as GCClass
class TestIncrementalMiniMarkGCSimple(TestMiniMarkGCSimple):
    """Same tests as TestMiniMarkGCSimple, run against the incremental
    MiniMark GC, plus tests that step the collector through its
    incremental states explicitly."""
    from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
    def test_write_barrier_marking_simple(self):
        # An old object modified during the MARKING phase must end up in
        # old_objects_pointing_to_young so the new pointer is traced.
        for i in range(2):
            curobj = self.malloc(S)
            curobj.x = i
            self.stackroots.append(curobj)
        oldobj = self.stackroots[-1]
        oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj))
        assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0
        self.gc.debug_gc_step_until(incminimark.STATE_MARKING)
        oldobj = self.stackroots[-1]
        # object shifted by minor collect
        oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj))
        assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0
        self.gc.minor_collection()
        self.gc.visit_all_objects_step(1)
        assert oldhdr.tid & incminimark.GCFLAG_VISITED
        #at this point the first object should have been processed
        newobj = self.malloc(S)
        self.write(oldobj,'next',newobj)
        assert self.gc.header(self.gc.old_objects_pointing_to_young.tolist()[0]) == oldhdr
        self.gc.minor_collection()
        self.gc.debug_check_consistency()
    def test_sweeping_simple(self):
        # Objects allocated while the collector is in the SWEEPING state
        # must not be freed by that same (in-progress) major collection.
        assert self.gc.gc_state == incminimark.STATE_SCANNING
        for i in range(2):
            curobj = self.malloc(S)
            curobj.x = i
            self.stackroots.append(curobj)
        self.gc.debug_gc_step_until(incminimark.STATE_SWEEPING)
        oldobj = self.stackroots[-1]
        oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj))
        assert oldhdr.tid & incminimark.GCFLAG_VISITED
        newobj1 = self.malloc(S)
        newobj2 = self.malloc(S)
        newobj1.x = 1337
        newobj2.x = 1338
        self.write(oldobj,'next',newobj1)
        self.gc.debug_gc_step_until(incminimark.STATE_SCANNING)
        #should not be cleared even though it was allocated while sweeping
        newobj1 = oldobj.next
        assert newobj1.x == 1337
    def test_obj_on_escapes_on_stack(self):
        # An object reachable only through a freshly-added stack root must
        # survive the rest of an already-started major collection.
        obj0 = self.malloc(S)
        self.stackroots.append(obj0)
        obj0.next = self.malloc(S)
        self.gc.debug_gc_step_until(incminimark.STATE_MARKING)
        obj0 = self.stackroots[-1]
        obj1 = obj0.next
        obj1.x = 13
        obj0.next = lltype.nullptr(S)
        self.stackroots.append(obj1)
        self.gc.debug_gc_step_until(incminimark.STATE_SCANNING)
        assert self.stackroots[1].x == 13
class TestIncrementalMiniMarkGCFull(DirectGCTest):
    """Direct tests for IncrementalMiniMarkGC: malloc must leave non-GC
    fields uninitialized (no cleanup) while still zeroing GC pointer
    fields."""
    from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
    def test_malloc_fixedsize_no_cleanup(self):
        p = self.malloc(S)
        import pytest
        #ensure the memory is uninitialized
        with pytest.raises(lltype.UninitializedMemoryAccess):
            x1 = p.x
        #ensure all the ptr fields are zeroed
        assert p.prev == lltype.nullptr(S)
        assert p.next == lltype.nullptr(S)
    def test_malloc_varsize_no_cleanup(self):
        # A GcArray of Signed: items must stay uninitialized after malloc.
        x = lltype.Signed
        VAR1 = lltype.GcArray(x)
        p = self.malloc(VAR1,5)
        import pytest
        with pytest.raises(lltype.UninitializedMemoryAccess):
            assert isinstance(p[0], lltype._uninitialized)
            x1 = p[0]
    def test_malloc_varsize_no_cleanup2(self):
        #as VAR is GcArray so the ptr will don't need to be zeroed
        p = self.malloc(VAR, 100)
        for i in range(100):
            assert p[i] == lltype.nullptr(S)
    def test_malloc_varsize_no_cleanup3(self):
        # Raw (non-GC) arrays of pointers are NOT zeroed by malloc.
        VAR1 = lltype.Array(lltype.Ptr(S))
        p1 = lltype.malloc(VAR1, 10, flavor='raw', track_allocation=False)
        import pytest
        with pytest.raises(lltype.UninitializedMemoryAccess):
            for i in range(10):
                assert p1[i] == lltype.nullptr(S)
                p1[i]._free()
        p1._free()
    def test_malloc_struct_of_ptr_struct(self):
        # Pointer fields inside an inlined substructure are zeroed too.
        S3 = lltype.GcForwardReference()
        S3.become(lltype.GcStruct('S3',
                         ('gcptr_struct', S),
                         ('prev', lltype.Ptr(S)),
                         ('next', lltype.Ptr(S))))
        s3 = self.malloc(S3)
        assert s3.gcptr_struct.prev == lltype.nullptr(S)
        assert s3.gcptr_struct.next == lltype.nullptr(S)
    def test_malloc_array_of_ptr_struct(self):
        ARR_OF_PTR_STRUCT = lltype.GcArray(lltype.Ptr(S))
        arr_of_ptr_struct = self.malloc(ARR_OF_PTR_STRUCT,5)
        for i in range(5):
            assert arr_of_ptr_struct[i] == lltype.nullptr(S)
            assert arr_of_ptr_struct[i] == lltype.nullptr(S)
            arr_of_ptr_struct[i] = self.malloc(S)
            assert arr_of_ptr_struct[i].prev == lltype.nullptr(S)
            assert arr_of_ptr_struct[i].next == lltype.nullptr(S)
    #fail for now
    def xxx_test_malloc_array_of_ptr_arr(self):
        # Disabled (xxx_ prefix keeps pytest from collecting it): nested
        # GcArray-of-pointer-arrays case, see the "fail for now" note above.
        ARR_OF_PTR_ARR = lltype.GcArray(lltype.Ptr(lltype.GcArray(lltype.Ptr(S))))
        arr_of_ptr_arr = self.malloc(ARR_OF_PTR_ARR, 10)
        self.stackroots.append(arr_of_ptr_arr)
        for i in range(10):
            assert arr_of_ptr_arr[i] == lltype.nullptr(lltype.GcArray(lltype.Ptr(S)))
        for i in range(10):
            self.writearray(arr_of_ptr_arr, i,
                            self.malloc(lltype.GcArray(lltype.Ptr(S)), i))
            #self.stackroots.append(arr_of_ptr_arr[i])
            #debug_print(arr_of_ptr_arr[i])
            for elem in arr_of_ptr_arr[i]:
                #self.stackroots.append(elem)
                assert elem == lltype.nullptr(S)
                elem = self.malloc(S)
                assert elem.prev == lltype.nullptr(S)
                assert elem.next == lltype.nullptr(S)
| jptomo/rpython-lang-scheme | rpython/memory/gc/test/test_direct.py | Python | mit | 27,041 |
import math, os
from bup import _helpers, helpers
from bup.helpers import sc_page_size
_fmincore = getattr(helpers, 'fmincore', None)
BLOB_MAX = 8192*4 # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
fanout = 16
GIT_MODE_FILE = 0100644
GIT_MODE_TREE = 040000
GIT_MODE_SYMLINK = 0120000
assert(GIT_MODE_TREE != 40000) # 0xxx should be treated as octal
# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat(). We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
    """A byte FIFO: put() appends data at the end; peek()/get() return
    buffer() slices of the front without copying; eat() discards from
    the front.  The only copy happens in put()."""
    def __init__(self):
        self.data = ''   # underlying bytes (a str/buffer in python 2)
        self.start = 0   # offset of the first unconsumed byte
    def put(self, s):
        # Drop the already-consumed prefix and append s; this is the one
        # place where the contents are copied.
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0
    def peek(self, count):
        # Return (up to) count bytes from the front without consuming them.
        return buffer(self.data, self.start, count)
    def eat(self, count):
        # Discard count bytes from the front.
        self.start += count
    def get(self, count):
        # Return and consume (up to) count bytes from the front.
        v = buffer(self.data, self.start, count)
        self.start += count
        return v
    def used(self):
        # Number of unconsumed bytes currently held.
        return len(self.data) - self.start
def _fadvise_pages_done(fd, first_page, count):
    """Tell the OS (via the C helper _helpers.fadvise_done) that we are
    done with the given range of pages of fd, converting page numbers
    to byte offsets.  A zero-length range is a no-op."""
    assert(first_page >= 0)
    assert(count >= 0)
    if count > 0:
        _helpers.fadvise_done(fd,
                              first_page * sc_page_size,
                              count * sc_page_size)
def _nonresident_page_regions(status_bytes, max_region_len=None):
    """Return (start_page, count) pairs in ascending start_page order for
    each contiguous region of nonresident pages indicated by the
    mincore() status_bytes.  Limit the number of pages in each region
    to max_region_len."""
    assert(max_region_len is None or max_region_len > 0)
    start = None   # first page of the current nonresident run, or None
    for i, x in enumerate(status_bytes):
        in_core = x & helpers.MINCORE_INCORE
        if start is None:
            if not in_core:
                start = i   # a nonresident run begins at page i
        else:
            count = i - start
            if in_core:
                # The run ended at page i - 1; emit it.
                yield (start, count)
                start = None
            elif max_region_len and count >= max_region_len:
                # Run still going but hit the cap: emit a chunk and start
                # a new run at the current page.
                yield (start, count)
                start = i
    if start is not None:
        # Final run extends to the end of status_bytes.
        yield (start, len(status_bytes) - start)
def _uncache_ours_upto(fd, offset, first_region, remaining_regions):
    """Uncache the pages of fd indicated by first_region and
    remaining_regions that are before offset, where each region is a
    (start_page, count) pair.  The final region must have a start_page
    of None.  Returns the first region not yet fully before offset."""
    rstart, rlen = first_region
    # Release whole regions while they end at or before the byte offset.
    while rstart is not None and (rstart + rlen) * sc_page_size <= offset:
        _fadvise_pages_done(fd, rstart, rlen)
        rstart, rlen = next(remaining_regions, (None, None))
    return (rstart, rlen)
def readfile_iter(files, progress=None):
    """Yield the contents of each file in BLOB_READ_SIZE chunks.

    If the fmincore helper is available and the file has a real fd, the
    residency map is snapshotted before reading, and pages that were NOT
    already cached are released again once we have streamed past them --
    i.e. only the cache pressure this read created is undone.  progress,
    if given, is called as progress(filenum, len(previous_chunk)) before
    each read."""
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        fd = rpr = rstart = rlen = None
        if _fmincore and hasattr(f, 'fileno'):
            fd = f.fileno()
            # Cap uncache regions at 8MiB worth of pages.
            max_chunk = max(1, (8 * 1024 * 1024) / sc_page_size)
            rpr = _nonresident_page_regions(_fmincore(fd), max_chunk)
            rstart, rlen = next(rpr, (None, None))
        while 1:
            if progress:
                progress(filenum, len(b))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if rpr:
                rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
            if not b:
                break
            yield b
        # Flush any remaining uncache work for the tail of the file.
        if rpr:
            rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
def _splitbuf(buf, basebits, fanbits):
    """Yield (blob, level) pairs split out of buf at boundaries found by
    _helpers.splitbuf().  level counts how many fanbits-sized groups of
    extra bits (beyond basebits) matched at the boundary; blobs are
    capped at BLOB_MAX.  Bytes after the last boundary remain in buf
    for the next call."""
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs:
            if ofs > BLOB_MAX:
                # Force a split at BLOB_MAX instead; such a forced split
                # carries no level information.
                ofs = BLOB_MAX
                level = 0
            else:
                level = (bits-basebits)//fanbits # integer division
            buf.eat(ofs)
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0
def _hashsplit_iter(files, progress):
    """Yield (blob, level) pairs for the concatenated content of files,
    split at content-defined boundaries; file boundaries are ignored."""
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    fanbits = int(math.log(fanout or 128, 2))
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_level in _splitbuf(buf, basebits, fanbits):
            yield buf_and_level
    if buf.used():
        # Whatever remains after the last split boundary becomes a final
        # level-0 blob.
        yield buf.get(buf.used()), 0
def _hashsplit_iter_keep_boundaries(files, progress):
    """Like _hashsplit_iter(), but restart the splitter for every file,
    so that no blob ever spans a file boundary."""
    for real_filenum, f in enumerate(files):
        prog = None
        if progress:
            # The inner _hashsplit_iter() only ever sees a one-element
            # list, so patch the real file number back into the callback.
            def prog(filenum, nbytes, _real=real_filenum):
                return progress(_real, nbytes)
        for item in _hashsplit_iter([f], progress=prog):
            yield item
def hashsplit_iter(files, keep_boundaries, progress):
    """Split files into (blob, level) pairs; if keep_boundaries is true,
    blobs never span a file boundary."""
    splitter = (_hashsplit_iter_keep_boundaries if keep_boundaries
                else _hashsplit_iter)
    return splitter(files, progress)
total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
    """Hashsplit files, store each chunk with makeblob(blob), and yield
    (sha, size, level) per chunk.  Side effects: adds to the global
    total_split byte counter and calls the module-level
    progress_callback (if set) with each chunk size."""
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = makeblob(blob)
        total_split += len(blob)
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)
def _make_shalist(l):
ofs = 0
l = list(l)
total = sum(size for mode,sha,size, in l)
vlen = len('%x' % total)
shalist = []
for (mode, sha, size) in l:
shalist.append((mode, '%0*x' % (vlen,ofs), sha))
ofs += size
assert(ofs == total)
return (shalist, total)
def _squish(maketree, stacks, n):
    """Fold the pending entries in stacks[0..n] upward, in place: each
    non-empty level is turned into a git tree via maketree() and pushed
    onto the next level; singleton levels are promoted as-is (a tree
    with one entry would be pointless).  Any level that reached
    MAX_PER_TREE entries keeps being folded even beyond n."""
    i = 0
    while i < n or len(stacks[i]) >= MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])   # ensure the parent level exists
        if len(stacks[i]) == 1:
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = maketree(shalist)
            stacks[i+1].append((GIT_MODE_TREE, tree, size))
        stacks[i] = []
        i += 1
def split_to_shalist(makeblob, maketree, files,
                     keep_boundaries, progress=None):
    """Hashsplit files, store blobs/trees via makeblob/maketree, and
    return the shalist describing the top-level tree.  Each chunk's
    level decides how many tree levels to close, so the tree depth
    adapts to the data (see _squish)."""
    sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
    assert(fanout != 0)
    if not fanout:
        # Flat mode: one entry per blob, no intermediate trees.
        # (Currently unreachable because of the assert just above.)
        shal = []
        for (sha,size,level) in sl:
            shal.append((GIT_MODE_FILE, sha, size))
        return _make_shalist(shal)[0]
    else:
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append((GIT_MODE_FILE, sha, size))
            _squish(maketree, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        # Final fold of everything up to the top level.
        _squish(maketree, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]
def split_to_blob_or_tree(makeblob, maketree, files,
                          keep_boundaries, progress=None):
    """Like split_to_shalist(), but collapse the result to one object:
    return (mode, sha) of the single blob if there is exactly one entry,
    an empty blob for empty input, or a tree otherwise."""
    shalist = list(split_to_shalist(makeblob, maketree,
                                    files, keep_boundaries, progress))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return (GIT_MODE_FILE, makeblob(''))
    else:
        return (GIT_MODE_TREE, maketree(shalist))
def open_noatime(name):
    """Open name for reading via the _helpers.open_noatime() C helper
    (which avoids updating the file's atime) and return a buffered file
    object.  If fdopen() fails, the raw fd is closed before re-raising."""
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        try:
            os.close(fd)
        except:
            pass   # best-effort cleanup; report the original error
        raise
| jbaber/bup | lib/bup/hashsplit.py | Python | lgpl-2.1 | 7,757 |
# The number of operations executed by algorithms A and B is 40n**2 and 2n**3 , respectively.
# Determine n0 such that A is better than B for n ≥ n0.
from sympy import symbols, integrate, Rational, lambdify, solve, ln, log, sqrt, diff
import matplotlib.pyplot as plt
import numpy as np
g_xlim = [ 0.1, 25 ]
n = symbols( 'n' )
def plot_func( func ):
    """Add a plot of *func* (a sympy expression in the symbol n) to the
    current matplotlib figure, sampled over the global range g_xlim.

    The expression is compiled to a numpy function with lambdify.  For a
    constant expression lambdify returns a scalar instead of an array,
    which would make plt.plot fail with mismatched lengths, so the
    result is broadcast to the shape of the x samples first.
    """
    x_vals = np.linspace( g_xlim[0], g_xlim[1], 1000, endpoint=True )
    lam_f = lambdify( n, func, np )
    # broadcast_to handles both the array and the constant-scalar case
    y_vals = np.broadcast_to( lam_f( x_vals ), x_vals.shape )
    plt.plot( x_vals, y_vals, label = str(func) )
fig = plt.figure()
ax = plt.subplot(111)
# Operation counts of algorithms A and B as functions of n.
# 40*n**2 == 2*n**3 at n == 20, so A beats B for n >= n0 = 20.
A = 40*n**2
B = 2*n**3
plot_func(A)
plot_func(B)
ax.legend()
plt.title('Asymptotic Growth')
plt.show()
#!/usr/bin/python
#
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Neural Network Image Compression Encoder.
Compresses an image to a binarized numpy array. The image must be padded to a
multiple of 32 pixels in height and width.
Example usage:
python encoder.py --input_image=/your/image/here.png \
--output_codes=output_codes.pkl --iteration=15 --model=residual_gru.pb
"""
import io
import os
import numpy as np
import tensorflow as tf
tf.flags.DEFINE_string('input_image', None, 'Location of input image. We rely '
'on tf.image to decode the image, so only PNG and JPEG '
'formats are currently supported.')
tf.flags.DEFINE_integer('iteration', 15, 'Quality level for encoding image. '
'Must be between 0 and 15 inclusive.')
tf.flags.DEFINE_string('output_codes', None, 'File to save output encoding.')
tf.flags.DEFINE_string('model', None, 'Location of compression model.')
FLAGS = tf.flags.FLAGS
def get_output_tensor_names():
  """Return the names of the 16 binarizer output tensors, one per
  encoding iteration (the first has no numeric suffix)."""
  names = ['GruBinarizer/SignBinarizer/Sign:0']
  names.extend('GruBinarizer/SignBinarizer/Sign_{}:0'.format(i)
               for i in range(1, 16))
  return names
def main(_):
  """Encode FLAGS.input_image with the compression model in FLAGS.model
  and save the binarized codes (a compressed .npz payload) to
  FLAGS.output_codes."""
  if (FLAGS.input_image is None or FLAGS.output_codes is None or
      FLAGS.model is None):
    print('\nUsage: python encoder.py --input_image=/your/image/here.png '
          '--output_codes=output_codes.pkl --iteration=15 '
          '--model=residual_gru.pb\n\n')
    return
  if FLAGS.iteration < 0 or FLAGS.iteration > 15:
    print('\n--iteration must be between 0 and 15 inclusive.\n')
    return

  with tf.gfile.FastGFile(FLAGS.input_image) as input_image:
    input_image_str = input_image.read()

  with tf.Graph().as_default() as graph:
    # Load the inference model for encoding.
    with tf.gfile.FastGFile(FLAGS.model, 'rb') as model_file:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(model_file.read())
    _ = tf.import_graph_def(graph_def, name='')
    input_tensor = graph.get_tensor_by_name('Placeholder:0')
    outputs = [graph.get_tensor_by_name(name) for name in
               get_output_tensor_names()]

    # Decoding subgraph: pick the decoder matching the file extension.
    input_image = tf.placeholder(tf.string)
    _, ext = os.path.splitext(FLAGS.input_image)
    if ext == '.png':
      decoded_image = tf.image.decode_png(input_image, channels=3)
    elif ext == '.jpeg' or ext == '.jpg':
      decoded_image = tf.image.decode_jpeg(input_image, channels=3)
    else:
      assert False, 'Unsupported file format {}'.format(ext)
    decoded_image = tf.expand_dims(decoded_image, 0)   # add batch dim

  with tf.Session(graph=graph) as sess:
    img_array = sess.run(decoded_image, feed_dict={input_image:
                                                   input_image_str})
    results = sess.run(outputs, feed_dict={input_tensor: img_array})

  # Keep only the codes up to the requested quality iteration.
  results = results[0:FLAGS.iteration + 1]
  int_codes = np.asarray([x.astype(np.int8) for x in results])

  # Convert int codes to binary.
  int_codes = (int_codes + 1)//2   # map {-1, +1} -> {0, 1}
  export = np.packbits(int_codes.reshape(-1))

  output = io.BytesIO()
  np.savez_compressed(output, shape=int_codes.shape, codes=export)
  with tf.gfile.FastGFile(FLAGS.output_codes, 'w') as code_file:
    code_file.write(output.getvalue())
if __name__ == '__main__':
tf.app.run()
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/compression/image_encoder/encoder.py | Python | bsd-2-clause | 3,909 |
#!/usr/bin/python
from .pyrapidpush import PyRapidPush | prdatur/PyRapidPush | pyrapidpush/__init__.py | Python | gpl-2.0 | 55 |
"""
This module contains class definitions for open ai gym environments.
"""
import os
import collections
import argparse
import random
from datetime import datetime
import time
from functools import reduce
import numpy as np
import tensorflow as tf
from qnetwork import DeepQNetwork, update_target_network
from replay_memory import ReplayMemory, ScreenHistory
from agent import QAgent
import random
def train(params):
    """Seed every RNG for reproducibility, then train or evaluate a
    QAgent according to params (params.is_train / params.env)."""
    # https://stackoverflow.com/questions/11526975/set-random-seed-programwide-in-python
    # https://stackoverflow.com/questions/30517513/global-seed-for-multiple-numpy-imports
    random.seed(params.seed)
    np.random.seed(params.seed)
    # Must be called before Session
    # https://stackoverflow.com/questions/38469632/tensorflow-non-repeatable-results/40247201#40247201
    tf.set_random_seed(params.seed)
    qagent = QAgent(params)
    if params.is_train:
        qagent.fit()
    elif params.env == 'atari':
        qagent.play()
    elif params.env == 'minesweeper':
        qagent.evaluate_ms()
    # NOTE(review): any other (is_train=False, env) combination silently
    # does nothing — confirm that is intended.
# View tensorboard with
# tensorboard --logdir output
if __name__ == "__main__":
    # Build the CLI, then override many defaults below for the
    # minesweeper experiment via parser.set_defaults().
    parser = argparse.ArgumentParser(prog="train.py", description="Train Deep Q-Network for Atari games")
    # Atari ROM, TensorFlow model and output directory
    parser.add_argument('--rom', dest='rom_file', default="./roms/Breakout.bin", type=str, help="path to atari rom (.bin) file")
    parser.add_argument('--model', dest='model_file', type=str, required=False, help="path to TensorFlow model file")
    parser.add_argument('--out', dest='output_dir', type=str, default="./output/", help="output path models and screen captures")
    parser.add_argument('--train', dest="is_train", action="store_true", help="training or only playing")
    parser.add_argument('--randomstart', dest='random_start_wait', type=int, default=30, help="random number of frames to wait at start of episode")
    parser.add_argument('--game', dest='game', type=str, default="DemonAttack-v0", help="The game we play")
    parser.add_argument('--env', dest='env', type=str, default="atari", help="If we want to use atari or minesweeper")
    parser.add_argument('--gpumemory', dest="gpu_memory", type=float, default=0.5, help="The percentage of GPU memory allowed to be used by Tensorflow")
    # Parameters network input (screens)
    parser.add_argument('--inputheight', dest="input_height", type=int, default=84, help="screen input height")
    parser.add_argument('--inputwidth', dest="input_width", type=int, default=84, help="screen input width")
    parser.add_argument('--historylength', dest="history_length", type=int, default=4, help="Numbe of moves which are repeated in atari")
    parser.add_argument('--mines-min', dest="mines_min", type=int, default=5, help="The number of mines")
    parser.add_argument('--mines-max', dest="mines_max", type=int, default=7, help="The number of mines")
    parser.add_argument('--nchannels', dest="nchannels", type=int, default=4, help="screen input depth")
    parser.add_argument('--network-type', dest='network_type', type=str, default="conv", help="conv|fc")
    # Parameters CNN architecture
    parser.add_argument('--filtersizes', dest="filter_sizes", type=str, default="8,4,3", help="CNN filter sizes")
    parser.add_argument('--filterstrides', dest="filter_strides", type=str, default="4,2,1", help="CNN filter strides")
    parser.add_argument('--numfilters', dest="num_filters", type=str, default="32,64,64", help="CNN number of filters per layer")
    parser.add_argument('--numhidden', dest="num_hidden", type=int, default=512, help="CNN number of neurons in FC layer")
    parser.add_argument('--duelingtype', dest="dueling_type", default=None, type=str, help="Type of dueling enabled")
    # See
    # http://cs231n.github.io/neural-networks-2/
    parser.add_argument('--bias-init', dest="bias_init", type=float, default=0.01, help="The initial value of the biases")
    # Parameters for training the CNN
    parser.add_argument('--num-iterations', dest="num_iterations", type=int, default=50000000, help="Number of training iterations, i.e., number of passes, each pass using [batch size] number of examples")
    parser.add_argument('--batchsize', dest="batch_size", type=int, default=32, help="training batch size")
    parser.add_argument('--trainfreq', dest="train_freq", type=int, default=4, help="training frequency, default every frame")
    parser.add_argument('--epsilonstep', dest="epsilon_step", type=float, default=1e6, help="epsilon decrease step, linear annealing over iterations")
    parser.add_argument('--learnrate', dest="learning_rate", type=float, default=0.00025, help="optimization learning rate")
    parser.add_argument('--learnratedecay', dest="learning_rate_decay", type=float, default=0.98, help="learning rate decay")
    parser.add_argument('--learnratestep', dest="learning_rate_step", type=float, default=100000, help="learning rate decay step over iterations")
    parser.add_argument('--learnratemin', dest="learning_rate_minimum", type=float, default=0.0001, help="minimum learning rate")
    parser.add_argument('--discount', dest="discount", type=float, default=0.99, help="gamma for future discounted rewards")
    # NOTE(review): argparse's type=bool treats any non-empty string as
    # True ("--clipdelta False" still yields True) — confirm intended.
    parser.add_argument('--clipdelta', dest="clip_delta", type=bool, default=True, help="clipping of error term in loss function")
    parser.add_argument('--networkupdate', dest="network_update_rate", type=float, default=10000, help="number of steps after which the Q-network is copied for predicting targets")
    parser.add_argument('--batchaccumulator', dest="batch_accumulator", type=str, default="mean", help="batch accumulator in loss function (mean or sum)")
    parser.add_argument('--replaycap', dest="replay_capacity", type=int, default=int(1e6), help="maximum number of samples in replay memory")
    parser.add_argument('--trainstart', dest="train_start", type=int, default=50000, help="start training when replay memory is of this size")
    # Parameters for evaluation of the model
    parser.add_argument('--evalfreq', dest="eval_frequency", type=int, default=250000, help="frequency of model evaluation")
    parser.add_argument('--evaliterations', dest="eval_iterations", type=int, default=125000, help="number of game iterations for each evaluation")
    parser.add_argument('--evalepsilon', dest="eval_epsilon", type=float, default=0.05, help="epsilon random move when evaluating")
    parser.add_argument('--minepsilon', dest="min_epsilon", type=float, default=0.1, help="Lowest epsilon when exploring")
    parser.add_argument('--num-steps', dest="num_steps", type=int, default=5000, help="Number of test steps when playing, each step is an action")
    parser.add_argument('--reward-recent', dest="reward_recent", type=int, default=1000, help="The number of episodes before resetting recent reward")
    parser.add_argument('--num-games', dest="num_games", type=int, default=5000, help="Number of test games to play minesweeper")
    # Parameters for outputting/debugging
    parser.add_argument('--intsummary', dest="interval_summary", type=int, default=200, help="frequency of adding training summaries, currently depending on train_iteration")
    parser.add_argument('--intcheckpoint', dest="interval_checkpoint", type=int, default=10000, help="frequency of saving model checkpoints")
    parser.add_argument('--memorycheckpoint', dest="memory_checkpoint", type=int, default=int(1e5), help="Frequency of saving memory based on addition counter.")
    # NOTE(review): same type=bool pitfall as --clipdelta above.
    parser.add_argument('--restore-memory', dest="restore_memory", type=bool, default=False, help="If True, restore replay memory.")
    parser.add_argument('--show', dest="show_game", action="store_true", help="show the Atari game output")
    parser.add_argument('--seed', dest="seed", type=int, default=0, help="The random seed value. Default at 0 means deterministic for all ops in Tensorflow 1.4")
    # Parse command line arguments and run the training process
    parser.set_defaults(game="minesweeper")
    parser.set_defaults(env='minesweeper')
    parser.set_defaults(mines_min=6)
    parser.set_defaults(mines_max=6)
    parser.set_defaults(input_width=6)
    parser.set_defaults(input_height=6)
    parser.set_defaults(history_length=1)
    parser.set_defaults(train_freq=1) # This should be the same as history length
    parser.set_defaults(nchannels=2)
    parser.set_defaults(batch_size=400)
    #parser.set_defaults(restore_memory=True)
    parser.set_defaults(memory_checkpoint=int(5e5))
    parser.set_defaults(train_start=int(8e5)) # Needs to be larger than batch-size, if reloading set to 0.
    #parser.set_defaults(train_start=int(5e4)) # Needs to be larger than batch-size, if reloading set to 0.
    #parser.set_defaults(train_start=int(100))
    parser.set_defaults(replay_capacity=int(1e6))
    parser.set_defaults(interval_checkpoint=int(2e4))
    parser.set_defaults(eval_frequency=20000)
    parser.set_defaults(eval_iterations=1000) # Changed to number of games player in minesweeper
    # NOTE(review): there is no --reward-recent-update option above; this
    # creates a new 'reward_recent_update' attribute rather than
    # overriding --reward-recent's 'reward_recent' — verify which of the
    # two names the rest of the code reads.
    parser.set_defaults(reward_recent_update=int(1e5))
    parser.set_defaults(discount=0.0)
    #parser.set_defaults(learning_rate=0.00025/4)
    #parser.set_defaults(learning_rate=0.00025)
    parser.set_defaults(learning_rate=0.00004)
    #parser.set_defaults(learning_rate=0.001)
    #parser.set_defaults(learning_rate_step=50000)
    parser.set_defaults(learning_rate_step=20000)
    parser.set_defaults(learning_rate_decay=0.90)
    #parser.set_defaults(learning_rate_minimum=0.00025/4)
    parser.set_defaults(learning_rate_minimum=0.00004)
    parser.set_defaults(network_update_rate=int(1e5))
    parser.set_defaults(min_epsilon=0.1)
    parser.set_defaults(epsilon_step=2.5e5)
    #parser.set_defaults(eval_epsilon=0.001) # For exploration
    parser.set_defaults(network_type='conv')
    #parser.set_defaults(clip_delta=True) # This should be False for minesweeper, it seems
    #parser.set_defaults(dueling_type="mean") # Without this and with fc, the same network as Jacob
    # If we want to play
    parser.set_defaults(num_steps=500) # Number of steps to play atarai
    parser.set_defaults(num_games=10000) # Number of games to play in minesweeper
    #parser.set_defaults(model_file="model-1880000")
    #parser.set_defaults(model_file="model-1680000")
    #parser.set_defaults(model_file="model-1700000")
    #parser.set_defaults(model_file="model-1720000")
    parser.set_defaults(eval_iterations=10000) # For finally testing the best model
    parser.set_defaults(is_train=True) # Note, something is wrong with the play code!!!
    parser.set_defaults(show_game=False)
    params = parser.parse_args()
    train(params)
| jakejhansen/minesweeper_solver | q_learning/backup_output_net1_discount_0_batch_400_best/train.py | Python | mit | 10,684 |
#!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/usr/local'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(filename):
    """Return 1 if *filename* is absent or not readable, else 0.

    Renamed the parameter from 'file', which shadows the Python 2
    builtin of the same name; all callers pass it positionally.
    """
    return 0 if os.access(filename, os.R_OK) else 1
# Fall back to "C:" when HOME is unset (e.g. on Windows).  Catch only
# KeyError: the original bare except also swallowed KeyboardInterrupt
# and SystemExit.
try:
    HOME = os.environ['HOME']
except KeyError:
    HOME = "C:"
# On Windows (WITHDLLS), bundle private copies of the dlls inside a
# 'libxmlmods' package so they need not be on PATH at runtime.
if WITHDLLS:
    # libxml dlls (expected in ROOT/bin)
    dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
    dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls)
    # create __init__.py for the libxmlmods package
    if not os.path.exists("libxmlmods"):
        os.mkdir("libxmlmods")
    open("libxmlmods/__init__.py","w").close()
def altImport(s):
    """Rewrite plain 'import libxml2mod'/'import libxsltmod' statements in
    line *s* to package-relative imports from libxmlmods (used when the
    dlls are bundled inside that package)."""
    rewrites = (
        ("import libxml2mod", "from libxmlmods import libxml2mod"),
        ("import libxsltmod", "from libxmlmods import libxsltmod"),
    )
    for old, new in rewrites:
        s = s.replace(old, new)
    return s
# Library naming and extra link libraries differ per platform: Windows
# import libraries carry a 'lib' prefix and need no extras; elsewhere we
# also link against m and z.
if sys.platform.startswith('win'):
    libraryPrefix = 'lib'
    platformLibs = []
else:
    libraryPrefix = ''
    platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
# Probe includes_dir for the libxml2 headers; keep the first directory
# that contains libxml2/libxml/tree.h, or abort.
xml_includes=""
for dir in includes_dir:
    if not missing(dir + "/libxml2/libxml/tree.h"):
        xml_includes=dir + "/libxml2"
        break;
if xml_includes == "":
    print "failed to find headers for libxml2: update includes_dir"
    sys.exit(1)
# Same probe for iconv.h.
iconv_includes=""
for dir in includes_dir:
    if not missing(dir + "/iconv.h"):
        iconv_includes=dir
        break;
if iconv_includes == "":
    print "failed to find headers for libiconv: update includes_dir"
    sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
# Regenerate the libxml2 python stubs if they are missing, then assemble
# libxml2.py from the hand-written header (libxml.py) plus the generated
# class file, rewriting imports for the bundled-dll layout if needed.
if missing("libxml2-py.c") or missing("libxml2.py"):
    try:
        try:
            import xmlgenerator
        except:
            import generator
    except:
        print "failed to find and generate stubs for libxml2, aborting ..."
        print sys.exc_type, sys.exc_value
        sys.exit(1)
    head = open("libxml.py", "r")
    generated = open("libxml2class.py", "r")
    result = open("libxml2.py", "w")
    for line in head.readlines():
        if WITHDLLS:
            result.write(altImport(line))
        else:
            result.write(line)
    for line in generated.readlines():
        result.write(line)
    head.close()
    generated.close()
    result.close()
# Optionally build the libxslt binding as well; with_xslt is flipped to 1
# only when the stubs and headers are all available.
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
    if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
        print "libxslt stub generator not found, libxslt not built"
    else:
        try:
            import xsltgenerator
        except:
            print "failed to generate stubs for libxslt, aborting ..."
            print sys.exc_type, sys.exc_value
        else:
            # Same prologue + generated-class concatenation as for libxml2.
            head = open("libxsl.py", "r")
            generated = open("libxsltclass.py", "r")
            result = open("libxslt.py", "w")
            for line in head.readlines():
                if WITHDLLS:
                    result.write(altImport(line))
                else:
                    result.write(line)
            for line in generated.readlines():
                result.write(line)
            head.close()
            generated.close()
            result.close()
            with_xslt=1
else:
    with_xslt=1
# Even with stubs present, libxslt headers must be found to compile.
if with_xslt == 1:
    xslt_includes=""
    for dir in includes_dir:
        if not missing(dir + "/libxslt/xsltconfig.h"):
            xslt_includes=dir + "/libxslt"
            break;
    if xslt_includes == "":
        print "failed to find headers for libxslt: update includes_dir"
        with_xslt = 0
# Assemble the build configuration: modules, C sources, include dirs,
# libraries and preprocessor macros, extended below when libxslt is built.
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
    modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs    = [libraryPrefix + "xml2"] + platformLibs
macros  = []
if with_threads:
    macros.append(('_REENTRANT','1'))
if with_xslt == 1:
    descr = "libxml2 and libxslt package"
    if not sys.platform.startswith('win'):
        #
        # We are gonna build 2 identical shared libs with merge initializing
        # both libxml2mod and libxsltmod
        #
        c_files = c_files + ['libxslt-py.c', 'libxslt.c']
        xslt_c_files = c_files
        macros.append(('MERGED_MODULES', '1'))
    else:
        #
        # On windows the MERGED_MODULE option is not needed
        # (and does not work)
        #
        xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
        libs.insert(0, libraryPrefix + 'exslt')
        libs.insert(0, libraryPrefix + 'xslt')
    includes.append(xslt_includes)
    modules.append('libxslt')
# Declare the C extension modules (libxml2mod always, libxsltmod when built).
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
                  library_dirs=libdirs,
                  libraries=libs, define_macros=macros)]
if with_xslt == 1:
    extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
                            library_dirs=libdirs,
                            libraries=libs, define_macros=macros))
# Write a MANIFEST for sdist if one is not already present.
if missing("MANIFEST"):
    manifest = open("MANIFEST", "w")
    manifest.write("setup.py\n")
    for file in xml_files:
        manifest.write(file + "\n")
    if with_xslt == 1:
        for file in xslt_files:
            manifest.write(file + "\n")
    manifest.close()
# When bundling DLLs, install them as package data under libxmlmods.
# Python >= 2.2 installs packages under lib/site-packages.
if WITHDLLS:
    ext_package = "libxmlmods"
    if sys.version >= "2.2":
        base = "lib/site-packages/"
    else:
        base = ""
    data_files = [(base+"libxmlmods",dlls)]
else:
    ext_package = None
    data_files = []
# NOTE(review): "licence" (British spelling) is an old distutils alias for
# the "license" keyword — kept as-is for this legacy build script.
setup (name = "libxml2-python",
       # On *nix, the version number is created from setup.py.in
       # On windows, it is set by configure.js
       version = "2.7.6",
       description = descr,
       author = "Daniel Veillard",
       author_email = "veillard@redhat.com",
       url = "http://xmlsoft.org/python.html",
       licence="MIT Licence",
       py_modules=modules,
       ext_modules=extens,
       ext_package=ext_package,
       data_files=data_files,
       )
sys.exit(0)
| skynjupt/net_dev | src/libxml2/python/setup.py | Python | gpl-2.0 | 6,685 |
# encoding: utf-8
import sys, logging
from errorinfo import *
from xquery import TagQuery
from listening import ListeningProbe
from creator import Creator
from controller import *
# Module entry point: instantiate the controller and start it immediately
# on import (no __main__ guard in this legacy script).
d = DIRController()
d.start()
| prehawk1999/xiamione | main.py | Python | mit | 221 |
# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
from scrapy.exceptions import CloseSpider
from scraper.items import DbItem
import urlparse
import json
import requests as r
import re
# Base URL of the scraped site; also stored on each emitted item.
website = 'http://zilek.fr'
description = """simple spider used to search for houses on zilek. Retrieves 25 newest
items"""
# Example of the JSON search payload this spider expects as kwargs.
search_format = """{"ville":"New York","min_chambre":"3","max_prix":"3000000","rayon":"20", "min_surface":"30","min_prix" : "0"}"""
class ZilekSpider(CrawlSpider):
    """Crawl zilek.fr real-estate listings matching a search, following
    pagination links and loading up to ``max_items`` DbItem results.
    """
    count = 0
    name = 'zilek'
    allowed_domains = ['zilek.fr']
    # Follow the "next page" arrow links; each landing page is parsed by
    # parse_start_url.
    rules = (
            Rule(LinkExtractor(restrict_xpaths='//*[@id="content"]//a/i[@class="fa '+\
            'fa-arrow-circle-right w3-xxlarge"]/..'), callback='parse_start_url', follow=True),
    )
    def __init__(self,sid = 0,max_items = 25,**search):
        """search contains :
        ville, min_prix, max_prix, min_chambre,min_surface,rayon"""
        super(ZilekSpider,self).__init__()
        self.search = search
        self.sid = sid
        self.max = max_items
        # Default search so `scrapy check` (which passes no kwargs) still works.
        if(len(self.search) == 0):#scrapy check or misconfigured
            self.search['ville'] = 'Poitiers'
            self.search['min_chambre'] = '0'
            self.search['max_prix'] = '5000000'
            self.search['min_prix'] = '0'
            self.search['rayon'] = '0'
            self.search['min_surface'] = '0'
        if('ville' not in search):
            raise Exception("impossible de chercher sans ville!")
    def start_requests(self):
        # Resolve the city name to (name, id) pairs via a synchronous POST
        # (blocking `requests` call, outside scrapy's async machinery), then
        # build one search URL per matching city.
        myrequest = r.post('http://zilek.fr/ajax_get_city_name.pl',\
        data={'region_id':self.search['ville']})
        data = re.findall(r'(\w+)\s\((\d+)',myrequest.content)
        for i in data:
            yield self.make_requests_from_url("http://zilek.fr/immobilier/city/1/"+\
            "bedrooms_min-"+self.search['min_chambre']+\
            "~max_p-"+self.search['max_prix']+\
            "~sort-date"+\
            "~surface_min-"+self.search['min_surface']+\
            "~min_p-"+self.search['min_prix']+\
            "~radius-"+self.search['rayon']+\
            "/"+i[0]+'_'+i[1]+'.htm')
    def parse_start_url(self, response):
        """ should parse the zilek research page
        @url http://zilek.fr/immobilier/city/1/bedrooms_min-2~max_p-50000000~sort-popular~surface_min-50~min_p-50000~radius-100/Croutelle_86240.htm
        @scrapes id image url description
        """
        # Hard stop once the requested number of items has been collected.
        if(self.count == self.max): raise CloseSpider("scraped max number of items")
        global website
        items = []
        for tr in response.xpath('//table//tr[@itemtype="http://data-vocabulary.org/Product"]'):
            l = ItemLoader(DbItem(),tr)
            l.add_xpath('id','@id',re = '\d+$')
            l.add_xpath('image','td//img[@itemprop="image"]/@src')
            # Relative listing URLs are made absolute against the site root.
            l.add_xpath('url','td[@class="search_results_minipics_td"]/a/@href',\
                    MapCompose(lambda i: urlparse.urljoin(website,i)))
            price = tr.xpath('td//span[@itemprop="price"]/text()').extract()[0]
            commune = tr.xpath('td//a[@alt="ville"]/text()').extract()[0]
            desc = tr.xpath('td//div/span[@itemprop="description"]/text()').extract()[0]
            # Description is an HTML snippet combining price, town and text.
            descstring = price+" euros <br>"+commune+"<br>"+desc
            l.add_value('description',descstring)
            l.add_value('search',self.sid)
            l.add_value('website',website)
            items.append(l.load_item())
            self.count += 1
        return items
| ForgottenBeast/spider_nest | scraper/scraper/spiders/zilek.py | Python | gpl-3.0 | 3,624 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Simple utility functions and bug fixes for compatibility with all supported
versions of Python. This module should generally not be used directly, as
everything in `__all__` will be imported into `astropy.utils.compat` and can
be accessed from there.
Includes the following fixes:
* The `contextlib.suppress` context manager, which is only available in Python
3.4 or greater.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
import functools
import sys
__all__ = ['invalidate_caches', 'override__dir__', 'suppress',
'possible_filename', 'namedtuple_asdict']
def possible_filename(filename):
    """
    Determine if the ``filename`` argument is an allowable type for a filename.

    In Python 3.3 use of non-unicode filenames on system calls such as
    `os.stat` and others that accept a filename argument was deprecated (and
    may be removed outright in the future), so byte strings are rejected on
    Windows for Python >= 3.3.  Text strings are always acceptable; any other
    type is not.
    """
    if isinstance(filename, six.binary_type):
        # Byte-string filenames are disallowed only on win32 + Python >= 3.3.
        return not (sys.platform == 'win32' and
                    sys.version_info[:2] >= (3, 3))
    return isinstance(filename, six.text_type)
# Python 3.3's importlib caches filesystem reads for faster imports in the
# general case. But sometimes it's necessary to manually invalidate those
# caches so that the import system can pick up new generated files. See
# https://github.com/astropy/astropy/issues/820
if sys.version_info[:2] >= (3, 3):
    from importlib import invalidate_caches
else:
    # Import caches only exist on Python >= 3.3, so earlier versions get a no-op.
    invalidate_caches = lambda: None
def override__dir__(f):
    """
    When overriding a __dir__ method on an object, you often want to
    include the "standard" members on the object as well.  This
    decorator takes care of that automatically, and all the wrapped
    function needs to do is return a list of the "special" members
    that wouldn't be found by the normal Python means.
    Example
    -------
    @override__dir__
    def __dir__(self):
        return ['special_method1', 'special_method2']
    """
    if sys.version_info[:2] < (3, 3):
        # There was no straightforward way to do this until Python 3.3, so
        # we have this complex monstrosity
        @functools.wraps(f)
        def override__dir__wrapper(self):
            # Collect members from every class in the MRO plus the instance
            # dict, then merge in the decorated function's extras.
            members = set()
            for cls in self.__class__.mro():
                members.update(dir(cls))
            members.update(six.iterkeys(self.__dict__))
            members.update(f(self))
            return sorted(members)
    else:
        # http://bugs.python.org/issue12166
        # Python 3.3+ exposes the default implementation as object.__dir__.
        @functools.wraps(f)
        def override__dir__wrapper(self):
            members = set(object.__dir__(self))
            members.update(f(self))
            return sorted(members)
    return override__dir__wrapper
# contextlib.suppress was added in Python 3.4; provide a backport otherwise.
try:
    from contextlib import suppress
except ImportError:
    from contextlib import contextmanager
    @contextmanager
    def suppress(*exceptions):
        """A context manager for ignoring exceptions.  Equivalent to::
            try:
                <body>
            except exceptions:
                pass
        Example::
            >>> import os
            >>> with suppress(OSError):
            ...     os.remove('file-that-does-not-exist')
        """
        try:
            yield
        except exceptions:
            pass
# For unclear reasons, the `_asdict` method of namedtuple produces an empty
# dictionary if the namedtuple is a subclass of another namedtuple... But
# *only* in py 3.3. >3.4 or 2.7 seem to work just fine. So we provide this
# for compatibility as long as 3.3 is supported.
# Furthermore, in python 3.4.x except for 3.4.4, `_asdict` produces only a
# *partial* dictionary. So we work around that case too.
if sys.version_info[0] == 3 and sys.version_info[:3] < (3, 4, 4):
    def namedtuple_asdict(namedtuple):
        """
        The same as ``namedtuple._adict()``, but fixed to work even when
        namedtuple is a subclass of another namedtuple
        Parameters
        ----------
        namedtuple : collections.namedtuple
            The named tuple to get the dict of
        """
        # Rebuild the dict from _fields directly, bypassing the buggy _asdict.
        return {fi: getattr(namedtuple, fi) for fi in namedtuple._fields}
else:
    def namedtuple_asdict(namedtuple):
        """
        The same as ``namedtuple._adict()``.
        Parameters
        ----------
        namedtuple : collections.namedtuple
            The named tuple to get the dict of
        """
        return namedtuple._asdict()
| AustereCuriosity/astropy | astropy/utils/compat/misc.py | Python | bsd-3-clause | 4,732 |
import datetime
import time
from collections import OrderedDict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.exceptions import PermissionDenied
from django.core.paginator import InvalidPage, Paginator
from django.db.models import QuerySet
from django.http import (
HttpResponseBadRequest, HttpResponseRedirect, JsonResponse,
)
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views import generic
from mymoney.core.templatetags.core_tags import (
currency_positive, localize_positive,
)
from .forms import (
BankTransactionCreateForm, BankTransactionListForm,
BankTransactionUpdateForm,
)
from .mixins import BankTransactionAccessMixin, BankTransactionSaveViewMixin
from .models import BankTransaction
class BankTransactionListView(BankTransactionAccessMixin, generic.FormView):
    """Paginated list of a bank account's transactions with session-persisted
    filters and bulk actions (reconcile / unreconcile / delete)."""
    form_class = BankTransactionListForm
    template_name = 'banktransactions/list/index.html'
    paginate_by = 50
    # Session key under which the current filter state is stored.
    _session_key = 'banktransactionlistform'
    def get_initial(self):
        """Seed the filter form from the filters saved in the session."""
        initial = super(BankTransactionListView, self).get_initial()
        if self._session_key in self.request.session:
            session = self.request.session
            initial.update(session[self._session_key].get('filters', {}))
            # raw_input keeps the user's original (localized) date/amount text.
            initial.update(session[self._session_key].get('raw_input', {}))
        return initial
    def get_form_kwargs(self):
        """Pass the user, visible transaction ids and the pressed submit
        button name to the form."""
        kwargs = super(BankTransactionListView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        kwargs['bt_ids'] = [bt.pk for bt in self.page]
        # Exactly one of the three submit buttons may be present in POST.
        submits = set(('filter', 'reset', 'action')) & set(self.request.POST.keys())
        kwargs['submit'] = submits.pop() if submits else None
        return kwargs
    def get_context_data(self, **kwargs):
        context = super(BankTransactionListView, self).get_context_data(**kwargs)
        context['bankaccount'] = self.bankaccount
        context['has_filters'] = self._session_key in self.request.session
        context['current_balance'] = (
            BankTransaction.objects.get_current_balance(self.bankaccount)
        )
        context['reconciled_balance'] = (
            BankTransaction.objects.get_reconciled_balance(self.bankaccount)
        )
        # Manual pagination context (FormView has no built-in list support).
        context['object_list'] = self.page.object_list
        context['page_obj'] = self.page
        context['is_paginated'] = self.page.has_other_pages()
        return context
    def get_success_url(self):
        self.success_url = reverse('banktransactions:list', kwargs={
            'bankaccount_pk': self.bankaccount.pk,
        })
        return super(BankTransactionListView, self).get_success_url()
    def form_valid(self, form):
        """Dispatch on the pressed button: save filters, reset them, or run a
        bulk action on the selected transactions."""
        if 'filter' in self.request.POST:
            filters, raw_input = {}, {}
            for key, value in form.cleaned_data.items():
                # QuerySets are not serializable in the session; listify them.
                value = list(value) if isinstance(value, QuerySet) else value
                if (key.startswith('banktransaction_') or
                        key == 'operation' or
                        value in form.fields[key].empty_values):
                    continue
                if key == 'tags':
                    data = [tag.pk for tag in value]
                elif key.startswith('date_') or key.startswith('amount_'):
                    # Keep the raw POST text so initial values re-render in the
                    # user's own locale format.
                    raw_input[key] = self.request.POST.get(key, None)
                    data = str(value)
                else:
                    data = value
                filters[key] = data
            self.request.session[self._session_key] = {
                'filters': filters,
                'raw_input': raw_input,
            }
        elif 'reset' in self.request.POST:
            if self._session_key in self.request.session:
                del self.request.session[self._session_key]
        elif 'action' in self.request.POST:  # pragma: no branch
            op = form.cleaned_data['operation']
            ids = form.cleaned_data['banktransactions']
            if op == 'reconcile':
                (BankTransaction.objects
                    .filter(pk__in=ids)
                    .update(reconciled=True))
                messages.success(
                    self.request,
                    _('Bank transaction have been reconciled.'),
                )
            elif op == 'unreconcile':
                (BankTransaction.objects
                    .filter(pk__in=ids)
                    .update(reconciled=False))
                messages.success(
                    self.request,
                    _('Undo bank transaction reconciled.'),
                )
            elif op == 'delete':  # pragma: no branch
                # Deletion asks for confirmation: stash the ids and redirect
                # to the multi-delete confirmation view.
                self.request.session['banktransactionlistdelete'] = list(ids)
                return HttpResponseRedirect(reverse(
                    'banktransactions:delete_multiple', kwargs={
                        'bankaccount_pk': self.bankaccount.pk
                    }
                ))
        return super(BankTransactionListView, self).form_valid(form)
    @cached_property
    def page(self):
        """Current Paginator page; falls back to page 1 on a bad page number."""
        paginator = Paginator(self.queryset, self.paginate_by)
        try:
            page = paginator.page(self.request.GET.get('page'))
        except InvalidPage:
            page = paginator.page(1)
        return page
    @property
    def queryset(self):
        """Account transactions with running-balance extras, narrowed by the
        filters stored in the session."""
        qs = (
            BankTransaction.objects
            .filter(bankaccount=self.bankaccount)
            .select_related('tag')
            .order_by('-date', '-id')
        )
        qs = queryset_extra_balance_fields(qs, self.bankaccount)
        if self._session_key in self.request.session:
            filters = self.request.session[self._session_key].get('filters', {})
            if 'label' in filters:
                qs = qs.filter(label__icontains=filters['label'])
            if 'date_start' in filters and 'date_end' in filters:
                qs = qs.filter(date__range=(
                    filters['date_start'],
                    filters['date_end'])
                )
            elif 'date_start' in filters:
                qs = qs.filter(date__gte=filters['date_start'])
            elif 'date_end' in filters:
                qs = qs.filter(date__lte=filters['date_end'])
            if 'amount_min' in filters and 'amount_max' in filters:
                qs = qs.filter(amount__range=(
                    filters['amount_min'],
                    filters['amount_max'])
                )
            elif 'amount_min' in filters:
                qs = qs.filter(amount__gte=filters['amount_min'])
            elif 'amount_max' in filters:
                qs = qs.filter(amount__lte=filters['amount_max'])
            if 'status' in filters:
                qs = qs.filter(status=filters['status'])
            if 'reconciled' in filters:
                qs = qs.filter(reconciled=filters['reconciled'])
            if 'tags' in filters:
                qs = qs.filter(tag__in=filters['tags'])
        return qs
class BankTransactionCalendarView(BankTransactionAccessMixin,
                                  generic.TemplateView):
    """Render the bootstrap-calendar page; events are fetched via AJAX."""
    template_name = 'banktransactions/calendar/index.html'
    def get_context_data(self, **kwargs):
        context = super(BankTransactionCalendarView, self).get_context_data()
        context['bankaccount'] = self.bankaccount
        # Endpoint the calendar JS polls to fetch events for a date range.
        context['calendar_ajax_url'] = reverse(
            'banktransactions:calendar_ajax_events',
            kwargs={'bankaccount_pk': self.bankaccount.pk},
        )
        # Optionally load a non-default translation file for the calendar JS.
        if (not settings.MYMONEY['USE_L10N_DIST'] and
                settings.MYMONEY['BOOTSTRAP_CALENDAR_LANGCODE']):
            context['bootstrap_calendar_langcode'] = (
                'bower_components/bootstrap-calendar/js/language/{lang}.js'.format(
                    lang=settings.MYMONEY['BOOTSTRAP_CALENDAR_LANGCODE'],
                )
            )
        return context
class BankTransactionCalendarEventsAjax(BankTransactionAccessMixin,
                                        generic.View):
    """JSON endpoint returning the account's transactions between the
    ``from``/``to`` millisecond timestamps, in bootstrap-calendar format."""
    def get(self, request, *args, **kwargs):
        # Timestamps arrive in milliseconds; convert to dates or bail out.
        try:
            date_start = datetime.date.fromtimestamp(int(request.GET.get('from')) / 1000)
            date_end = datetime.date.fromtimestamp(int(request.GET.get('to')) / 1000)
        except Exception:
            return HttpResponseBadRequest("Parameters 'from' and 'to' must be "
                                          "valid timestamp in milliseconds.")
        qs = (
            BankTransaction.objects.filter(
                bankaccount=self.bankaccount,
                date__range=(date_start, date_end),
            )
        )
        qs = queryset_extra_balance_fields(qs, self.bankaccount)
        qs = qs.order_by('date')
        events = []
        for banktransaction in qs:
            # bootstrap-calendar expects millisecond timestamps.
            timestamp_ms = time.mktime(banktransaction.date.timetuple()) * 1000
            events.append({
                "id": banktransaction.id,
                "url": reverse('banktransactions:calendar_ajax_event', kwargs={
                    'pk': banktransaction.pk,
                }),
                "title": "{label}, {amount}".format(
                    label=banktransaction.label,
                    amount=currency_positive(
                        banktransaction.amount,
                        banktransaction.currency,
                    ),
                ),
                # Red event for expenses, green for income.
                "class": "event-important" if banktransaction.amount < 0 else "event-success",
                "start": timestamp_ms,
                "end": timestamp_ms,
                # Extra payload used by the calendar popover template.
                "extra_data": {
                    "label": banktransaction.label,
                    "total_balance": banktransaction.total_balance,
                    "total_balance_view": localize_positive(
                        banktransaction.total_balance
                    ),
                    "reconciled_balance": banktransaction.reconciled_balance,
                    "reconciled_balance_view": localize_positive(
                        banktransaction.reconciled_balance
                    ),
                },
            })
        return JsonResponse({
            "success": 1,
            "result": events,
        })
class BankTransactionCalendarEventAjax(generic.TemplateView):
    """HTML fragment shown in the calendar modal for one transaction."""
    template_name = 'banktransactions/calendar/modal_content.html'
    # The BankTransaction being displayed, resolved in dispatch().
    object = None
    def dispatch(self, request, *args, **kwargs):
        self.object = get_object_or_404(BankTransaction, pk=kwargs.get('pk'))
        # Only owners of the bank account may see the transaction.
        if not self.object.bankaccount.owners.filter(pk=request.user.pk).exists():
            raise PermissionDenied
        return super(BankTransactionCalendarEventAjax, self).dispatch(
            request, *args, **kwargs
        )
    def get_context_data(self, **kwargs):
        context = super(BankTransactionCalendarEventAjax, self).get_context_data()
        context['banktransaction'] = self.object
        # Edit/delete links are only exposed to users holding the permission.
        if self.request.user.has_perm('banktransactions.change_banktransaction'):
            context['url_edit'] = reverse(
                'banktransactions:update', kwargs={'pk': self.object.pk},
            )
        if self.request.user.has_perm('banktransactions.delete_banktransaction'):
            context['url_delete'] = reverse(
                'banktransactions:delete', kwargs={'pk': self.object.pk},
            )
        return context
class BankTransactionCreateView(PermissionRequiredMixin,
                                BankTransactionAccessMixin,
                                BankTransactionSaveViewMixin,
                                SuccessMessageMixin,
                                generic.CreateView):
    """Create a bank transaction, optionally chaining back to a fresh create
    form ("save and add another") via the ``redirect`` form flag."""
    form_class = BankTransactionCreateForm
    permission_required = ('banktransactions.add_banktransaction',)
    raise_exception = True
    success_message = ugettext_lazy(
        "Bank transaction %(label)s was created successfully."
    )
    def get_initial(self):
        initial = super(BankTransactionCreateView, self).get_initial()
        # Pre-check the "redirect" box when we arrived from a self-redirect.
        if self.request.GET.get('self-redirect', False):
            initial['redirect'] = True
        return initial
    def form_valid(self, form):
        response = super(BankTransactionCreateView, self).form_valid(form)
        # "Save and add another": loop back to an empty create form.
        if form.cleaned_data['redirect']:
            url_redirect = reverse('banktransactions:create', kwargs={
                'bankaccount_pk': self.object.bankaccount.pk,
            }) + '?self-redirect=1'
            return HttpResponseRedirect(url_redirect)
        return response
class BankTransactionUpdateView(PermissionRequiredMixin,
                                BankTransactionAccessMixin,
                                BankTransactionSaveViewMixin,
                                SuccessMessageMixin,
                                generic.UpdateView):
    """Edit an existing bank transaction (permission-gated)."""
    form_class = BankTransactionUpdateForm
    permission_required = ('banktransactions.change_banktransaction',)
    raise_exception = True
    success_message = ugettext_lazy(
        "Bank transaction %(label)s was updated successfully."
    )
class BankTransactionDeleteView(PermissionRequiredMixin,
                                BankTransactionAccessMixin,
                                generic.DeleteView):
    """Delete a single bank transaction, then return to the account list."""
    model = BankTransaction
    permission_required = ('banktransactions.delete_banktransaction',)
    raise_exception = True
    def get_success_url(self):
        """
        Override parent to dynamically set success url.
        """
        self.success_url = reverse('banktransactions:list', kwargs={
            'bankaccount_pk': self.object.bankaccount.pk,
        })
        return super(BankTransactionDeleteView, self).get_success_url()
class BankTransactionDeleteMultipleView(PermissionRequiredMixin,
                                        BankTransactionAccessMixin,
                                        generic.TemplateView):
    """Confirmation page for the bulk-delete action; the ids to delete are
    handed over in the session by BankTransactionListView."""
    template_name = 'banktransactions/banktransaction_confirm_delete_multiple.html'
    permission_required = ('banktransactions.delete_banktransaction',)
    raise_exception = True
    # QuerySet of transactions pending deletion, resolved in dispatch().
    banktransactions = None
    def dispatch(self, request, *args, **kwargs):
        if 'banktransactionlistdelete' in self.request.session:
            self.banktransactions = BankTransaction.objects.filter(
                pk__in=self.request.session['banktransactionlistdelete']
            )
        # Direct access without ids stashed in the session is forbidden.
        if not self.banktransactions:
            raise PermissionDenied
        return super(BankTransactionDeleteMultipleView, self).dispatch(
            request, *args, **kwargs
        )
    def post(self, request, *args, **kwargs):
        # Delete one by one so model delete() logic runs for each instance.
        for banktransaction in self.banktransactions:
            banktransaction.delete()
        del self.request.session['banktransactionlistdelete']
        messages.success(request, "Bank transactions deleted successfully.")
        return HttpResponseRedirect(reverse('banktransactions:list', kwargs={
            'bankaccount_pk': self.bankaccount.pk,
        }))
    def get_context_data(self, **kwargs):
        context = super(BankTransactionDeleteMultipleView, self).get_context_data(**kwargs)
        context['bankaccount'] = self.bankaccount
        context['banktransactions'] = self.banktransactions
        return context
def queryset_extra_balance_fields(qs, bankaccount):
    """
    Add extra fields to the queryset provided. Useful if you need to know
    previous balance of the current row, no matter which filters/orders are
    applied.
    Extra fields are:
    - total_balance
    - reconciled_balance
    """
    # Unfortunetly, we cannot get it by doing the opposite (i.e :
    # total balance - SUM(futur bt) because with postgreSQL at least,
    # the last dated bt would give None : total balance - SUM(NULL).
    # It could be usefull because most of the time, we are seeing the
    # latest pages, not the first (past).
    # NOTE(review): balance_initial is interpolated into the SQL text via
    # str.format rather than bound as a parameter — safe only because it is
    # a numeric model field, not user-typed text.
    # Running balance = initial balance + sum of all transactions up to and
    # including this row (ties on date broken by id).
    total_balance_subquery = """
        SELECT SUM(bt_sub.amount) + {balance_initial}
        FROM {table} AS bt_sub
        WHERE
            bt_sub.bankaccount_id = %s
            AND (
                bt_sub.date < {table}.date
                OR (
                    bt_sub.date = {table}.date
                    AND
                    bt_sub.id <= {table}.id
                )
            )
        """.format(
        table=BankTransaction._meta.db_table,
        balance_initial=bankaccount.balance_initial,
    )
    # Same running balance, restricted to reconciled transactions.
    reconciled_balance_subquery = """
        SELECT SUM(bt_sub_r.amount) + {balance_initial}
        FROM {table} AS bt_sub_r
        WHERE
            bt_sub_r.bankaccount_id = %s
            AND
            bt_sub_r.reconciled = \'1\'
            AND (
                bt_sub_r.date < {table}.date
                OR (
                    bt_sub_r.date = {table}.date
                    AND
                    bt_sub_r.id <= {table}.id
                )
            )""".format(
        table=BankTransaction._meta.db_table,
        balance_initial=bankaccount.balance_initial,
    )
    return qs.extra(
        select=OrderedDict([
            ('total_balance', total_balance_subquery),
            ('reconciled_balance', reconciled_balance_subquery),
        ]),
        select_params=(bankaccount.pk, bankaccount.pk)
    )
| ychab/mymoney | mymoney/apps/banktransactions/views.py | Python | bsd-3-clause | 17,455 |
# -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class ResLang(models.Model):
    """Extend res.lang with per-language default units of measure."""
    _inherit = 'res.lang'
    # At most one default UoM per UoM category (enforced by the constraint).
    default_uom_ids = fields.Many2many(
        string='Default Units',
        comodel_name='product.uom',
    )
    @api.multi
    @api.constrains('default_uom_ids')
    def _check_default_uom_ids(self):
        # If two selected units share a category, the category set shrinks
        # below the unit count — reject that.
        for record in self:
            categories = set(record.default_uom_ids.mapped('category_id'))
            if len(categories) != len(record.default_uom_ids):
                raise ValidationError(_(
                    'Only one default unit of measure per category may '
                    'be selected.',
                ))
    @api.model
    def default_uom_by_category(self, category_name, lang=None):
        """Return the default UoM for language for the input UoM Category.
        Args:
            category_name (str): Name of the UoM category to get the default
                for.
            lang (ResLang or str, optional): Recordset or code of the language
                to get the default for. Will use the current user language if
                omitted.
        Returns:
            ProductUom: Unit of measure representing the default, if set.
                Empty recordset otherwise.
        """
        if lang is None:
            lang = self.env.user.lang
        # Accept a plain language code and resolve it to a res.lang record.
        # (basestring: this module targets Python 2 Odoo.)
        if isinstance(lang, basestring):
            lang = self.env['res.lang'].search([
                ('code', '=', lang),
            ],
                limit=1,
            )
        results = lang.default_uom_ids.filtered(
            lambda r: r.category_id.name == category_name,
        )
        # Slice to guarantee at most one record is returned.
        return results[:1]
| ovnicraft/server-tools | base_locale_uom_default/models/res_lang.py | Python | agpl-3.0 | 1,775 |
import os
import xbmcgui
import xbmcaddon
import pyxbmct.addonwindow as pyxbmct
# Kodi addon bootstrap: resolve the addon's install path and the checkmark
# icon used to flag selected list rows.
AddonID = "plugin.video.israelive"
_addon = xbmcaddon.Addon(AddonID)
_path = _addon.getAddonInfo("path")
_check_icon = os.path.join(_path, "check.png")  # Don't decode _path to utf-8!!!
class MultiChoiceDialog(pyxbmct.AddonDialogWindow):
    """Kodi modal dialog presenting a checkable list; after close(),
    ``selected`` holds the indexes the user checked (empty on cancel)."""
    def __init__(self, title="", items=[]):
        # NOTE(review): mutable default `items=[]` — harmless here because the
        # list is only read (addItems), never mutated.
        super(MultiChoiceDialog, self).__init__(title)
        self.setGeometry(450, 600, 10, 4)
        self.selected = []
        self.set_controls()
        self.connect_controls()
        self.listing.addItems(items)
        self.set_navigation()
    def set_controls(self):
        """Create the list and the OK/Cancel buttons on the dialog grid."""
        self.listing = pyxbmct.List(_imageWidth=15)
        self.placeControl(self.listing, 0, 0, rowspan=9, columnspan=4)
        self.ok_button = pyxbmct.Button("OK")
        self.placeControl(self.ok_button, 9, 1)
        self.cancel_button = pyxbmct.Button("Cancel")
        self.placeControl(self.cancel_button, 9, 2)
    def connect_controls(self):
        """Wire widget activation to their handlers."""
        self.connect(self.listing, self.check_uncheck)
        self.connect(self.ok_button, self.ok)
        self.connect(self.cancel_button, self.close)
    def set_navigation(self):
        """Define remote-control navigation between list and buttons."""
        self.listing.controlUp(self.ok_button)
        self.listing.controlDown(self.ok_button)
        self.ok_button.setNavigation(self.listing, self.listing, self.cancel_button, self.cancel_button)
        self.cancel_button.setNavigation(self.listing, self.listing, self.ok_button, self.ok_button)
        self.setFocus(self.listing)
    def check_uncheck(self):
        # Checked state is tracked in label2 and mirrored by the check icon.
        list_item = self.listing.getSelectedItem()
        if list_item.getLabel2() == "checked":
            list_item.setIconImage("")
            list_item.setLabel2("unchecked")
        else:
            list_item.setIconImage(_check_icon)
            list_item.setLabel2("checked")
    def ok(self):
        """Collect the indexes of all checked rows, then close the dialog."""
        for index in range(self.listing.size()):
            if self.listing.getListItem(index).getLabel2() == "checked":
                self.selected.append(index)
        super(MultiChoiceDialog, self).close()
    def close(self):
        """Cancel: discard any selection before closing."""
        self.selected = []
        super(MultiChoiceDialog, self).close()
| guymakam/Kodi-Israel | plugin.video.israelive/resources/lib/multiChoiceDialog.py | Python | gpl-2.0 | 1,922 |
import re
from urllib.parse import quote_plus, urljoin
from sickchill import logger
from sickchill.helper.common import convert_size, try_int
from sickchill.oldbeard import db, tvcache
from sickchill.oldbeard.bs4_parser import BS4Parser
from sickchill.oldbeard.common import Quality
from sickchill.oldbeard.name_parser.parser import InvalidNameException, InvalidShowException, NameParser
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class Provider(TorrentProvider):
    def __init__(self):
        """Configure the ilCorsaroNero torrent provider (URLs, search
        categories, quality-tag rewrites and cache policy)."""
        super().__init__("ilCorsaroNero")
        categories = [  # Categories included in searches
            15,  # Serie TV
            5,  # Anime
            1,  # BDRip
            20,  # DVD
            19,  # Screener
        ]
        categories = ",".join(map(str, categories))
        self.url = "https://ilcorsaronero.link"
        self.urls = {
            "search": urljoin(self.url, "argh.php?search={0}&order=data&by=DESC&page={1}&category=" + categories),
        }
        self.public = True
        self.minseed = 0
        self.minleech = 0
        self.engrelease = None
        self.subtitle = None
        # Pagination cap for a single search.
        self.max_pages = 10
        self.proper_strings = ["PROPER", "REPACK"]
        # Markers used by _is_italian to detect subtitle-only releases.
        self.sub_string = ["sub", "softsub"]
        # Italian quality-tag variants rewritten into scene-style quality
        # strings by search() via _reverseQuality.
        self.hdtext = [
            " - Versione 720p",
            " Versione 720p",
            " V 720p",
            " V 720",
            " V HEVC",
            " V  HEVC",
            " V 1080",
            " Versione 1080p",
            " 720p HEVC",
            " Ver 720",
            " 720p HEVC",
            " 720p",
        ]
        self.cache = tvcache.TVCache(self, min_time=30)  # only poll ilCorsaroNero every 30 minutes max
@staticmethod
def _reverseQuality(quality):
quality_string = ""
if quality == Quality.SDTV:
quality_string = " HDTV x264"
if quality == Quality.SDDVD:
quality_string = " DVDRIP"
elif quality == Quality.HDTV:
quality_string = " 720p HDTV x264"
elif quality == Quality.FULLHDTV:
quality_string = " 1080p HDTV x264"
elif quality == Quality.RAWHDTV:
quality_string = " 1080i HDTV mpeg2"
elif quality == Quality.HDWEBDL:
quality_string = " 720p WEB-DL h264"
elif quality == Quality.FULLHDWEBDL:
quality_string = " 1080p WEB-DL h264"
elif quality == Quality.HDBLURAY:
quality_string = " 720p Bluray x264"
elif quality == Quality.FULLHDBLURAY:
quality_string = " 1080p Bluray x264"
return quality_string
    @staticmethod
    def _episodeQuality(torrent_rows):
        """
        Return The quality from the scene episode HTML row.
        """
        # Quality tokens live in the category link's href (underscores act
        # as word separators).
        file_quality = (torrent_rows("td"))[1].find("a")["href"].replace("_", " ")
        logger.debug("Episode quality: {0}".format(file_quality))
        def checkName(options, func):
            # Apply `func` (any/all) over per-option regex matches.
            return func([re.search(option, file_quality, re.I) for option in options])
        # Feature flags extracted from the quality string; the branch ladder
        # below combines them into a single Quality constant.
        dvdOptions = checkName(["dvd", "dvdrip", "dvdmux", "DVD9", "DVD5"], any)
        bluRayOptions = checkName(["BD", "BDmux", "BDrip", "BRrip", "Bluray"], any)
        sdOptions = checkName(["h264", "divx", "XviD", "tv", "TVrip", "SATRip", "DTTrip", "Mpeg2"], any)
        hdOptions = checkName(["720p"], any)
        fullHD = checkName(["1080p", "fullHD"], any)
        webdl = checkName(["webdl", "webmux", "webrip", "dl-webmux", "web-dlmux", "webdl-mux", "web-dl", "webdlmux", "dlmux"], any)
        if sdOptions and not dvdOptions and not fullHD and not hdOptions:
            return Quality.SDTV
        elif dvdOptions:
            return Quality.SDDVD
        elif hdOptions and not bluRayOptions and not fullHD and not webdl:
            return Quality.HDTV
        elif not hdOptions and not bluRayOptions and fullHD and not webdl:
            return Quality.FULLHDTV
        elif hdOptions and not bluRayOptions and not fullHD and webdl:
            return Quality.HDWEBDL
        elif not hdOptions and not bluRayOptions and fullHD and webdl:
            return Quality.FULLHDWEBDL
        elif bluRayOptions and hdOptions and not fullHD:
            return Quality.HDBLURAY
        elif bluRayOptions and fullHD and not hdOptions:
            return Quality.FULLHDBLURAY
        else:
            return Quality.UNKNOWN
    def _is_italian(self, name):
        """Return True if *name* looks like an Italian release: either "ita"
        appears before a subtitle marker, or "ita" appears anywhere and no
        subtitle marker is present."""
        if not name or name == "None":
            return False
        subFound = italian = False
        for sub in self.sub_string:
            if re.search(sub, name, re.I):
                subFound = True
            else:
                continue
            # Only the part before the subtitle marker counts: "ita" after
            # the marker would describe the subtitles, not the audio.
            if re.search("ita", name.split(sub)[0], re.I):
                logger.debug("Found Italian release: " + name)
                italian = True
                break
        # No subtitle marker at all: a bare "ita" anywhere qualifies.
        if not subFound and re.search("ita", name, re.I):
            logger.debug("Found Italian release: " + name)
            italian = True
        return italian
@staticmethod
def _is_english(name):
if not name or name == "None":
return False
english = False
if re.search("eng", name, re.I):
logger.debug("Found English release: " + name)
english = True
return english
    @staticmethod
    def _is_season_pack(name):
        """Return True when *name* parses to a release covering every known
        episode of its season; implicitly returns None otherwise."""
        try:
            parse_result = NameParser(tryIndexers=True).parse(name)
        except (InvalidNameException, InvalidShowException) as error:
            logger.debug("{0}".format(error))
            return False
        # Compare the parsed episode list against the episode count recorded
        # in the local database for that show/season.
        main_db_con = db.DBConnection()
        sql_selection = "select count(*) as count from tv_episodes where showid = ? and season = ?"
        episodes = main_db_con.select(sql_selection, [parse_result.show.indexerid, parse_result.season_number])
        if int(episodes[0]["count"]) == len(parse_result.episode_numbers):
            return True
@staticmethod
def _magnet_from_result(info_hash, title):
return "magnet:?xt=urn:btih:{hash}&dn={title}&tr={trackers}".format(
hash=info_hash, title=quote_plus(title), trackers="http://tracker.tntvillage.scambioetico.org:2710/announce"
)
    def search(self, search_params, age=0, ep_obj=None):
        """Search the provider and return a list of result dicts.

        :param search_params: dict mapping a search mode (e.g. "RSS",
            "Episode") to a list of search strings.
        :param age: unused here, kept for provider API compatibility.
        :param ep_obj: unused here, kept for provider API compatibility.
        :return: list of result dicts ({"title", "link", "size", "seeders",
            "leechers", "hash"}), sorted by seeders descending per mode.
        """
        results = []
        for mode in search_params:
            items = []
            logger.debug(_("Search Mode: {mode}".format(mode=mode)))
            for search_string in search_params[mode]:
                if search_string == "":
                    continue
                search_string = str(search_string).replace(".", " ")
                logger.debug(_("Search String: {search_string}".format(search_string=search_string)))
                last_page = False
                # Walk the provider's paginated results, up to self.max_pages.
                for page in range(0, self.max_pages):
                    if last_page:
                        break
                    logger.debug("Processing page {0} of results".format(page))
                    search_url = self.urls["search"].format(search_string, page)
                    data = self.get_url(search_url, returns="text")
                    if not data:
                        logger.debug(_("No data returned from provider"))
                        continue
                    try:
                        with BS4Parser(data, "html5lib") as html:
                            # The results table is located via its header row.
                            table_header = html.find("tr", class_="bordo")
                            torrent_table = table_header.find_parent("table") if table_header else None
                            if not torrent_table:
                                logger.exception("Could not find table of torrents")
                                continue
                            torrent_rows = torrent_table("tr")
                            # Continue only if one Release is found
                            if len(torrent_rows) < 6 or len(torrent_rows[2]("td")) == 1:
                                logger.debug("Data returned from provider does not contain any torrents")
                                last_page = True
                                continue
                            # Fewer rows than a full page means this is the last page.
                            if len(torrent_rows) < 45:
                                last_page = True
                            for result in torrent_rows[2:-3]:
                                result_cols = result("td")
                                if len(result_cols) == 1:
                                    # Ignore empty rows in the middle of the table
                                    continue
                                try:
                                    title = result("td")[1].get_text(strip=True)
                                    torrent_size = result("td")[2].get_text(strip=True)
                                    info_hash = result("td")[3].find("input", class_="downarrow")["value"].upper()
                                    download_url = self._magnet_from_result(info_hash, title)
                                    seeders = try_int(result("td")[5].get_text(strip=True))
                                    leechers = try_int(result("td")[6].get_text(strip=True))
                                    size = convert_size(torrent_size) or -1
                                except (AttributeError, IndexError, TypeError):
                                    # Malformed row; skip it.
                                    continue
                                # Rewrite the provider's quality markers into a
                                # quality suffix SickChill's parser understands.
                                filename_qt = self._reverseQuality(self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break
                                if Quality.nameQuality(title) == Quality.UNKNOWN:
                                    title += filename_qt
                                if not self._is_italian(title) and not self.subtitle:
                                    logger.debug("Torrent is subtitled, skipping: {0}".format(title))
                                    continue
                                if self.engrelease and not self._is_english(title):
                                    logger.debug("Torrent isn't english audio/subtitled, skipping: {0}".format(title))
                                    continue
                                # Normalize the show-title part of the release name
                                # back to the title that was searched for.
                                search_show = re.split(r"([Ss][\d{1,2}]+)", search_string)[0]
                                show_title = search_show
                                ep_params = ""
                                rindex = re.search(r"([Ss][\d{1,2}]+)", title)
                                if rindex:
                                    show_title = title[: rindex.start()]
                                    ep_params = title[rindex.start() :]
                                if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                    new_title = search_show + ep_params
                                    title = new_title
                                if not all([title, download_url]):
                                    continue
                                # Season packs drop the episode token from the name.
                                if self._is_season_pack(title):
                                    title = re.sub(r"([Ee][\d{1,2}\-?]+)", "", title)
                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    logger.debug(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                            title, seeders, leechers
                                        )
                                    )
                                    continue
                                item = {"title": title, "link": download_url, "size": size, "seeders": seeders, "leechers": leechers, "hash": info_hash}
                                if mode != "RSS":
                                    logger.debug(
                                        _(
                                            "Found result: {title} with {seeders} seeders and {leechers} leechers".format(
                                                title=title, seeders=seeders, leechers=leechers
                                            )
                                        )
                                    )
                                items.append(item)
                    except Exception as error:
                        logger.exception("Failed parsing provider. Error: {0}".format(error))
            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get("seeders", 0)), reverse=True)
            results += items
        return results
| h3llrais3r/SickRage | sickchill/oldbeard/providers/ilcorsaronero.py | Python | gpl-3.0 | 12,888 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Docker JSON parser."""
import unittest
from plaso.lib import definitions
from plaso.parsers import docker
from tests.parsers import test_lib
class DockerJSONUnitTest(test_lib.ParserTestCase):
  """Tests for the Docker JSON parser."""
  def testParseContainerLog(self):
    """Tests the _ParseContainerLogJSON function."""
    # Hex identifier of the container fixture under the test data directory.
    container_identifier = (
        'e7d0b7ea5ccf08366e2b0c8afa2318674e8aefe802315378125d2bb83fe3110c')
    parser = docker.DockerJSONParser()
    path_segments = [
        'docker', 'containers', container_identifier, 'container-json.log']
    storage_writer = self._ParseFile(path_segments, parser)
    self.assertEqual(storage_writer.number_of_events, 10)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    events = list(storage_writer.GetEvents())
    # One expected timestamp per event; precision beyond microseconds is
    # truncated by the parser (see the annotated entry below).
    expected_timestamps = [
        '2016-01-07 16:49:10.000000',
        '2016-01-07 16:49:10.200000',
        '2016-01-07 16:49:10.230000',
        '2016-01-07 16:49:10.237000',
        '2016-01-07 16:49:10.237200',
        '2016-01-07 16:49:10.237220',
        '2016-01-07 16:49:10.237222',
        '2016-01-07 16:49:10.237222',  # losing sub microsec info
        '2016-01-07 16:49:10.237222',
        '2016-01-07 16:49:10.237222']
    # All fixture events share the same log line and source.
    expected_event_values = {
        'container_id': container_identifier,
        'data_type': 'docker:json:container:log',
        'log_line': (
            '\x1b]0;root@e7d0b7ea5ccf: '
            '/home/plaso\x07root@e7d0b7ea5ccf:/home/plaso# ls\r\n'),
        'log_source': 'stdout'}
    for index, event in enumerate(events):
      self.CheckTimestamp(event.timestamp, expected_timestamps[index])
      self.CheckEventValues(storage_writer, event, expected_event_values)
  def testParseContainerConfig(self):
    """Tests the _ParseContainerConfigJSON function."""
    container_identifier = (
        'e7d0b7ea5ccf08366e2b0c8afa2318674e8aefe802315378125d2bb83fe3110c')
    parser = docker.DockerJSONParser()
    path_segments = [
        'docker', 'containers', container_identifier, 'config.json']
    storage_writer = self._ParseFile(path_segments, parser)
    self.assertEqual(storage_writer.number_of_events, 2)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    events = list(storage_writer.GetEvents())
    # The config file yields two events: container start then creation.
    expected_event_values = {
        'action': 'Container Started',
        'container_id': container_identifier,
        'container_name': 'e7d0b7ea5ccf',
        'data_type': 'docker:json:container',
        'date_time': '2016-01-07 16:49:08.674873'}
    self.CheckEventValues(storage_writer, events[0], expected_event_values)
    expected_event_values = {
        'action': 'Container Created',
        'container_id': container_identifier,
        'container_name': 'e7d0b7ea5ccf',
        'data_type': 'docker:json:container',
        'date_time': '2016-01-07 16:49:08.507979'}
    self.CheckEventValues(storage_writer, events[1], expected_event_values)
  def testParseLayerConfig(self):
    """Tests the _ParseLayerConfigJSON function."""
    # Hex identifier of the image layer fixture under docker/graph.
    layer_identifier = (
        '3c9a9d7cc6a235eb2de58ca9ef3551c67ae42a991933ba4958d207b29142902b')
    parser = docker.DockerJSONParser()
    path_segments = ['docker', 'graph', layer_identifier, 'json']
    storage_writer = self._ParseFile(path_segments, parser)
    self.assertEqual(storage_writer.number_of_events, 1)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    events = list(storage_writer.GetEvents())
    expected_event_values = {
        'command': (
            '/bin/sh -c sed -i \'s/^#\\s*\\(deb.*universe\\)$/\\1/g\' '
            '/etc/apt/sources.list'),
        'data_type': 'docker:json:layer',
        'date_time': '2015-10-12 17:27:03.079273',
        'layer_id': layer_identifier,
        'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
    self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| kiddinn/plaso | tests/parsers/docker.py | Python | apache-2.0 | 4,229 |
import cherrypy
# 這是 C2G6 類別的定義
class C2G6(object):
    """CherryPy handler class serving team c2g6's Brython drawing demos.

    Each exposed method returns a complete HTML page whose embedded
    ``text/python`` script is executed client-side by Brython.
    """
    # index guides visitors to the pages that follow.
    @cherrypy.expose
    def index(self, *args, **kwargs):
        """Landing page listing the drawing demos via relative links."""
        outstring = '''
        這是 2014C2 協同專案下的 c2g6 分組程式開發網頁, 以下為 W12 的任務執行內容.<br />
        <!-- 這裡採用相對連結, 而非網址的絕對連結 (這一段為 html 註解) -->
        <a href="fillpoly">c2g6 fillpoly 繪圖</a><br />
        <a href="drawline">c2g6 drawline 繪圖</a><br />
        <a href="triangle">c2g6 triangle 繪圖</a><br />
        <a href="triangle2">c2g6 triangle2 繪圖</a><br />
        <a href="japanflag">c2g6 japanflag 繪圖</a><br />
        <a href="usaflag">c2g6 usaflag 繪圖</a><br />
'''
        return outstring
    # CherryPy methods below were built by the c2g2 group; fillpoly uses
    # Brython to draw in the browser.
    '''
    假如採用下列規畫
    import programs.c2g6 as c2g6
    root.c2g6 = c2g6.C2G6()
    則程式啟動後, 可以利用 /c2g2/fillpoly 呼叫函式執行
    '''
    @cherrypy.expose
    def fillpoly(self, *args, **kwargs):
        """Draw filled polygons plus two fans of lines on a canvas."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入數學模組的所有方法
from math import *
# 導入時間模組
import time
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 定義座標轉換(0, 0) 到 (75, 20)
def change_ref_system(x, y):
    return (20 + x * 8, 420 - y * 20)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
def fill():
    ctx.beginPath()
    ctx.moveTo(75,50)
    ctx.lineTo(100,75)
    ctx.lineTo(100,25)
    ctx.fill()
def star():
    ctx.beginPath()
    ctx.moveTo(0,50)
    ctx.lineTo(11,16)
    ctx.lineTo(48,16)
    ctx.fill()
ctx.fillStyle = "blue"
fill()
star()
x1, y1 = change_ref_system(0, 0)
for 索引 in range(0, 70, 4):
    x2, y2 = change_ref_system(索引, 20)
    draw_line(x1, y1, x2, y2, linethick=3, color="blue")
x1, y1 = change_ref_system(70, 0)
for 索引 in range(0, 70, 4):
    x2, y2 = change_ref_system(索引, 20)
    draw_line(x1, y1, x2, y2, linethick=3, color="red")
</script>
</body>
</html>
        '''
        return outstring
    '''
    假如採用下列規畫
    import programs.c2g6 as c2g6
    root.c2g6 = c2g6.C2G6()
    則程式啟動後, 可以利用 /c2g1/drawline 呼叫函式執行
    '''
    @cherrypy.expose
    def drawline(self, *args, **kwargs):
        """Draw a single straight line on a canvas."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "black"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
draw_line(0, 0, 100, 100)
</script>
</body>
</html>
        '''
        return outstring
    @cherrypy.expose
    def triangle(self, *args, **kwargs):
        """Draw the outline of a triangle (y-axis flipped to math convention)."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "blue"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
draw_line(100, 100, 150, 250)
draw_line(150, 250, 400, 400)
draw_line(400, 400, 100, 100)
</script>
</body>
</html>
        '''
        return outstring
    @cherrypy.expose
    def triangle2(self, *args, **kwargs):
        """Draw a red-filled triangle with a blue outline."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# 定義畫線函式
def draw_line(x1, y1, x2, y2, linethick = 3, color = "blue"):
    ctx.beginPath()
    ctx.lineWidth = linethick
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = color
    ctx.stroke()
def fill():
    ctx.beginPath()
    ctx.moveTo(100,100)
    ctx.lineTo(150,250)
    ctx.lineTo(400,400)
    ctx.fill()
ctx.fillStyle = "red"
fill()
draw_line(100, 100, 150, 250, linethick = 3, color = "blue")
draw_line(150, 250, 400, 400, linethick = 3, color = "blue")
draw_line(400, 400, 100, 100, linethick = 3, color = "blue")
</script>
</body>
</html>
        '''
        return outstring
    '''
    假如採用下列規畫
    import programs.c2g7 as c2g7
    root.c2g7 = c2g7.C2G7()
    則程式啟動後, 可以利用 /c2g1/drawline 呼叫函式執行
    '''
    @cherrypy.expose
    def japanflag(self, *args, **kwargs):
        """Draw the Japanese flag: white field, black border, red disc."""
        # Bug fix in the embedded script below: the original called
        # ctx.arc(..., flag_h/8, math.pi*2, true) — "true" is a NameError in
        # Brython (Python) and the start angle must be 0 for a full circle.
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="300" height="200"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
import math
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 canvas.height 單位光點
# ctx.setTransform(1, 0, 0, -1, 0, canvas.height)
# 以下採用 canvas 原始座標繪圖
flag_w = canvas.width
flag_h = canvas.height
circle_x = flag_w/2
circle_y = flag_h/2
# 先畫白
ctx.fillStyle= 'black'
ctx.fillRect(0,0,flag_w,flag_h)
ctx.fillStyle= 'white'
ctx.fillRect(0,0,flag_w-10,flag_h-10)
# 白日:red心
ctx.beginPath()
ctx.arc(circle_x, circle_y, flag_w/8, 0, math.pi*2, True)
ctx.closePath()
# 填色設為red
ctx.fillStyle = 'red'
ctx.fill()
</script>
</body>
</html>
        '''
        return outstring
    '''
    假如採用下列規畫
    import programs.c2g7 as c2g7
    root.c2g7 = c2g7.C2G7()
    則程式啟動後, 可以利用 /c2g1/drawline 呼叫函式執行
    '''
    @cherrypy.expose
    def usaflag(self, *args, **kwargs):
        """Draw a US-flag-like figure: stripes, blue canton, white stars."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/Brython2.1.0-20140419-113919/brython.js"></script>
</head>
<body onload="brython({debug:1, cache:'version'})">
<canvas id="plotarea" width="800" height="800"></canvas>
<script type="text/python">
# 導入 doc
from browser import doc
import math
# 準備繪圖畫布
canvas = doc["plotarea"]
ctx = canvas.getContext("2d")
# 進行座標轉換, x 軸不變, y 軸反向且移動 800 光點
ctx.setTransform(1, 0, 0, -1, 0, 800)
# x, y 為中心, r 為半徑, angle 旋轉角, solid 空心或實心, color 顏色
def star(x, y, r, angle=0, solid=False, color="white"):
    # 以 x, y 為圓心, 計算五個外點
    deg = math.pi/180
    # 圓心到水平線距離
    a = r*math.cos(72*deg)
    # a 頂點向右到內點距離
    b = (r*math.cos(72*deg)/math.cos(36*deg))*math.sin(36*deg)
    # 利用畢氏定理求內點半徑
    rin = math.sqrt(a**2 + b**2)
    # 查驗 a, b 與 rin
    #print(a, b, rin)
    if(solid):
        ctx.beginPath()
    for i in range(5):
        xout = (x + r*math.sin(72*deg*i+angle*deg))
        yout = (y + r*math.cos(72*deg*i+angle*deg))
        # 外點增量 + 1
        xout2 = x + r*math.sin(72*deg*(i+1)+angle*deg)
        yout2 = y + r*math.cos(72*deg*(i+1)+angle*deg)
        xin = x + rin*math.sin(72*deg*i+36*deg+angle*deg)
        yin = y + rin*math.cos(72*deg*i+36*deg+angle*deg)
        # 查驗外點與內點座標
        #print(xout, yout, xin, yin)
        if(solid):
            # 填色
            if(i==0):
                ctx.moveTo(xout, yout)
                ctx.lineTo(xin, yin)
                ctx.lineTo(xout2, yout2)
            else:
                ctx.lineTo(xin, yin)
                ctx.lineTo(xout2, yout2)
    if(solid):
        ctx.fillStyle = 'white'
        ctx.fill()
for i in range(7):
    ctx.fillStyle='rgb(255, 0, 0)'
    ctx.fillRect(0, 0+40*i, 390, 20)
ctx.fillStyle='rgb(0, 0, 149)'
ctx.fillRect(0, 120, 210, 140)
for i in range(6):
    for j in range(5):
        star(20+34*i, 134+28*j, 8, 0, True, "white")
for i in range(5):
    for j in range(4):
        star(36+34*i, 148+28*j, 8, 0, True, "white")
</script>
</body>
</html>
        '''
        return outstring
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import copy
class Polybios(object):
    """Classic 5x5 Polybius-square cipher (25-letter alphabet, J omitted)."""
    def __init__(self):
        # Working buffers kept for API compatibility with the original class.
        self.cadena = ''
        self.textoCifrado = ''
        self.textoClaro = ''
        # Alphabet laid out row by row into the 5x5 square.
        self.alfabeto = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
        self.alfabeto_en_may = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
        # Row/column coordinates for each letter. Fixed the original typo
        # where '42' was duplicated as a second '41'.
        self.alfabeto_numeros = ['11', '12', '13', '14', '15', '21', '22', '23',
            '24', '25', '31', '32', '33', '34', '35', '41', '42', '43', '44',
            '45', '51', '52', '53', '54', '55']
        # Build the 5x5 square from the alphabet.
        tabla = [[0] * 5 for _ in range(5)]
        cont = 0
        for y in range(5):
            for x in range(5):
                tabla[y][x] = self.alfabeto[cont]
                cont = cont + 1
        self.tabla = tabla
    def definirAlfabeto(self, alfabeto):
        """Select one of the predefined alphabets by short code.

        NOTE(review): only 'en_may' resolves to an attribute that actually
        exists on this class; the other branches reference alfabeto_es_min,
        alfabeto_es_may, alfabeto_num and alfabeto_base64, which are never
        defined and would raise AttributeError — confirm before relying on
        those codes.
        """
        if(alfabeto == "es_min"):
            self.alfabeto = copy.copy(self.alfabeto_es_min)
        if(alfabeto == "es_may"):
            self.alfabeto = copy.copy(self.alfabeto_es_may)
        if(alfabeto == "en_may"):
            self.alfabeto = copy.copy(self.alfabeto_en_may)
        if(alfabeto == "num"):
            self.alfabeto = copy.copy(self.alfabeto_num)
        if(alfabeto == "b64"):
            self.alfabeto = copy.copy(self.alfabeto_base64)
    @staticmethod
    def getStr(x, format='%02s'):
        """Format every item of ``x`` with ``format`` and concatenate.

        Was an instance method missing ``self``, so instance calls misbound
        the first argument; made static to fix both bound and unbound use.
        """
        return ''.join(format % i for i in x)
    @staticmethod
    def imprimir_tabla(tabla):
        """Print the 5x5 square with 1-based row/column headers.

        Was an instance method missing ``self`` and referenced bare
        ``getStr`` (a NameError at call time); made static and qualified.
        """
        print(' ' + Polybios.getStr(range(1, 6)))
        for row in range(0, len(tabla)):
            print(str(row + 1) + Polybios.getStr(tabla[row]))
    def cifrar(self, palabras, rellenoB64, tabla):
        """Encrypt ``palabras``: each letter becomes row digit + column digit.

        ``rellenoB64`` and ``tabla`` are unused but kept so the signature
        stays compatible with the other ciphers in this project.
        """
        cifrado = ''
        for ch in palabras:
            for row in range(len(self.tabla)):
                if ch in self.tabla[row]:
                    x = str(self.tabla[row].index(ch) + 1)
                    y = str(row + 1)
                    cifrado += y + x
        print(cifrado)
        return cifrado
    def descifrar(self, numeros, rellenoB64, tabla):
        """Decrypt a digit string produced by ``cifrar`` (row then column)."""
        texto = ''
        for index in range(0, len(numeros), 2):
            y = int(numeros[index]) - 1
            x = int(numeros[index + 1]) - 1
            texto += self.tabla[y][x]
        print(texto)
        return texto
| pordnajela/AlgoritmosCriptografiaClasica | Sustitucion_Monoalfabetica/Polybios.py | Python | apache-2.0 | 1,960 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import netsvc
class sale_order_line_make_invoice(osv.osv_memory):
    # Transient wizard: creates customer invoices from selected sale order
    # lines (old-style OpenERP osv API).
    _name = "sale.order.line.make.invoice"
    _description = "Sale OrderLine Make_invoice"
    def make_invoices(self, cr, uid, ids, context=None):
        """
        To make invoices.

        Creates one customer invoice per sale order touched by the selected
        sale order lines (context['active_ids']), links each invoice to its
        order, and advances fully invoiced orders through the workflow.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: the ID or list of IDs
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None: context = {}
        res = False
        invoices = {}
        #TODO: merge with sale.py/make_invoice
        def make_invoice(order, lines):
            """
            To make invoices.

            Builds and creates one out_invoice for ``order`` covering the
            already-created invoice line ids in ``lines``.

            @param order: browse record of the sale order being invoiced.
            @param lines: list of account.invoice.line ids to attach.
            @return: id of the created account.invoice.
            """
            a = order.partner_id.property_account_receivable.id
            if order.partner_id and order.partner_id.property_payment_term.id:
                pay_term = order.partner_id.property_payment_term.id
            else:
                pay_term = False
            inv = {
                'name': order.name,
                'origin': order.name,
                'type': 'out_invoice',
                'reference': "P%dSO%d" % (order.partner_id.id, order.id),
                'account_id': a,
                'partner_id': order.partner_invoice_id.id,
                'invoice_line': [(6, 0, lines)],
                'currency_id' : order.pricelist_id.currency_id.id,
                'comment': order.note,
                'payment_term': pay_term,
                'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
                'user_id': order.user_id and order.user_id.id or False,
                'company_id': order.company_id and order.company_id.id or False,
                'date_invoice': fields.date.today(),
            }
            inv_id = self.pool.get('account.invoice').create(cr, uid, inv)
            return inv_id
        sales_order_line_obj = self.pool.get('sale.order.line')
        sales_order_obj = self.pool.get('sale.order')
        wf_service = netsvc.LocalService('workflow')
        # Group newly created invoice lines by their sale order; skip lines
        # that are already invoiced or still draft/cancelled.
        for line in sales_order_line_obj.browse(cr, uid, context.get('active_ids', []), context=context):
            if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
                if not line.order_id in invoices:
                    invoices[line.order_id] = []
                line_id = sales_order_line_obj.invoice_line_create(cr, uid, [line.id])
                for lid in line_id:
                    invoices[line.order_id].append(lid)
        for order, il in invoices.items():
            res = make_invoice(order, il)
            # Parameterized insert linking the order to its new invoice.
            cr.execute('INSERT INTO sale_order_invoice_rel \
                    (order_id,invoice_id) values (%s,%s)', (order.id, res))
            # If every line of the order is now invoiced, fire the workflow
            # signal and push the order back to 'progress'.
            flag = True
            data_sale = sales_order_obj.browse(cr, uid, order.id, context=context)
            for line in data_sale.order_line:
                if not line.invoiced:
                    flag = False
                    break
            if flag:
                wf_service.trg_validate(uid, 'sale.order', order.id, 'manual_invoice', cr)
                sales_order_obj.write(cr, uid, [order.id], {'state': 'progress'})
        if not invoices:
            raise osv.except_osv(_('Warning!'), _('Invoice cannot be created for this Sales Order Line due to one of the following reasons:\n1.The state of this sales order line is either "draft" or "cancel"!\n2.The Sales Order Line is Invoiced!'))
        if context.get('open_invoices', False):
            return self.open_invoices(cr, uid, ids, res, context=context)
        return {'type': 'ir.actions.act_window_close'}
    def open_invoices(self, cr, uid, ids, invoice_ids, context=None):
        """ open a view on one of the given invoice_ids """
        ir_model_data = self.pool.get('ir.model.data')
        form_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_form')
        form_id = form_res and form_res[1] or False
        tree_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_tree')
        tree_id = tree_res and tree_res[1] or False
        # Action dictionary opening the created invoice in form view.
        return {
            'name': _('Invoice'),
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'account.invoice',
            'res_id': invoice_ids,
            'view_id': False,
            'views': [(form_id, 'form'), (tree_id, 'tree')],
            'context': {'type': 'out_invoice'},
            'type': 'ir.actions.act_window',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/asterisk | openerp/addons/sale/wizard/sale_line_invoice.py | Python | agpl-3.0 | 5,889 |
"""
Static Pages page for a course.
"""
from .course_page import CoursePage
class StaticPagesPage(CoursePage):
    """
    Static Pages page for a course.

    PageObject wrapper; URL construction and browser helpers come from
    CoursePage.
    """
    # URL suffix appended to the course URL by CoursePage.
    url_path = "tabs"
    def is_browser_on_page(self):
        # The static-pages view is identified by this CSS class on <body>.
        return self.is_css_present('body.view-static-pages')
| XiaodunServerGroup/xiaodun-platform | common/test/acceptance/pages/studio/edit_tabs.py | Python | agpl-3.0 | 285 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to Python generators of array data.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.platform import tf_logging as logging
def fit_generator(model,
                  generator,
                  steps_per_epoch=None,
                  epochs=1,
                  verbose=1,
                  callbacks=None,
                  validation_data=None,
                  validation_steps=None,
                  class_weight=None,
                  max_queue_size=10,
                  workers=1,
                  use_multiprocessing=False,
                  shuffle=True,
                  initial_epoch=0):
  """See docstring for `Model.fit_generator`.

  Trains `model` on data yielded batch-by-batch by `generator` (a Python
  generator or a `keras.utils.Sequence`), optionally validating against
  `validation_data` at the end of each epoch. Returns `model.history`.
  """
  wait_time = 0.01  # in seconds
  epoch = initial_epoch
  do_validation = bool(validation_data)
  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the`keras.utils.Sequence'
                    ' class.'))
  # steps_per_epoch can only be inferred from a Sequence (it has a length).
  if steps_per_epoch is None:
    if is_sequence:
      steps_per_epoch = len(generator)
    else:
      raise ValueError('`steps_per_epoch=None` is only valid for a'
                       ' generator based on the `keras.utils.Sequence`'
                       ' class. Please specify `steps_per_epoch` or use'
                       ' the `keras.utils.Sequence` class.')
  # python 2 has 'next', 3 has '__next__'
  # avoid any explicit version checks
  val_gen = (
      hasattr(validation_data, 'next') or
      hasattr(validation_data, '__next__') or
      isinstance(validation_data, Sequence))
  if (val_gen and not isinstance(validation_data, Sequence) and
      not validation_steps):
    raise ValueError('`validation_steps=None` is only valid for a'
                     ' generator based on the `keras.utils.Sequence`'
                     ' class. Please specify `validation_steps` or use'
                     ' the `keras.utils.Sequence` class.')
  # Prepare display labels.
  out_labels = model.metrics_names
  callback_metrics = out_labels + ['val_%s' % n for n in out_labels]
  # prepare callbacks
  model.history = cbks.History()
  callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
  if verbose:
    callbacks += [cbks.ProgbarLogger(count_mode='steps')]
  callbacks = cbks.CallbackList(callbacks)
  # it's possible to callback a different model than self:
  if hasattr(model, 'callback_model') and model.callback_model:
    callback_model = model.callback_model
  else:
    callback_model = model
  callbacks.set_model(callback_model)
  callbacks.set_params({
      'epochs': epochs,
      'steps': steps_per_epoch,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics,
  })
  callbacks.on_train_begin()
  enqueuer = None
  # NOTE(review): val_enqueuer is never assigned past None in this function,
  # so the corresponding stop() in the finally block is effectively a no-op.
  val_enqueuer = None
  try:
    if do_validation and not val_gen:
      # Prepare data for validation
      if len(validation_data) == 2:
        val_x, val_y = validation_data  # pylint: disable=unpacking-non-sequence
        val_sample_weight = None
      elif len(validation_data) == 3:
        val_x, val_y, val_sample_weight = validation_data  # pylint: disable=unpacking-non-sequence
      else:
        raise ValueError(
            '`validation_data` should be a tuple '
            '`(val_x, val_y, val_sample_weight)` '
            'or `(val_x, val_y)`. Found: ' + str(validation_data))
      val_x, val_y, val_sample_weights = model._standardize_user_data(
          val_x, val_y, val_sample_weight)
      val_data = val_x + val_y + val_sample_weights
      if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        # Append the learning-phase flag (0 = test) for validation.
        val_data += [0.]
      for cbk in callbacks:
        cbk.validation_data = val_data
    # Background prefetching: an enqueuer fills a queue from the generator.
    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            shuffle=shuffle)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            wait_time=wait_time)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      # workers == 0: consume the generator on the main thread.
      if is_sequence:
        output_generator = iter(generator)
      else:
        output_generator = generator
    callback_model.stop_training = False
    # Construct epoch logs.
    epoch_logs = {}
    while epoch < epochs:
      # Stateful metrics accumulate across batches; reset them per epoch.
      for m in model.stateful_metric_functions:
        m.reset_states()
      callbacks.on_epoch_begin(epoch)
      steps_done = 0
      batch_index = 0
      while steps_done < steps_per_epoch:
        generator_output = next(output_generator)
        if not hasattr(generator_output, '__len__'):
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
        if len(generator_output) == 2:
          x, y = generator_output
          sample_weight = None
        elif len(generator_output) == 3:
          x, y, sample_weight = generator_output
        else:
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
        # build batch logs
        batch_logs = {}
        if isinstance(x, list):
          batch_size = x[0].shape[0]
        elif isinstance(x, dict):
          batch_size = list(x.values())[0].shape[0]
        else:
          batch_size = x.shape[0]
        batch_logs['batch'] = batch_index
        batch_logs['size'] = batch_size
        callbacks.on_batch_begin(batch_index, batch_logs)
        outs = model.train_on_batch(
            x, y, sample_weight=sample_weight, class_weight=class_weight)
        if not isinstance(outs, list):
          outs = [outs]
        for l, o in zip(out_labels, outs):
          batch_logs[l] = o
        callbacks.on_batch_end(batch_index, batch_logs)
        batch_index += 1
        steps_done += 1
        # Epoch finished.
        if steps_done >= steps_per_epoch and do_validation:
          if val_gen:
            val_outs = evaluate_generator(
                model,
                validation_data,
                validation_steps,
                workers=workers,
                use_multiprocessing=use_multiprocessing,
                max_queue_size=max_queue_size)
          else:
            # No need for try/except because
            # data has already been validated.
            val_outs = model.evaluate(
                val_x,
                val_y,
                batch_size=batch_size,
                sample_weight=val_sample_weights,
                verbose=0)
          if not isinstance(val_outs, list):
            val_outs = [val_outs]
          # Same labels assumed.
          for l, o in zip(out_labels, val_outs):
            epoch_logs['val_' + l] = o
        if callback_model.stop_training:
          break
      callbacks.on_epoch_end(epoch, epoch_logs)
      epoch += 1
      if callback_model.stop_training:
        break
  finally:
    # Always shut down background workers, even on error/interrupt.
    try:
      if enqueuer is not None:
        enqueuer.stop()
    finally:
      if val_enqueuer is not None:
        val_enqueuer.stop()
  callbacks.on_train_end()
  return model.history
def evaluate_generator(model,
                       generator,
                       steps=None,
                       max_queue_size=10,
                       workers=1,
                       use_multiprocessing=False,
                       verbose=0):
  """See docstring for `Model.evaluate_generator`.

  Evaluates `model` on batches yielded by `generator` for `steps` steps and
  returns either a single scalar (one output) or a list of per-metric
  averages weighted by batch size (stateful metrics take their last value).
  """
  # NOTE(review): both branches of the if/else below assign
  # stateful_metric_indices, so this initial [] is redundant.
  stateful_metric_indices = []
  if hasattr(model, 'metrics'):
    for m in model.stateful_metric_functions:
      m.reset_states()
    stateful_metric_indices = [
        i for i, name in enumerate(model.metrics_names)
        if str(name) in model.stateful_metric_names]
  else:
    stateful_metric_indices = []
  steps_done = 0
  wait_time = 0.01
  all_outs = []
  batch_sizes = []
  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the`keras.utils.Sequence'
                    ' class.'))
  # steps can only be inferred from a Sequence (it has a length).
  if steps is None:
    if is_sequence:
      steps = len(generator)
    else:
      raise ValueError('`steps=None` is only valid for a generator'
                       ' based on the `keras.utils.Sequence` class.'
                       ' Please specify `steps` or use the'
                       ' `keras.utils.Sequence` class.')
  enqueuer = None
  try:
    # Background prefetching: an enqueuer fills a queue from the generator.
    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator, use_multiprocessing=use_multiprocessing)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            wait_time=wait_time)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      if is_sequence:
        output_generator = iter(generator)
      else:
        output_generator = generator
    if verbose == 1:
      progbar = Progbar(target=steps)
    while steps_done < steps:
      generator_output = next(output_generator)
      if not hasattr(generator_output, '__len__'):
        raise ValueError('Output of generator should be a tuple '
                         '(x, y, sample_weight) '
                         'or (x, y). Found: ' + str(generator_output))
      if len(generator_output) == 2:
        x, y = generator_output
        sample_weight = None
      elif len(generator_output) == 3:
        x, y, sample_weight = generator_output
      else:
        raise ValueError('Output of generator should be a tuple '
                         '(x, y, sample_weight) '
                         'or (x, y). Found: ' + str(generator_output))
      outs = model.test_on_batch(x, y, sample_weight=sample_weight)
      if isinstance(x, list):
        batch_size = x[0].shape[0]
      elif isinstance(x, dict):
        batch_size = list(x.values())[0].shape[0]
      else:
        batch_size = x.shape[0]
      if batch_size == 0:
        raise ValueError('Received an empty batch. '
                         'Batches should at least contain one item.')
      all_outs.append(outs)
      steps_done += 1
      batch_sizes.append(batch_size)
      if verbose == 1:
        progbar.update(steps_done)
  finally:
    if enqueuer is not None:
      enqueuer.stop()
  # NOTE(review): 'outs' is only bound inside the loop above; if steps == 0
  # (loop body never runs) this reference would raise NameError — confirm
  # callers always pass steps >= 1.
  if not isinstance(outs, list):
    return np.average(np.asarray(all_outs), weights=batch_sizes)
  else:
    averages = []
    for i in range(len(outs)):
      if i not in stateful_metric_indices:
        # Batch-size-weighted mean of this metric across all batches.
        averages.append(
            np.average([out[i] for out in all_outs], weights=batch_sizes))
      else:
        # Stateful metrics already accumulate; take the final value.
        averages.append(float(all_outs[-1][i]))
    return averages
def predict_generator(model,
                      generator,
                      steps=None,
                      max_queue_size=10,
                      workers=1,
                      use_multiprocessing=False,
                      verbose=0):
  """See docstring for `Model.predict_generator`.

  Draws `steps` batches from `generator`, calls `model.predict_on_batch` on
  each, and concatenates the per-batch results into one array per model
  output. With `workers > 0` batches are produced in background threads or
  processes via an enqueuer; with `workers == 0` the generator is consumed
  on the calling thread.
  """
  steps_done = 0
  wait_time = 0.01  # seconds the GeneratorEnqueuer sleeps while polling
  all_outs = []
  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    # A plain generator cannot be safely shared between processes; each
    # worker would restart it and yield the same data.
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the`keras.utils.Sequence'
                    ' class.'))
  if steps is None:
    # Only a Sequence knows its own length; an open-ended generator does not.
    if is_sequence:
      steps = len(generator)
    else:
      raise ValueError('`steps=None` is only valid for a generator'
                       ' based on the `keras.utils.Sequence` class.'
                       ' Please specify `steps` or use the'
                       ' `keras.utils.Sequence` class.')
  enqueuer = None
  try:
    if workers > 0:
      # Background production: OrderedEnqueuer preserves batch order for
      # Sequences; GeneratorEnqueuer wraps an arbitrary generator.
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator, use_multiprocessing=use_multiprocessing)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing,
            wait_time=wait_time)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      # Synchronous consumption on the calling thread.
      if is_sequence:
        output_generator = iter(generator)
      else:
        output_generator = generator
    if verbose == 1:
      progbar = Progbar(target=steps)
    while steps_done < steps:
      generator_output = next(output_generator)
      if isinstance(generator_output, tuple):
        # Compatibility with the generators
        # used for training.
        if len(generator_output) == 2:
          x, _ = generator_output
        elif len(generator_output) == 3:
          x, _, _ = generator_output
        else:
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
      else:
        # Assumes a generator that only
        # yields inputs (not targets and sample weights).
        x = generator_output
      outs = model.predict_on_batch(x)
      if not isinstance(outs, list):
        outs = [outs]
      if not all_outs:
        # Lazily create one accumulator list per model output.
        for out in outs:
          all_outs.append([])
      for i, out in enumerate(outs):
        all_outs[i].append(out)
      steps_done += 1
      if verbose == 1:
        progbar.update(steps_done)
  finally:
    # Always stop worker threads/processes, even if prediction raised.
    if enqueuer is not None:
      enqueuer.stop()
  # Single output: return a bare array; multiple outputs: a list of arrays.
  # With a single step the batch result is returned without concatenation.
  if len(all_outs) == 1:
    if steps_done == 1:
      return all_outs[0][0]
    else:
      return np.concatenate(all_outs[0])
  if steps_done == 1:
    return [out[0] for out in all_outs]
  else:
    return [np.concatenate(out) for out in all_outs]
| yanchen036/tensorflow | tensorflow/python/keras/engine/training_generator.py | Python | apache-2.0 | 15,488 |
from flask import render_template, redirect, url_for, abort, flash, request,\
current_app
from flask_login import login_required, current_user
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, PostForm
from .. import db
from ..models import Permission, Role, User, Post
from ..decorators import admin_required
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: post composer plus a paginated, newest-first post list."""
    post_form = PostForm()
    if current_user.can(Permission.WRITE_ARTICLES) and \
            post_form.validate_on_submit():
        # Persist the post attributed to the real User object behind the
        # login proxy, then redirect (POST/redirect/GET pattern).
        new_post = Post(body=post_form.body.data,
                        author=current_user._get_current_object())
        db.session.add(new_post)
        return redirect(url_for('.index'))
    requested_page = request.args.get('page', 1, type=int)
    per_page = current_app.config['FLASKY_POSTS_PER_PAGE']
    pagination = Post.query.order_by(Post.timestamp.desc()).paginate(
        requested_page, per_page=per_page, error_out=False)
    return render_template('index.html', form=post_form,
                           posts=pagination.items, pagination=pagination)
@main.route('/user/<username>')
def user(username):
    """Profile page for *username* with that user's posts, newest first."""
    profile_owner = User.query.filter_by(username=username).first_or_404()
    page_number = request.args.get('page', 1, type=int)
    pagination = profile_owner.posts.order_by(Post.timestamp.desc()).paginate(
        page_number,
        per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    return render_template('user.html', user=profile_owner,
                           posts=pagination.items, pagination=pagination)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit their own name, location and about-me."""
    editable_fields = ('name', 'location', 'about_me')
    form = EditProfileForm()
    if form.validate_on_submit():
        # Copy each submitted field onto the current user and persist.
        for field in editable_fields:
            setattr(current_user, field, getattr(form, field).data)
        db.session.add(current_user)
        flash('Your profile has been updated.')
        return redirect(url_for('.user', username=current_user.username))
    # GET (or failed validation): pre-populate the form with current values.
    for field in editable_fields:
        getattr(form, field).data = getattr(current_user, field)
    return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
# SECURITY FIX: this decorator was commented out.  Without it, ANY logged-in
# user could edit any profile — including role and confirmation status — a
# straightforward privilege-escalation hole.  `admin_required` is already
# imported at the top of this module.
@admin_required
def edit_profile_admin(id):
    """Administrator-only editor for any user's full profile.

    Unlike `edit_profile`, this also exposes email, username, confirmation
    flag and role.  404s if no user with *id* exists.
    """
    user = User.query.get_or_404(id)
    form = EditProfileAdminForm(user=user)
    if form.validate_on_submit():
        user.email = form.email.data
        user.username = form.username.data
        user.confirmed = form.confirmed.data
        # The form stores the role as its primary key; resolve to the object.
        user.role = Role.query.get(form.role.data)
        user.name = form.name.data
        user.location = form.location.data
        user.about_me = form.about_me.data
        db.session.add(user)
        flash('The profile has been updated.')
        return redirect(url_for('.user', username=user.username))
    # GET (or failed validation): pre-populate the form from the record.
    form.email.data = user.email
    form.username.data = user.username
    form.confirmed.data = user.confirmed
    form.role.data = user.role_id
    form.name.data = user.name
    form.location.data = user.location
    form.about_me.data = user.about_me
    return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>')
def post(id):
    """Permalink page for one post (rendered through the shared list template)."""
    requested = Post.query.get_or_404(id)
    return render_template('post.html', posts=[requested])
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
    """Edit an existing post; permitted for its author or an administrator."""
    target = Post.query.get_or_404(id)
    # De Morgan of the original check: reject unless author or admin.
    if not (current_user == target.author or
            current_user.can(Permission.ADMINISTER)):
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        target.body = form.body.data
        db.session.add(target)
        flash('The post has been updated.')
        return redirect(url_for('.post', id=target.id))
    form.body.data = target.body
    return render_template('edit_post.html', form=form)
| DonYum/LogAna | app/main/views.py | Python | mit | 3,863 |
# first import everything we will need for the scheduling
import astropy.units as u
import datetime
from astropy.coordinates import EarthLocation,SkyCoord
from astropy.time import Time
from astroplan import (Observer, FixedTarget, ObservingBlock, Transitioner, PriorityScheduler, Schedule)
from astroplan.constraints import AtNightConstraint, AirmassConstraint, TimeConstraint, AltitudeConstraint
from astroplan.plots import plot_schedule_airmass
import matplotlib.pyplot as plt
class MySchedule(object):
    """Priority-based observation scheduler for the Chelles observatory.

    Wraps astroplan's ``PriorityScheduler``: observing blocks are queued with
    ``addTargetFromName``/``addTargetFromCoord``, optimised in place with
    ``optimize``, and written out as a target-list file with
    ``writeLstRequest`` (raw requests) or ``writeLstSchedule`` (scheduled
    blocks only).
    """
    def __init__(self):
        # Chelles, France: longitude, latitude, elevation.
        location = EarthLocation.from_geodetic(2.581944444*u.deg, 48.88027778*u.deg, 50*u.m)
        self.observer = Observer(location=location, name="Chelles", timezone="UTC")
        # Applied to every block: altitude between 25 and 70 degrees, and
        # only after civil twilight.
        self.global_constraints = [AltitudeConstraint(min=25*u.deg, max=70*u.deg, boolean_constraint=True),
                                   AtNightConstraint.twilight_civil()]
        self.blocks = []
        # Slew at .8 deg/s; a filter change costs 600 s.
        self.transitioner = Transitioner(.8*u.deg/u.second, {'filter': {'default': 600*u.second}})
        # Initialize the scheduler
        self.prior_scheduler = PriorityScheduler(constraints=self.global_constraints, observer=self.observer, transitioner=self.transitioner)
        var = 1
        if var == 1:
            # Fixed window: tonight 17:00 UTC until 07:00 UTC tomorrow.
            date = str(datetime.date.today())
            beg = Time(date) + 17*u.hour
            end = Time(date) + (24 + 7)*u.hour
        else:
            # Alternative mode: start now, schedule the next 17 hours.
            beg = Time.now()
            beg.format = 'isot'
            end = beg + 17*u.hour
            end.format = 'isot'
        # Create a schedule
        self.priority_schedule = Schedule(beg, end)
        print("to next sun Rise")
        print('Schedule created from [', beg, 'UTC to', end, 'UTC ]')
    def addTargetFromName(self, name, durationBlock, priority, config):  # config= dictionnary
        """Queue an observing block for a target resolved by catalogue name."""
        print('add target name=', name)
        self.blocks.append(ObservingBlock(FixedTarget.from_name(name), durationBlock, priority, configuration=config, constraints=[]))
    def addTargetFromCoord(self, name, coord, durationBlock, priority, config):  # config= dictionnary
        """Queue an observing block for a target at explicit coordinates."""
        print('add target name=', name)
        self.blocks.append(ObservingBlock(FixedTarget(coord=coord, name=name), durationBlock, priority, configuration=config, constraints=[]))
    def optimize(self):
        """Insert the queued blocks into the schedule and run the scheduler."""
        print("**************************")
        print("start schedule.optimize()")
        self.prior_scheduler(self.blocks, self.priority_schedule)
        print("**************************")
        print(self.priority_schedule)
        print("**************************")
    def writeLstRequest(self, filename):
        """Write every requested block (scheduled or not) to *filename*."""
        print("writeLstRequest:")
        self.writeLst(filename, self.blocks)
    def writeLstSchedule(self, filename):
        """Write only the blocks the scheduler actually placed to *filename*."""
        print("writeLstSchedule:")
        self.writeLst(filename, self.priority_schedule.observing_blocks)
    def writeLst(self, filename, lstBlocks):
        """Serialise *lstBlocks* to *filename* in the target-list format.

        BUG FIX: the original ended with ``f.close`` (no parentheses), so the
        file was never explicitly closed or flushed; a ``with`` block now
        guarantees both, even if serialisation raises.
        """
        print("mySchedule.writeLst ", filename)
        with open(filename, 'w+') as f:
            for block in lstBlocks:
                name = block.target.name
                project = block.configuration['project']
                FLUX_V = block.configuration['FLUX_V']
                ExposureTime = block.configuration['ExposureTime']
                NbExposure = block.configuration['NbExposure']
                TotExposure = block.configuration['TotExposure']
                intTime = block.configuration['intTime']
                uid = block.configuration['uid']
                extraConfig = block.configuration['extraConf']
                calib = block.configuration['Calib']
                if name != 'TransitionBlock':
                    coord = block.target.coord.to_string('hmsdms')
                    (ra, dec) = coord.split(' ')
                    # Fix RA/Dec seconds to 2 decimal digits for the list format.
                    raLst = ra[:ra.find('m')]+'m'+"{:05.2f}".format(float(ra[ra.find('m')+1:ra.find('s')]))+"s"
                    decLst = dec[:dec.find('m')]+'m'+"{:05.2f}".format(float(dec[dec.find('m')+1:dec.find('s')]))+"s"
                    print(f"name={name} ra={ra} dec={dec} => raLst={raLst} decLst={decLst}")
                    line = '"'+name+'" '+raLst+' '+decLst+' '
                    line += str(FLUX_V)+' FALSE "Project='+str(project)
                    line += "&Calib="+str(calib)
                    line += "&uid="+str(uid)
                    # Optional fields are appended only when present.
                    if ExposureTime is not None:
                        line += "&ExposureTime="+str(ExposureTime)
                    if NbExposure is not None:
                        line += "&NbExposure="+str(NbExposure)
                    if TotExposure is not None:
                        line += "&TotExposure="+str(TotExposure)
                    if intTime is not None:
                        line += "&intTime="+str(intTime)
                    if extraConfig is not None:
                        line += "&"+extraConfig
                    if block.start_time is not None:
                        # Scheduled block: emit its start date.
                        block.start_time.format = 'isot'
                        line += "&dateStart="+str(block.start_time)
                    else:
                        # Unscheduled request: emit its priority instead.
                        line += "&priority="+str(block.priority)
                    line += '"'
                    f.write(line+'\n')
    def plotAirMas(self):
        """Display the current schedule as an airmass plot."""
        plt.figure(figsize=(14, 6))
        plot_schedule_airmass(self.priority_schedule)
        plt.tight_layout()
        plt.legend(loc="upper right")
        plt.show()
    def showTable(self):
        """Print the schedule as an astropy table."""
        print(self.priority_schedule.to_table())
| tlemoult/spectroDb | myPythonLib/libsdb/MyScheduler.py | Python | mit | 4,569 |
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Morphological parser backup model"""
from sqlalchemy import Column, Sequence
from sqlalchemy.types import Integer, Unicode, UnicodeText, DateTime, Boolean
from onlinelinguisticdatabase.model.meta import Base, now
import simplejson as json
class MorphologicalParserBackup(Base):
    """Point-in-time snapshot of a morphological parser row.

    Each backup copies the scalar fields of a MorphologicalParser and
    serialises its relational attributes (phonology, morphology, language
    model, enterer, modifier) as JSON strings — see ``vivify``.
    """
    __tablename__ = 'morphologicalparserbackup'
    def __repr__(self):
        return '<MorphologicalParserBackup (%s)>' % self.id
    id = Column(Integer, Sequence('morphologicalparserbackup_seq_id', optional=True), primary_key=True)
    # Plain integer (not a ForeignKey): the referenced parser row may be
    # deleted while its backups are kept.
    morphologicalparser_id = Column(Integer)
    UUID = Column(Unicode(36))
    name = Column(Unicode(255))
    description = Column(UnicodeText)
    # The following *Text columns hold JSON-serialised copies of what were
    # relational attributes on the live parser row.
    phonology = Column(UnicodeText)
    morphology = Column(UnicodeText)
    language_model = Column(UnicodeText)
    enterer = Column(UnicodeText)
    modifier = Column(UnicodeText)
    datetime_entered = Column(DateTime)
    datetime_modified = Column(DateTime, default=now)
    compile_succeeded = Column(Boolean, default=False)
    compile_message = Column(Unicode(255))
    compile_attempt = Column(Unicode(36)) # a UUID
    def vivify(self, morphological_parser_dict):
        """The vivify method gives life to a morphology_backup by specifying its
        attributes using the to-be-backed-up morphology (morphological_parser_dict) and the
        modifier (current user). The relational attributes of the
        to-be-backed-up morphology are converted into (truncated) JSON objects.
        """
        self.UUID = morphological_parser_dict['UUID']
        self.morphologicalparser_id = morphological_parser_dict['id']
        self.name = morphological_parser_dict['name']
        self.description = morphological_parser_dict['description']
        # Python 2 codebase: `unicode` wraps the JSON text for the Unicode
        # columns above.
        self.phonology = unicode(json.dumps(morphological_parser_dict['phonology']))
        self.morphology = unicode(json.dumps(morphological_parser_dict['morphology']))
        self.language_model = unicode(json.dumps(morphological_parser_dict['language_model']))
        self.enterer = unicode(json.dumps(morphological_parser_dict['enterer']))
        self.modifier = unicode(json.dumps(morphological_parser_dict['modifier']))
        self.datetime_entered = morphological_parser_dict['datetime_entered']
        self.datetime_modified = morphological_parser_dict['datetime_modified']
        self.compile_succeeded = morphological_parser_dict['compile_succeeded']
        self.compile_message = morphological_parser_dict['compile_message']
        self.compile_attempt = morphological_parser_dict['compile_attempt']
    def get_dict(self):
        """Return the backup as a plain dict for API serialisation.

        NOTE(review): `get_mini_dict_for` / `get_mini_user_dict` are not
        defined in this class — presumably inherited from the model base;
        they appear to deserialise the JSON columns back into mini dicts.
        """
        return {
            'id': self.id,
            'morphologicalparser_id': self.morphologicalparser_id,
            'UUID': self.UUID,
            'name': self.name,
            'phonology': self.get_mini_dict_for(self.phonology),
            'morphology': self.get_mini_dict_for(self.morphology),
            'language_model': self.get_mini_dict_for(self.language_model),
            'description': self.description,
            'enterer': self.get_mini_user_dict(self.enterer),
            'modifier': self.get_mini_user_dict(self.modifier),
            'datetime_entered': self.datetime_entered,
            'datetime_modified': self.datetime_modified,
            'compile_succeeded': self.compile_succeeded,
            'compile_message': self.compile_message,
            'compile_attempt': self.compile_attempt
        }
| jrwdunham/old | onlinelinguisticdatabase/model/morphologicalparserbackup.py | Python | apache-2.0 | 3,989 |
# Copyright (c) 2014 ProphetStor, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import re
import mock
from oslo_utils import units
from six.moves import http_client
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import utils as test_utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.prophetstor import dpl_iscsi as DPLDRIVER
from cinder.volume.drivers.prophetstor import dplcommon as DPLCOMMON
from cinder.volume import group_types
# ---------------------------------------------------------------------------
# Shared fixtures: canned identifiers, fake REST responses (tuples of
# (return_code, body)) and fake Cinder volume/group/snapshot dicts used by
# the test cases below.
# ---------------------------------------------------------------------------
POOLUUID = 'ac33fc6e417440d5a1ef27d7231e1cc4'
VOLUMEUUID = 'a000000000000000000000000000001'
INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa'
DATA_IN_CONNECTOR = {'initiator': INITIATOR}
# Fake responses from the DPL REST API: (status, payload).
DATA_SERVER_INFO = 0, {
    'metadata': {'vendor': 'ProphetStor',
                 'version': '1.5'}}
DATA_POOLS = 0, {
    'children': [POOLUUID]
}
DATA_POOLINFO = 0, {
    'capabilitiesURI': '',
    'children': [],
    'childrenrange': '',
    'completionStatus': 'Complete',
    'metadata': {'available_capacity': 4294967296,
                 'ctime': 1390551362349,
                 'vendor': 'prophetstor',
                 'version': '1.5',
                 'display_description': 'Default Pool',
                 'display_name': 'default_pool',
                 'event_uuid': '4f7c4d679a664857afa4d51f282a516a',
                 'physical_device': {'cache': [],
                                     'data': ['disk_uuid_0',
                                              'disk_uuid_1',
                                              'disk_uuid_2'],
                                     'log': [],
                                     'spare': []},
                 'pool_uuid': POOLUUID,
                 'properties': {'raid_level': 'raid0'},
                 'state': 'Online',
                 'used_capacity': 0,
                 'total_capacity': 4294967296,
                 'zpool_guid': '8173612007304181810'},
    'objectType': 'application/cdmi-container',
    'percentComplete': 100}
DATA_ASSIGNVDEV = 0, {
    'children': [],
    'childrenrange': '',
    'completionStatus': 'Complete',
    'domainURI': '',
    'exports': {'Network/iSCSI': [
                {'logical_unit_name': '',
                 'logical_unit_number': '101',
                 'permissions': [INITIATOR],
                 'portals': ['172.31.1.210:3260'],
                 'target_identifier':
                 'iqn.2013-09.com.prophetstor:hypervisor.886423051816'
                 }]},
    'metadata': {'ctime': 0,
                 'event_uuid': 'c11e90287e9348d0b4889695f1ec4be5',
                 'type': 'volume'},
    'objectID': '',
    'objectName': 'd827e23d403f4f12bb208a6fec208fd8',
    'objectType': 'application/cdmi-container',
    'parentID': '8daa374670af447e8efea27e16bf84cd',
    'parentURI': '/dpl_volume',
    'snapshots': []
    }
DATA_OUTPUT = 0, None
MOD_OUTPUT = {'status': 'available'}
# Fake Cinder-side input dicts (volumes, groups, snapshots).
DATA_IN_GROUP = {'id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
                 'name': 'group123',
                 'description': 'des123',
                 'status': ''}
DATA_IN_VOLUME = {'id': 'c11e902-87e9-348d-0b48-89695f1ec4be5',
                  'display_name': 'abc123',
                  'display_description': '',
                  'size': 10,
                  'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_VOLUME_VG = {'id': 'fe2dbc5-1581-0451-dab2-f8c8a48d15bee',
                     'display_name': 'abc123',
                     'display_description': '',
                     'size': 10,
                     'group_id':
                         'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
                     'status': 'available',
                     'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_REMOVE_VOLUME_VG = {
    'id': 'fe2dbc515810451dab2f8c8a48d15bee',
    'display_name': 'fe2dbc515810451dab2f8c8a48d15bee',
    'display_description': '',
    'size': 10,
    'group_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
    'status': 'available',
    'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_VOLUME1 = {'id': 'c11e902-87e9-348d-0b48-89695f1ec4bef',
                   'display_name': 'abc456',
                   'display_description': '',
                   'size': 10,
                   'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_CG_SNAPSHOT = {
    'group_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
    'id': 'cgsnapshot1',
    'name': 'cgsnapshot1',
    'description': 'cgsnapshot1',
    'status': ''}
DATA_IN_SNAPSHOT = {'id': 'fe2dbc5-1581-0451-dab2-f8c8a48d15bee',
                    'volume_id': 'c11e902-87e9-348d-0b48-89695f1ec4be5',
                    'display_name': 'snapshot1',
                    'display_description': '',
                    'volume_size': 5}
DATA_OUT_SNAPSHOT_CG = {
    'id': 'snapshot1',
    'volume_id': 'c11e902-87e9-348d-0b48-89695f1ec4be5',
    'display_name': 'snapshot1',
    'display_description': '',
    'group_snapshot_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee'}
DATA_OUT_CG = {
    "objectType": "application/cdmi-container",
    "objectID": "fe2dbc515810451dab2f8c8a48d15bee",
    "objectName": "<new_volume_group_uuid>",
    "parentURI": "/dpl_volgroup",
    "parentID": "fe2dbc515810451dab2f8c8a48d15bee",
    "domainURI": "",
    "capabilitiesURI": "",
    "completionStatus": "Complete",
    "percentComplete": 100,
    "metadata":
    {
        "type": "volume|snapshot|replica",
        "volume_group_uuid": "<volume_group_uuid>",
        "origin_uuid": "<origin_uuid>",
        "snapshot_uuid": "<snapshot_uuid>",
        "display_name": "<display name>",
        "display_description": "<display description>",
        "ctime": 12345678,
        "total_capacity": 1024,
        "snapshot_used_capacity": 0,
        "maximum_snapshot": 1024,
        "snapshot_quota": 0,
        "state": "<state>",
        "properties":
        {
            "snapshot_rotation": True,
        }
    },
    "childrenrange": "<range>",
    "children":
    [
        'fe2dbc515810451dab2f8c8a48d15bee',
    ],
}
class TestProphetStorDPLVolume(test.TestCase):
    """Unit tests for the DPLVolume REST helper.

    Each test invokes one helper method and asserts the exact HTTP verb,
    URI, payload and accepted status codes handed to the mocked command
    channel (`send_cmd`).
    """
    def _gen_snapshot_url(self, vdevid, snapshotid):
        # Build the '/<vdev>/snapshot/<snap>' sub-path used by the API.
        snapshot_url = '/%s/%s/%s' % (vdevid, DPLCOMMON.DPL_OBJ_SNAPSHOT,
                                      snapshotid)
        return snapshot_url
    def setUp(self):
        """Create a DPLVolume whose command channel is a MagicMock."""
        super(TestProphetStorDPLVolume, self).setUp()
        self.dplcmd = DPLCOMMON.DPLVolume('1.1.1.1', 8356, 'admin', 'password')
        self.DPL_MOCK = mock.MagicMock()
        self.dplcmd.objCmd = self.DPL_MOCK
        self.DPL_MOCK.send_cmd.return_value = DATA_OUTPUT
    def test_getserverinfo(self):
        """GET /v1/system/ is issued for server info."""
        self.dplcmd.get_server_info()
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'GET',
            '/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_SYSTEM),
            None,
            [http_client.OK, http_client.ACCEPTED])
    def test_createvdev(self):
        """PUT with size/pool metadata creates a thin-provisioned vdev."""
        self.dplcmd.create_vdev(DATA_IN_VOLUME['id'],
                                DATA_IN_VOLUME['display_name'],
                                DATA_IN_VOLUME['display_description'],
                                POOLUUID,
                                int(DATA_IN_VOLUME['size']) * units.Gi)
        metadata = {}
        metadata['display_name'] = DATA_IN_VOLUME['display_name']
        metadata['display_description'] = DATA_IN_VOLUME['display_description']
        metadata['pool_uuid'] = POOLUUID
        metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi
        metadata['maximum_snapshot'] = 1024
        metadata['properties'] = dict(thin_provision=True)
        params = {}
        params['metadata'] = metadata
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'PUT',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME['id']),
            params,
            [http_client.OK, http_client.ACCEPTED, http_client.CREATED])
    def test_extendvdev(self):
        """PUT with a new total_capacity extends an existing vdev."""
        self.dplcmd.extend_vdev(DATA_IN_VOLUME['id'],
                                DATA_IN_VOLUME['display_name'],
                                DATA_IN_VOLUME['display_description'],
                                int(DATA_IN_VOLUME['size']) * units.Gi)
        metadata = {}
        metadata['display_name'] = DATA_IN_VOLUME['display_name']
        metadata['display_description'] = DATA_IN_VOLUME['display_description']
        metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi
        metadata['maximum_snapshot'] = 1024
        params = {}
        params['metadata'] = metadata
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'PUT',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME['id']),
            params,
            [http_client.OK, http_client.ACCEPTED, http_client.CREATED])
    def test_deletevdev(self):
        """DELETE with force=True removes a vdev; 404/204 are tolerated."""
        self.dplcmd.delete_vdev(DATA_IN_VOLUME['id'], True)
        metadata = {}
        params = {}
        metadata['force'] = True
        params['metadata'] = metadata
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'DELETE',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME['id']),
            params,
            [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND,
             http_client.NO_CONTENT])
    def test_createvdevfromsnapshot(self):
        """PUT with snapshot_operation=copy clones a vdev from a snapshot."""
        self.dplcmd.create_vdev_from_snapshot(
            DATA_IN_VOLUME['id'],
            DATA_IN_VOLUME['display_name'],
            DATA_IN_VOLUME['display_description'],
            DATA_IN_SNAPSHOT['id'],
            POOLUUID)
        metadata = {}
        params = {}
        metadata['snapshot_operation'] = 'copy'
        metadata['display_name'] = DATA_IN_VOLUME['display_name']
        metadata['display_description'] = DATA_IN_VOLUME['display_description']
        metadata['pool_uuid'] = POOLUUID
        metadata['maximum_snapshot'] = 1024
        metadata['properties'] = dict(thin_provision=True)
        params['metadata'] = metadata
        params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'],
                                                DATA_IN_SNAPSHOT['id'])
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'PUT',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME['id']),
            params,
            [http_client.OK, http_client.ACCEPTED, http_client.CREATED])
    def test_getpool(self):
        """GET /v1/pool/<uuid>/ fetches pool details."""
        self.dplcmd.get_pool(POOLUUID)
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'GET',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL,
                            POOLUUID),
            None,
            [http_client.OK, http_client.ACCEPTED])
    def test_clonevdev(self):
        """PUT with snapshot_operation=clone copies one vdev into another."""
        self.dplcmd.clone_vdev(
            DATA_IN_VOLUME['id'],
            DATA_IN_VOLUME1['id'],
            POOLUUID,
            DATA_IN_VOLUME['display_name'],
            DATA_IN_VOLUME['display_description'],
            int(DATA_IN_VOLUME['size']) * units.Gi
        )
        metadata = {}
        params = {}
        metadata["snapshot_operation"] = "clone"
        metadata["display_name"] = DATA_IN_VOLUME['display_name']
        metadata["display_description"] = DATA_IN_VOLUME['display_description']
        metadata["pool_uuid"] = POOLUUID
        metadata["total_capacity"] = int(DATA_IN_VOLUME['size']) * units.Gi
        metadata['maximum_snapshot'] = 1024
        metadata['properties'] = dict(thin_provision=True)
        params["metadata"] = metadata
        params["copy"] = DATA_IN_VOLUME['id']
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'PUT',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME1['id']),
            params,
            [http_client.OK, http_client.CREATED, http_client.ACCEPTED])
    def test_createvdevsnapshot(self):
        """PUT with a 'snapshot' key creates a named snapshot of a vdev."""
        self.dplcmd.create_vdev_snapshot(
            DATA_IN_VOLUME['id'],
            DATA_IN_SNAPSHOT['id'],
            DATA_IN_SNAPSHOT['display_name'],
            DATA_IN_SNAPSHOT['display_description']
        )
        metadata = {}
        params = {}
        metadata['display_name'] = DATA_IN_SNAPSHOT['display_name']
        metadata['display_description'] = (
            DATA_IN_SNAPSHOT['display_description'])
        params['metadata'] = metadata
        params['snapshot'] = DATA_IN_SNAPSHOT['id']
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'PUT',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME['id']),
            params,
            [http_client.OK, http_client.CREATED, http_client.ACCEPTED])
    def test_getvdev(self):
        """GET /v1/volume/<id>/ fetches vdev details (404 tolerated)."""
        self.dplcmd.get_vdev(DATA_IN_VOLUME['id'])
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'GET',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME['id']),
            None,
            [http_client.OK, http_client.ACCEPTED, http_client.NOT_FOUND])
    def test_getvdevstatus(self):
        """GET with event_uuid query polls an async vdev operation."""
        self.dplcmd.get_vdev_status(DATA_IN_VOLUME['id'], '123456')
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'GET',
            '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1,
                                          DPLCOMMON.DPL_OBJ_VOLUME,
                                          DATA_IN_VOLUME['id'],
                                          '123456'),
            None,
            [http_client.OK, http_client.NOT_FOUND])
    def test_getpoolstatus(self):
        """GET with event_uuid query polls an async pool operation."""
        self.dplcmd.get_pool_status(POOLUUID, '123456')
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'GET',
            '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1,
                                          DPLCOMMON.DPL_OBJ_POOL,
                                          POOLUUID,
                                          '123456'),
            None,
            [http_client.OK, http_client.NOT_FOUND])
    def test_assignvdev(self):
        """PUT with export_operation=assign exports a vdev over iSCSI."""
        self.dplcmd.assign_vdev(
            DATA_IN_VOLUME['id'],
            'iqn.1993-08.org.debian:01:test1',
            '',
            '1.1.1.1:3260',
            0
        )
        params = {}
        metadata = {}
        exports = {}
        metadata['export_operation'] = 'assign'
        exports['Network/iSCSI'] = {}
        target_info = {}
        target_info['logical_unit_number'] = 0
        target_info['logical_unit_name'] = ''
        permissions = []
        portals = []
        portals.append('1.1.1.1:3260')
        permissions.append('iqn.1993-08.org.debian:01:test1')
        target_info['permissions'] = permissions
        target_info['portals'] = portals
        exports['Network/iSCSI'] = target_info
        params['metadata'] = metadata
        params['exports'] = exports
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'PUT',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1,
                            DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME['id']),
            params,
            [http_client.OK, http_client.ACCEPTED, http_client.CREATED])
    def test_unassignvdev(self):
        """PUT with export_operation=unassign revokes an iSCSI export."""
        self.dplcmd.unassign_vdev(DATA_IN_VOLUME['id'],
                                  'iqn.1993-08.org.debian:01:test1',
                                  '')
        params = {}
        metadata = {}
        exports = {}
        metadata['export_operation'] = 'unassign'
        params['metadata'] = metadata
        exports['Network/iSCSI'] = {}
        exports['Network/iSCSI']['target_identifier'] = ''
        permissions = []
        permissions.append('iqn.1993-08.org.debian:01:test1')
        exports['Network/iSCSI']['permissions'] = permissions
        params['exports'] = exports
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'PUT',
            '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1,
                            DPLCOMMON.DPL_OBJ_VOLUME,
                            DATA_IN_VOLUME['id']),
            params,
            [http_client.OK, http_client.ACCEPTED,
             http_client.NO_CONTENT, http_client.NOT_FOUND])
    def test_deletevdevsnapshot(self):
        """DELETE on the snapshot sub-path removes a single snapshot."""
        self.dplcmd.delete_vdev_snapshot(DATA_IN_VOLUME['id'],
                                         DATA_IN_SNAPSHOT['id'])
        params = {}
        params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'],
                                                DATA_IN_SNAPSHOT['id'])
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'DELETE',
            '/%s/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1,
                                  DPLCOMMON.DPL_OBJ_VOLUME,
                                  DATA_IN_VOLUME['id'],
                                  DPLCOMMON.DPL_OBJ_SNAPSHOT,
                                  DATA_IN_SNAPSHOT['id']),
            None,
            [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT,
             http_client.NOT_FOUND])
    def test_listvdevsnapshots(self):
        """GET on the snapshot sub-path lists a vdev's snapshots."""
        self.dplcmd.list_vdev_snapshots(DATA_IN_VOLUME['id'])
        self.DPL_MOCK.send_cmd.assert_called_once_with(
            'GET',
            '/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1,
                               DPLCOMMON.DPL_OBJ_VOLUME,
                               DATA_IN_VOLUME['id'],
                               DPLCOMMON.DPL_OBJ_SNAPSHOT),
            None,
            [http_client.OK])
class TestProphetStorDPLDriver(test.TestCase):
    def __init__(self, method):
        # Plain pass-through to the base TestCase constructor.
        super(TestProphetStorDPLDriver, self).__init__(method)
def _conver_uuid2hex(self, strID):
return strID.replace('-', '')
    def setUp(self):
        """Build a DPLISCSIDriver wired to mocked configuration, REST API and DB."""
        super(TestProphetStorDPLDriver, self).setUp()
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.san_ip = '1.1.1.1'
        self.configuration.dpl_port = 8356
        self.configuration.san_login = 'admin'
        self.configuration.san_password = 'password'
        self.configuration.dpl_pool = POOLUUID
        self.configuration.iscsi_port = 3260
        self.configuration.san_is_local = False
        self.configuration.san_thin_provision = True
        self.configuration.driver_ssl_cert_verify = False
        self.configuration.driver_ssl_cert_path = None
        self.context = context.get_admin_context()
        # Replace the REST client and the DB layer with mocks so each test
        # can stub return values and assert the exact calls made.
        self.DPL_MOCK = mock.MagicMock()
        self.DB_MOCK = mock.MagicMock()
        self.dpldriver = DPLDRIVER.DPLISCSIDriver(
            configuration=self.configuration)
        self.dpldriver.dpl = self.DPL_MOCK
        self.dpldriver.db = self.DB_MOCK
        self.dpldriver.do_setup(self.context)
    def test_get_volume_stats(self):
        """Driver stats are derived from pool info and server version."""
        self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO
        self.DPL_MOCK.get_server_info.return_value = DATA_SERVER_INFO
        res = self.dpldriver.get_volume_stats(True)
        self.assertEqual('ProphetStor', res['vendor_name'])
        self.assertEqual('1.5', res['driver_version'])
        pool = res["pools"][0]
        # 4294967296 bytes in DATA_POOLINFO -> 4 GiB total/free.
        self.assertEqual(4, pool['total_capacity_gb'])
        self.assertEqual(4, pool['free_capacity_gb'])
        self.assertEqual(0, pool['reserved_percentage'])
        self.assertFalse(pool['QoS_support'])
    def test_create_volume(self):
        """create_volume creates a thin vdev sized in bytes in the host's pool."""
        volume = test_utils.create_volume(
            self.context,
            id=DATA_IN_VOLUME['id'],
            display_name=DATA_IN_VOLUME['display_name'],
            size=DATA_IN_VOLUME['size'],
            host=DATA_IN_VOLUME['host'])
        self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
        self.dpldriver.create_volume(volume)
        self.DPL_MOCK.create_vdev.assert_called_once_with(
            self._conver_uuid2hex(volume.id),
            volume.display_name,
            volume.display_description,
            self.configuration.dpl_pool,
            int(volume.size) * units.Gi,
            True)
    def test_create_volume_without_pool(self):
        """InvalidHost is raised when neither host nor config names a pool."""
        volume = test_utils.create_volume(
            self.context,
            id=DATA_IN_VOLUME['id'],
            display_name=DATA_IN_VOLUME['display_name'],
            size=DATA_IN_VOLUME['size'],
            host=DATA_IN_VOLUME['host'])
        self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
        self.configuration.dpl_pool = ""
        volume.host = "host@backend"  # missing pool
        self.assertRaises(exception.InvalidHost, self.dpldriver.create_volume,
                          volume=volume)
    def test_create_volume_with_configuration_pool(self):
        """Without a pool in the host string, the configured pool is used."""
        volume = test_utils.create_volume(
            self.context,
            id=DATA_IN_VOLUME['id'],
            display_name=DATA_IN_VOLUME['display_name'],
            size=DATA_IN_VOLUME['size'],
            host="host@backend")
        self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
        self.dpldriver.create_volume(volume)
        self.DPL_MOCK.create_vdev.assert_called_once_with(
            self._conver_uuid2hex(volume.id),
            volume.display_name, volume.display_description,
            self.configuration.dpl_pool, int(volume.size) * units.Gi, True)
    def test_create_volume_of_group(self):
        """A volume in a consistency group is created, then joined to the group."""
        group_type = group_types.create(
            self.context,
            'group',
            {'consistent_group_snapshot_enabled': '<is> True'}
        )
        group = test_utils.create_group(
            self.context,
            id=fake_constants.CONSISTENCY_GROUP_ID,
            host='host@backend#unit_test_pool',
            group_type_id=group_type.id)
        self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
        self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT
        volume = test_utils.create_volume(
            self.context,
            id=DATA_IN_VOLUME_VG['id'],
            display_name=DATA_IN_VOLUME_VG['display_name'],
            size=DATA_IN_VOLUME_VG['size'],
            group_id=group.id,
            host=DATA_IN_VOLUME_VG['host'])
        self.dpldriver.create_volume(volume)
        self.DPL_MOCK.create_vdev.assert_called_once_with(
            self._conver_uuid2hex(volume.id),
            volume.display_name,
            volume.display_description,
            self.configuration.dpl_pool,
            int(volume.size) * units.Gi,
            True)
        self.DPL_MOCK.join_vg.assert_called_once_with(
            self._conver_uuid2hex(volume.id),
            self._conver_uuid2hex(volume.group_id))
    def test_delete_volume(self):
        """delete_volume deletes the vdev addressed by the dash-less UUID."""
        volume = test_utils.create_volume(
            self.context,
            id=DATA_IN_VOLUME['id'],
            display_name=DATA_IN_VOLUME['display_name'],
            size=DATA_IN_VOLUME['size'],
            host=DATA_IN_VOLUME['host'])
        self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT
        self.dpldriver.delete_volume(volume)
        self.DPL_MOCK.delete_vdev.assert_called_once_with(
            self._conver_uuid2hex(volume.id))
def test_delete_volume_of_group(self):
group_type = group_types.create(
self.context,
'group',
{'consistent_group_snapshot_enabled': '<is> True'}
)
group = test_utils.create_group(
self.context,
id=fake_constants.CONSISTENCY_GROUP_ID,
host='host@backend#unit_test_pool',
group_type_id=group_type.id)
volume = test_utils.create_volume(
self.context,
id=DATA_IN_VOLUME_VG['id'],
display_name=DATA_IN_VOLUME_VG['display_name'],
size=DATA_IN_VOLUME_VG['size'],
group_id=group.id,
host=DATA_IN_VOLUME_VG['host'])
self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT
self.DPL_MOCK.leave_vg.return_volume = DATA_OUTPUT
self.dpldriver.delete_volume(volume)
self.DPL_MOCK.leave_vg.assert_called_once_with(
self._conver_uuid2hex(volume.id),
self._conver_uuid2hex(volume.group_id)
)
self.DPL_MOCK.delete_vdev.assert_called_once_with(
self._conver_uuid2hex(volume.id))
    def test_create_volume_from_snapshot(self):
        """Restoring from a snapshot creates the vdev, then extends it."""
        self.DPL_MOCK.create_vdev_from_snapshot.return_value = DATA_OUTPUT
        self.DPL_MOCK.extend_vdev.return_value = DATA_OUTPUT
        volume = test_utils.create_volume(
            self.context,
            id=DATA_IN_VOLUME_VG['id'],
            display_name=DATA_IN_VOLUME_VG['display_name'],
            size=DATA_IN_VOLUME_VG['size'],
            host=DATA_IN_VOLUME_VG['host'])
        self.dpldriver.create_volume_from_snapshot(
            volume, DATA_IN_SNAPSHOT)
        self.DPL_MOCK.create_vdev_from_snapshot.assert_called_once_with(
            self._conver_uuid2hex(volume.id),
            volume.display_name,
            volume.display_description,
            self._conver_uuid2hex(volume.id),
            self.configuration.dpl_pool,
            True)
        # The driver is expected to grow the restored vdev to the requested
        # volume size afterwards.
        self.DPL_MOCK.extend_vdev.assert_called_once_with(
            self._conver_uuid2hex(volume.id),
            volume.display_name,
            volume.display_description,
            volume.size * units.Gi)
    def test_create_cloned_volume(self):
        """Cloning passes source id, new id and the configured pool through."""
        new_volume = test_utils.create_volume(
            self.context,
            id=DATA_IN_VOLUME1['id'],
            display_name=DATA_IN_VOLUME1['display_name'],
            size=DATA_IN_VOLUME1['size'],
            host=DATA_IN_VOLUME1['host'])
        src_volume = test_utils.create_volume(
            self.context,
            id=DATA_IN_VOLUME['id'])
        self.DPL_MOCK.clone_vdev.return_value = DATA_OUTPUT
        self.dpldriver.create_cloned_volume(new_volume, src_volume)
        self.DPL_MOCK.clone_vdev.assert_called_once_with(
            self._conver_uuid2hex(src_volume.id),
            self._conver_uuid2hex(new_volume.id),
            self.configuration.dpl_pool,
            new_volume.display_name,
            new_volume.display_description,
            int(new_volume.size) *
            units.Gi,
            True)
def test_create_snapshot(self):
self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT
self.dpldriver.create_snapshot(DATA_IN_SNAPSHOT)
self.DPL_MOCK.create_vdev_snapshot.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']),
self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']),
DATA_IN_SNAPSHOT['display_name'],
DATA_IN_SNAPSHOT['display_description'])
    def test_delete_snapshot(self):
        """delete_snapshot removes the vdev snapshot by hex volume/snap ids."""
        self.DPL_MOCK.delete_vdev_snapshot.return_value = DATA_OUTPUT
        self.dpldriver.delete_snapshot(DATA_IN_SNAPSHOT)
        self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with(
            self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']),
            self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']))
    def test_initialize_connection(self):
        """initialize_connection exposes iSCSI data from the assigned vdev."""
        self.DPL_MOCK.assign_vdev.return_value = DATA_ASSIGNVDEV
        self.DPL_MOCK.get_vdev.return_value = DATA_ASSIGNVDEV
        res = self.dpldriver.initialize_connection(DATA_IN_VOLUME,
                                                   DATA_IN_CONNECTOR)
        # Expected values mirror the DATA_ASSIGNVDEV fixture.
        self.assertEqual('iscsi', res['driver_volume_type'])
        self.assertEqual(101, res['data']['target_lun'])
        self.assertTrue(res['data']['target_discovered'])
        self.assertEqual('172.31.1.210:3260', res['data']['target_portal'])
        self.assertEqual(
            'iqn.2013-09.com.prophetstor:hypervisor.886423051816',
            res['data']['target_iqn'])
    def test_terminate_connection(self):
        """terminate_connection unassigns the vdev from the initiator."""
        self.DPL_MOCK.unassign_vdev.return_value = DATA_OUTPUT
        self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR)
        self.DPL_MOCK.unassign_vdev.assert_called_once_with(
            self._conver_uuid2hex(DATA_IN_VOLUME['id']),
            DATA_IN_CONNECTOR['initiator'])
    def test_terminate_connection_volume_detached(self):
        """ENODATA from the backend (already detached) must not raise."""
        self.DPL_MOCK.unassign_vdev.return_value = errno.ENODATA, None
        self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR)
        self.DPL_MOCK.unassign_vdev.assert_called_once_with(
            self._conver_uuid2hex(DATA_IN_VOLUME['id']),
            DATA_IN_CONNECTOR['initiator'])
    def test_terminate_connection_failed(self):
        """Any other backend error surfaces as VolumeBackendAPIException."""
        self.DPL_MOCK.unassign_vdev.return_value = errno.EFAULT, None
        ex = self.assertRaises(
            exception.VolumeBackendAPIException,
            self.dpldriver.terminate_connection,
            volume=DATA_IN_VOLUME, connector=DATA_IN_CONNECTOR)
        # The driver's error message is expected to mention Flexvisor.
        self.assertTrue(
            re.match(r".*Flexvisor failed", ex.msg))
    def test_get_pool_info(self):
        """_get_pool_info exposes the raw pool document under 'metadata'."""
        self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO
        _, res = self.dpldriver._get_pool_info(POOLUUID)
        # All expected values mirror the DATA_POOLINFO fixture.
        self.assertEqual(4294967296, res['metadata']['available_capacity'])
        self.assertEqual(1390551362349, res['metadata']['ctime'])
        self.assertEqual('Default Pool',
                         res['metadata']['display_description'])
        self.assertEqual('default_pool',
                         res['metadata']['display_name'])
        self.assertEqual('4f7c4d679a664857afa4d51f282a516a',
                         res['metadata']['event_uuid'])
        self.assertEqual(
            {'cache': [],
             'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'],
             'log': [],
             'spare': []},
            res['metadata']['physical_device'])
        self.assertEqual(POOLUUID, res['metadata']['pool_uuid'])
        self.assertEqual(
            {'raid_level': 'raid0'},
            res['metadata']['properties'])
        self.assertEqual('Online', res['metadata']['state'])
        self.assertEqual(4294967296, res['metadata']['total_capacity'])
        self.assertEqual('8173612007304181810', res['metadata']['zpool_guid'])
    def test_create_group(self):
        """create_group creates a backend VG and reports AVAILABLE."""
        group_type = group_types.create(
            self.context,
            'group',
            {'consistent_group_snapshot_enabled': '<is> True'}
        )
        group = test_utils.create_group(
            self.context,
            id=fake_constants.CONSISTENCY_GROUP_ID,
            host='host@backend#unit_test_pool',
            group_type_id=group_type.id)
        self.DPL_MOCK.create_vg.return_value = DATA_OUTPUT
        model_update = self.dpldriver.create_group(self.context, group)
        # 'test_group' / 'this is a test group' are presumably the defaults
        # filled in by test_utils.create_group -- confirm against test_utils.
        self.DPL_MOCK.create_vg.assert_called_once_with(
            self._conver_uuid2hex(fake_constants.CONSISTENCY_GROUP_ID),
            'test_group',
            'this is a test group')
        self.assertDictEqual({'status': (
            fields.ConsistencyGroupStatus.AVAILABLE)}, model_update)
def test_delete_group(self):
group_type = group_types.create(
self.context,
'group',
{'consistent_group_snapshot_enabled': '<is> True'}
)
group = test_utils.create_group(
self.context,
id=fake_constants.CONSISTENCY_GROUP_ID,
host='host@backend#unit_test_pool',
group_type_id=group_type.id)
self.DB_MOCK.volume_get_all_by_group.return_value = (
[DATA_IN_VOLUME_VG])
self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT
self.DPL_MOCK.delete_cg.return_value = DATA_OUTPUT
model_update, volumes = self.dpldriver.delete_group(
self.context, group, [])
self.DPL_MOCK.delete_vg.assert_called_once_with(
self._conver_uuid2hex(fake_constants.CONSISTENCY_GROUP_ID))
self.DPL_MOCK.delete_vdev.assert_called_once_with(
self._conver_uuid2hex((DATA_IN_VOLUME_VG['id'])))
self.assertDictEqual({'status': (
fields.ConsistencyGroupStatus.DELETED)}, model_update)
    def test_update_group(self):
        """update_group joins added volumes to and removes dropped ones from the VG."""
        group_type = group_types.create(
            self.context,
            'group',
            {'consistent_group_snapshot_enabled': '<is> True'}
        )
        self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
        self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT
        self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT
        group = test_utils.create_group(
            self.context,
            id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
            host='host@backend#unit_test_pool',
            group_type_id=group_type.id)
        # Volume to be added to the group ...
        vol_add = test_utils.create_volume(
            self.context,
            id=fake_constants.VOLUME2_ID,
            display_name=DATA_IN_VOLUME_VG['display_name'],
            size=DATA_IN_VOLUME_VG['size'],
            group_id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
            host=DATA_IN_VOLUME_VG['host'])
        # ... and volume to be removed from it.
        vol_del = test_utils.create_volume(
            self.context,
            id=DATA_IN_REMOVE_VOLUME_VG['id'],
            display_name=DATA_IN_REMOVE_VOLUME_VG['display_name'],
            size=DATA_IN_REMOVE_VOLUME_VG['size'],
            group_id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
            host=DATA_IN_REMOVE_VOLUME_VG['host'])
        (model_update, add_vols, remove_vols) = (
            self.dpldriver.update_group(
                self.context, group, [vol_add], [vol_del]))
        self.DPL_MOCK.join_vg.assert_called_once_with(
            self._conver_uuid2hex(vol_add.id),
            self._conver_uuid2hex(group.id))
        self.DPL_MOCK.leave_vg.assert_called_once_with(
            self._conver_uuid2hex(vol_del.id),
            self._conver_uuid2hex(group.id))
        self.assertDictEqual({'status': (
            fields.ConsistencyGroupStatus.AVAILABLE)}, model_update)
    def test_update_group_exception_join(self):
        """A failed join_vg must raise VolumeBackendAPIException."""
        group_type = group_types.create(
            self.context,
            'group',
            {'consistent_group_snapshot_enabled': '<is> True'}
        )
        self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
        # Negative return code simulates the backend rejecting the join.
        self.DPL_MOCK.join_vg.return_value = -1, None
        self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT
        volume = test_utils.create_volume(
            self.context,
            id=fake_constants.VOLUME2_ID,
            display_name=DATA_IN_VOLUME_VG['display_name'],
            size=DATA_IN_VOLUME_VG['size'],
            host=DATA_IN_VOLUME_VG['host'])
        group = test_utils.create_group(
            self.context,
            id=fake_constants.CONSISTENCY_GROUP_ID,
            host='host@backend#unit_test_pool',
            group_type_id=group_type.id)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.dpldriver.update_group,
                          context=None,
                          group=group,
                          add_volumes=[volume],
                          remove_volumes=None)
    def test_update_group_exception_leave(self):
        """A failed leave_vg must raise VolumeBackendAPIException."""
        group_type = group_types.create(
            self.context,
            'group',
            {'consistent_group_snapshot_enabled': '<is> True'}
        )
        self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG)
        # Negative return code simulates the backend rejecting the removal.
        self.DPL_MOCK.leave_vg.return_value = -1, None
        volume = test_utils.create_volume(
            self.context,
            id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
            display_name=DATA_IN_VOLUME_VG['display_name'],
            size=DATA_IN_VOLUME_VG['size'],
            host=DATA_IN_VOLUME_VG['host'])
        group = test_utils.create_group(
            self.context,
            id=fake_constants.CONSISTENCY_GROUP_ID,
            host='host@backend#unit_test_pool',
            group_type_id=group_type.id)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.dpldriver.update_group,
                          context=None,
                          group=group,
                          add_volumes=None,
                          remove_volumes=[volume])
    @mock.patch(
        'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot')
    def test_create_group_snapshot(self, get_all_for_group_snapshot):
        """create_group_snapshot reports 'available' for a consistent group."""
        group_type = group_types.create(
            self.context,
            'group',
            {'consistent_group_snapshot_enabled': '<is> True'}
        )
        snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context)
        snapshot_obj.group_id = \
            DATA_IN_CG_SNAPSHOT['group_id']
        snapshot_obj.group_type_id = group_type.id
        get_all_for_group_snapshot.return_value = [snapshot_obj]
        self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT
        model_update, snapshots = self.dpldriver.create_group_snapshot(
            self.context, snapshot_obj, [])
        self.assertDictEqual({'status': 'available'}, model_update)
    @mock.patch(
        'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot')
    def test_delete_group_snapshot(self, get_all_for_group_snapshot):
        """delete_group_snapshot removes the vdev snapshot and reports 'deleted'."""
        group_type = group_types.create(
            self.context,
            'group',
            {'consistent_group_snapshot_enabled': '<is> True'}
        )
        snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context)
        snapshot_obj.group_id = \
            DATA_IN_CG_SNAPSHOT['group_id']
        snapshot_obj.group_type_id = group_type.id
        get_all_for_group_snapshot.return_value = [snapshot_obj]
        # NOTE(review): this stubs delete_group_snapshot on the DPL mock,
        # while the call asserted below is delete_vdev_snapshot -- confirm
        # the stub is still needed.
        self.DPL_MOCK.delete_group_snapshot.return_value = DATA_OUTPUT
        model_update, snapshots = self.dpldriver.delete_group_snapshot(
            self.context, snapshot_obj, [])
        self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with(
            self._conver_uuid2hex(snapshot_obj.group_id),
            self._conver_uuid2hex(snapshot_obj.id),
            True)
        self.assertDictEqual({'status': 'deleted'}, model_update)
| eharney/cinder | cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py | Python | apache-2.0 | 38,530 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Blacklist model: postcodes excluded per local authority."""
    dependencies = [("addressbase", "0002_auto_20170211_1533")]
    operations = [
        migrations.CreateModel(
            name="Blacklist",
            fields=[
                (
                    "id",
                    models.AutoField(
                        serialize=False,
                        primary_key=True,
                        verbose_name="ID",
                        auto_created=True,
                    ),
                ),
                # Indexed: lookups filter on the postcode directly.
                ("postcode", models.CharField(max_length=15, db_index=True)),
                # 'lad' is a 9-character local-authority-district code
                # (presumably a GSS code such as E06000001 -- confirm).
                ("lad", models.CharField(max_length=9)),
            ],
        ),
        # A postcode may be blacklisted by several districts, but at most
        # once per district.
        migrations.AlterUniqueTogether(
            name="blacklist", unique_together=set([("postcode", "lad")])
        ),
    ]
| DemocracyClub/UK-Polling-Stations | polling_stations/apps/addressbase/migrations/0003_auto_20170406_0954.py | Python | bsd-3-clause | 899 |
# Kattis "conundrum": the plaintext is the string "PER" repeated; count how
# many characters of the received cypher differ from that pattern.
def _count_mismatches(text, pattern="PER"):
    """Return the number of positions where *text* disagrees with the
    infinitely repeated *pattern* (generalized; the puzzle uses 'PER')."""
    return sum(1 for i, ch in enumerate(text) if ch != pattern[i % len(pattern)])

cypher = input()
count = _count_mismatches(cypher)
print(count) | rvrheenen/OpenKattis | Python/conundrum/conundrum.py | Python | mit | 128 |
#!/usr/bin/env python
# Copyright 2016 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Updates build-version.inc in the current directory, unless the update is
# identical to the existing content.
#
# Args: <spirv-tools_dir>
#
# For each directory, there will be a line in build-version.inc containing that
# directory's "git describe" output enclosed in double quotes and appropriately
# escaped.
from __future__ import print_function
import datetime
import os.path
import subprocess
import sys
OUTFILE = 'build-version.inc'
def command_output(cmd, dir):
    """Run *cmd* inside directory *dir* and return its captured stdout.

    Standard error is captured as well (and discarded).  A RuntimeError is
    raised when the command cannot be launched or exits with a nonzero
    status.
    """
    proc = subprocess.Popen(
        cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _err = proc.communicate()
    if proc.returncode == 0:
        return out
    raise RuntimeError('Failed to run %s in %s' % (cmd, dir))
def describe(dir):
    """Returns a string describing the current Git HEAD version as descriptively
    as possible.

    Runs 'git describe', or alternately 'git rev-parse HEAD', in dir. If
    successful, returns the output; otherwise returns 'unknown hash, <date>'."""
    try:
        # decode() is needed here for Python3 compatibility. In Python2,
        # str and bytes are the same type, but not in Python3.
        # Popen.communicate() returns a bytes instance, which needs to be
        # decoded into text data first in Python3. And this decode() won't
        # hurt Python2.
        return command_output(['git', 'describe'], dir).rstrip().decode()
    # Bug fix: the original bare "except:" also swallowed SystemExit and
    # KeyboardInterrupt; Exception still covers both RuntimeError from
    # command_output and OSError when git is not installed.
    except Exception:
        try:
            # 'git describe' needs at least one tag; fall back to the bare
            # commit hash.
            return command_output(
                ['git', 'rev-parse', 'HEAD'], dir).rstrip().decode()
        except Exception:
            # Not a git checkout (or git unavailable): stamp with today's
            # date as a last resort.
            return 'unknown hash, {}'.format(datetime.date.today().isoformat())
def main():
    """Regenerate build-version.inc from the given spirv-tools checkout.

    The file is rewritten only when its content would actually change, so
    build systems do not see a spurious modification.
    """
    if len(sys.argv) != 2:
        print('usage: {0} <spirv-tools_dir>'.format(sys.argv[0]))
        sys.exit(1)
    version = describe(sys.argv[1]).replace('"', '\\"')
    new_content = '"spirv-tools {}\\n"\n'.format(version)
    if os.path.isfile(OUTFILE):
        if open(OUTFILE, 'r').read() == new_content:
            sys.exit(0)
    open(OUTFILE, 'w').write(new_content)

if __name__ == '__main__':
    main()
| ilangal-amd/CodeXL | Common/Lib/Ext/Vulkan/1.0.8.0/SDK/win/spirv-tools/utils/update_build_version.py | Python | mit | 2,914 |
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import unittest
import dcm.agent.messaging.alert_msg as alert_msg
import dcm.agent.tests.utils.general as test_utils
from dcm.agent.events.globals import global_space as dcm_events
class TestAlertMessaging(unittest.TestCase):
    """Unit tests for AlertAckMsg's send/ack/retransmit state machine."""
    @classmethod
    def setUpClass(cls):
        test_utils.connect_to_debugger()
    def setUp(self):
        pass
    def _make_fake_alert_message(self):
        # Minimal alert document carrying only the keys the messenger uses.
        alert_doc = {"somekey": "value",
                     'alert_timestamp': 10.0,
                     'subject': 'fake subject',
                     'message': 'some alert message'}
        return alert_doc
    def test_simple_alert(self):
        """Send + immediate ack drives the state machine to COMPLETE."""
        alert_doc = self._make_fake_alert_message()
        conn = mock.Mock()
        alerter = alert_msg.AlertAckMsg(alert_doc, conn)
        alerter.send()
        alerter.incoming_message()
        conn.send.assert_called_once_with(alert_doc)
        alerter.stop()
        self.assertEqual(alerter._sm._current_state, "COMPLETE")
    def test_alert_retransmission(self):
        """Without an ack within *timeout*, the doc is retransmitted."""
        timeout = 0.1
        alert_doc = self._make_fake_alert_message()
        conn = mock.Mock()
        alerter = alert_msg.AlertAckMsg(alert_doc, conn, timeout=timeout)
        alerter.send()
        # Poll past the timeout so at least one retransmission fires.
        dcm_events.poll(timeblock=timeout*1.5)
        alerter.incoming_message()
        call = mock.call(alert_doc)
        self.assertGreaterEqual(conn.send.call_count, 2)
        self.assertEqual(conn.send.call_args_list[0], call)
    def test_twosends_two_acks(self):
        """An unacked alert keeps being resent while the event loop runs."""
        # NOTE(review): despite the name, no acks are delivered here; only
        # repeated sends are asserted -- confirm the intended scenario.
        timeout = 0.1
        alert_doc = self._make_fake_alert_message()
        conn = mock.Mock()
        alerter = alert_msg.AlertAckMsg(alert_doc, conn, timeout=timeout)
        alerter.send()
        dcm_events.poll(timeblock=timeout*1.5)
        self.assertGreaterEqual(conn.send.call_count, 2)
    def test_stop_before_done(self):
        """stop() right after send() still finishes in COMPLETE."""
        timeout = 0.1
        alert_doc = self._make_fake_alert_message()
        conn = mock.Mock()
        alerter = alert_msg.AlertAckMsg(alert_doc, conn, timeout=timeout)
        alerter.send()
        alerter.stop()
        self.assertEqual(alerter._sm._current_state, "COMPLETE")
    def test_stop_before_send(self):
        """stop() without a prior send() also reaches COMPLETE."""
        timeout = 0.1
        alert_doc = self._make_fake_alert_message()
        conn = mock.Mock()
        alerter = alert_msg.AlertAckMsg(alert_doc, conn, timeout=timeout)
        alerter.stop()
        self.assertEqual(alerter._sm._current_state, "COMPLETE")
| JPWKU/unix-agent | src/dcm/agent/tests/unit/messaging/test_alert.py | Python | apache-2.0 | 3,011 |
import re
# Regular expression for recognizing HEXEWKB and WKT. A prophylactic measure
# to prevent potentially malicious input from reaching the underlying C
# library. Not a substitute for good Web security programming practices.
hex_regex = re.compile(r'^[0-9A-F]+$', re.I)
# WKT with an optional leading "SRID=<n>;" prefix (EWKT form). Named groups
# expose the SRID, the geometry type keyword, and the bare WKT body; the
# final character class whitelists digits, coordinate punctuation, and the
# letters appearing in WKT keywords/modifiers.
wkt_regex = re.compile(r'^(SRID=(?P<srid>\-?\d+);)?'
                       r'(?P<wkt>'
                       r'(?P<type>POINT|LINESTRING|LINEARRING|POLYGON|MULTIPOINT|'
                       r'MULTILINESTRING|MULTIPOLYGON|GEOMETRYCOLLECTION)'
                       r'[ACEGIMLONPSRUTYZ\d,\.\-\+\(\) ]+)$',
                       re.I)
# Loose GeoJSON sniff: the whole string is a single {...} object, optionally
# padded with whitespace; the JSON itself is validated elsewhere.
json_regex = re.compile(r'^(\s+)?\{.*}(\s+)?$', re.DOTALL)
| sametmax/Django--an-app-at-a-time | ignore_this_directory/django/contrib/gis/geometry.py | Python | mit | 677 |
"""
PyTorch policy class used for APPO.
Adapted from VTraceTFPolicy to use the PPO surrogate loss.
Keep in sync with changes to VTraceTFPolicy.
"""
import gym
import numpy as np
import logging
from typing import Type
import ray.rllib.agents.impala.vtrace_torch as vtrace
from ray.rllib.agents.impala.vtrace_torch_policy import make_time_major, \
choose_optimizer
from ray.rllib.agents.ppo.appo_tf_policy import make_appo_model, \
postprocess_trajectory
from ray.rllib.agents.ppo.ppo_torch_policy import ValueNetworkMixin, \
KLCoeffMixin
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import \
TorchDistributionWrapper, TorchCategorical
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy import LearningRateSchedule
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import apply_grad_clipping, explained_variance,\
global_norm, sequence_mask
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
def appo_surrogate_loss(policy: Policy, model: ModelV2,
                        dist_class: Type[TorchDistributionWrapper],
                        train_batch: SampleBatch) -> TensorType:
    """Constructs the loss for APPO.

    With IS modifications and V-trace for Advantage Estimation.

    Args:
        policy (Policy): The Policy to calculate the loss for.
        model (ModelV2): The Model to calculate the loss for.
        dist_class (Type[ActionDistribution]): The action distr. class.
        train_batch (SampleBatch): The training data.

    Returns:
        Union[TensorType, List[TensorType]]: A single loss tensor or a list
        of loss tensors.
    """
    model_out, _ = model.from_batch(train_batch)
    action_dist = dist_class(model_out, model)
    # Determine the (possibly multi-)discrete action layout; any other
    # action space falls through with a single logit slot.
    if isinstance(policy.action_space, gym.spaces.Discrete):
        is_multidiscrete = False
        output_hidden_shape = [policy.action_space.n]
    elif isinstance(policy.action_space,
                    gym.spaces.multi_discrete.MultiDiscrete):
        is_multidiscrete = True
        output_hidden_shape = policy.action_space.nvec.astype(np.int32)
    else:
        is_multidiscrete = False
        output_hidden_shape = 1
    def _make_time_major(*args, **kw):
        # Reshape flat [B*T, ...] batch tensors into time-major [T, B, ...].
        return make_time_major(policy, train_batch.get("seq_lens"), *args,
                               **kw)
    actions = train_batch[SampleBatch.ACTIONS]
    dones = train_batch[SampleBatch.DONES]
    rewards = train_batch[SampleBatch.REWARDS]
    behaviour_logits = train_batch[SampleBatch.ACTION_DIST_INPUTS]
    # "Old" policy outputs come from the periodically-updated target net.
    target_model_out, _ = policy.target_model.from_batch(train_batch)
    prev_action_dist = dist_class(behaviour_logits, policy.model)
    values = policy.model.value_function()
    values_time_major = _make_time_major(values)
    policy.model_vars = policy.model.variables()
    policy.target_model_vars = policy.target_model.variables()
    if policy.is_recurrent():
        # Mask out the zero-padded timesteps of shorter sequences so they
        # do not contribute to the mean.
        max_seq_len = torch.max(train_batch["seq_lens"]) - 1
        mask = sequence_mask(train_batch["seq_lens"], max_seq_len)
        mask = torch.reshape(mask, [-1])
        num_valid = torch.sum(mask)
        def reduce_mean_valid(t):
            return torch.sum(t * mask) / num_valid
    else:
        reduce_mean_valid = torch.mean
    if policy.config["vtrace"]:
        logger.debug("Using V-Trace surrogate loss (vtrace=True)")
        old_policy_behaviour_logits = target_model_out.detach()
        old_policy_action_dist = dist_class(old_policy_behaviour_logits, model)
        if isinstance(output_hidden_shape, (list, tuple, np.ndarray)):
            unpacked_behaviour_logits = torch.split(
                behaviour_logits, list(output_hidden_shape), dim=1)
            unpacked_old_policy_behaviour_logits = torch.split(
                old_policy_behaviour_logits, list(output_hidden_shape), dim=1)
        else:
            unpacked_behaviour_logits = torch.chunk(
                behaviour_logits, output_hidden_shape, dim=1)
            unpacked_old_policy_behaviour_logits = torch.chunk(
                old_policy_behaviour_logits, output_hidden_shape, dim=1)
        # Prepare actions for loss.
        loss_actions = actions if is_multidiscrete else torch.unsqueeze(
            actions, dim=1)
        # Prepare KL for loss.
        action_kl = _make_time_major(
            old_policy_action_dist.kl(action_dist), drop_last=True)
        # Compute vtrace on the CPU for better perf.
        vtrace_returns = vtrace.multi_from_logits(
            behaviour_policy_logits=_make_time_major(
                unpacked_behaviour_logits, drop_last=True),
            target_policy_logits=_make_time_major(
                unpacked_old_policy_behaviour_logits, drop_last=True),
            actions=torch.unbind(
                _make_time_major(loss_actions, drop_last=True), dim=2),
            discounts=(1.0 - _make_time_major(dones, drop_last=True).float()) *
            policy.config["gamma"],
            rewards=_make_time_major(rewards, drop_last=True),
            values=values_time_major[:-1],  # drop-last=True
            bootstrap_value=values_time_major[-1],
            dist_class=TorchCategorical if is_multidiscrete else dist_class,
            model=model,
            clip_rho_threshold=policy.config["vtrace_clip_rho_threshold"],
            clip_pg_rho_threshold=policy.config[
                "vtrace_clip_pg_rho_threshold"])
        actions_logp = _make_time_major(
            action_dist.logp(actions), drop_last=True)
        prev_actions_logp = _make_time_major(
            prev_action_dist.logp(actions), drop_last=True)
        old_policy_actions_logp = _make_time_major(
            old_policy_action_dist.logp(actions), drop_last=True)
        # Importance ratio between behaviour and old (target) policy,
        # clamped to [0, 2] for stability.
        is_ratio = torch.clamp(
            torch.exp(prev_actions_logp - old_policy_actions_logp), 0.0, 2.0)
        logp_ratio = is_ratio * torch.exp(actions_logp - prev_actions_logp)
        # Stashed on the policy so stats() can report IS mean/variance.
        policy._is_ratio = is_ratio
        advantages = vtrace_returns.pg_advantages.to(policy.device)
        surrogate_loss = torch.min(
            advantages * logp_ratio,
            advantages *
            torch.clamp(logp_ratio, 1 - policy.config["clip_param"],
                        1 + policy.config["clip_param"]))
        mean_kl = reduce_mean_valid(action_kl)
        mean_policy_loss = -reduce_mean_valid(surrogate_loss)
        # The value function loss.
        value_targets = vtrace_returns.vs.to(policy.device)
        delta = values_time_major[:-1] - value_targets
        mean_vf_loss = 0.5 * reduce_mean_valid(torch.pow(delta, 2.0))
        # The entropy loss.
        mean_entropy = reduce_mean_valid(
            _make_time_major(action_dist.entropy(), drop_last=True))
    else:
        logger.debug("Using PPO surrogate loss (vtrace=False)")
        # Prepare KL for Loss
        action_kl = _make_time_major(prev_action_dist.kl(action_dist))
        actions_logp = _make_time_major(action_dist.logp(actions))
        prev_actions_logp = _make_time_major(prev_action_dist.logp(actions))
        logp_ratio = torch.exp(actions_logp - prev_actions_logp)
        advantages = _make_time_major(train_batch[Postprocessing.ADVANTAGES])
        surrogate_loss = torch.min(
            advantages * logp_ratio,
            advantages *
            torch.clamp(logp_ratio, 1 - policy.config["clip_param"],
                        1 + policy.config["clip_param"]))
        mean_kl = reduce_mean_valid(action_kl)
        mean_policy_loss = -reduce_mean_valid(surrogate_loss)
        # The value function loss.
        value_targets = _make_time_major(
            train_batch[Postprocessing.VALUE_TARGETS])
        delta = values_time_major - value_targets
        mean_vf_loss = 0.5 * reduce_mean_valid(torch.pow(delta, 2.0))
        # The entropy loss.
        mean_entropy = reduce_mean_valid(
            _make_time_major(action_dist.entropy()))
    # The summed weighted loss
    total_loss = mean_policy_loss + \
        mean_vf_loss * policy.config["vf_loss_coeff"] - \
        mean_entropy * policy.config["entropy_coeff"]
    # Optional additional KL Loss
    if policy.config["use_kl_loss"]:
        total_loss += policy.kl_coeff * mean_kl
    # Stash intermediate results on the policy for the stats function.
    policy._total_loss = total_loss
    policy._mean_policy_loss = mean_policy_loss
    policy._mean_kl = mean_kl
    policy._mean_vf_loss = mean_vf_loss
    policy._mean_entropy = mean_entropy
    policy._value_targets = value_targets
    return total_loss
def stats(policy: Policy, train_batch: SampleBatch):
    """Stats function for APPO. Returns a dict with important loss stats.

    Args:
        policy (Policy): The Policy to generate stats for.
        train_batch (SampleBatch): The SampleBatch (already) used for training.

    Returns:
        Dict[str, TensorType]: The stats dict.
    """
    values_batched = make_time_major(
        policy,
        train_batch.get("seq_lens"),
        policy.model.value_function(),
        # Mirror the loss: with V-trace the last timestep was dropped there.
        drop_last=policy.config["vtrace"])
    stats_dict = {
        "cur_lr": policy.cur_lr,
        "policy_loss": policy._mean_policy_loss,
        "entropy": policy._mean_entropy,
        "var_gnorm": global_norm(policy.model.trainable_variables()),
        "vf_loss": policy._mean_vf_loss,
        "vf_explained_var": explained_variance(
            torch.reshape(policy._value_targets, [-1]),
            torch.reshape(values_batched, [-1])),
    }
    # _is_ratio only exists when the V-trace branch of the loss ran.
    if policy.config["vtrace"]:
        is_stat_mean = torch.mean(policy._is_ratio, [0, 1])
        is_stat_var = torch.var(policy._is_ratio, [0, 1])
        stats_dict.update({"mean_IS": is_stat_mean})
        stats_dict.update({"var_IS": is_stat_var})
    if policy.config["use_kl_loss"]:
        stats_dict.update({"kl": policy._mean_kl})
        stats_dict.update({"KL_Coeff": policy.kl_coeff})
    return stats_dict
class TargetNetworkMixin:
    """Adds an `update_target` method copying the live net into the target.

    The master learner invokes `update_target` every
    `trainer.update_target_frequency` steps. All worker batches are
    importance sampled w.r.t. the target network so that pi_old stays
    stable for PPO.
    """
    def __init__(self, obs_space, action_space, config):
        def _copy_weights_to_target():
            # Sanity check: live and target nets must expose the same
            # number of variables before syncing.
            n_vars = len(self.model_variables)
            n_target_vars = len(self.target_model_variables)
            assert n_vars == n_target_vars, \
                (self.model_variables, self.target_model_variables)
            self.target_model.load_state_dict(self.model.state_dict())
        self.update_target = _copy_weights_to_target
def add_values(policy, input_dict, state_batches, model, action_dist):
    """Extra action output: attach value-function predictions.

    VF predictions are only needed for GAE postprocessing, i.e. when
    V-trace is disabled; with V-trace enabled nothing is added.
    """
    if policy.config["vtrace"]:
        return {}
    return {SampleBatch.VF_PREDS: policy.model.value_function()}
def setup_early_mixins(policy: Policy, obs_space: gym.spaces.Space,
                       action_space: gym.spaces.Space,
                       config: TrainerConfigDict):
    """Call all mixin classes' constructors before APPOPolicy initialization.

    Args:
        policy (Policy): The Policy object.
        obs_space (gym.spaces.Space): The Policy's observation space.
        action_space (gym.spaces.Space): The Policy's action space.
        config (TrainerConfigDict): The Policy's config.
    """
    # Only the LR schedule is initialized this early -- presumably so
    # policy.cur_lr exists when the optimizer is built; confirm against
    # build_policy_class's init ordering. Remaining mixins are handled in
    # setup_late_mixins.
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
def setup_late_mixins(policy: Policy, obs_space: gym.spaces.Space,
                      action_space: gym.spaces.Space,
                      config: TrainerConfigDict):
    """Call all mixin classes' constructors after APPOPolicy initialization.

    Args:
        policy (Policy): The Policy object.
        obs_space (gym.spaces.Space): The Policy's observation space.
        action_space (gym.spaces.Space): The Policy's action space.
        config (TrainerConfigDict): The Policy's config.
    """
    # Adaptive KL coefficient (used by the loss only when use_kl_loss=True).
    KLCoeffMixin.__init__(policy, config)
    ValueNetworkMixin.__init__(policy, obs_space, action_space, config)
    TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
    # Move target net to device (this is done automatically for the
    # policy.model, but not for any other models the policy has).
    policy.target_model = policy.target_model.to(policy.device)
# Build a child class of `TorchPolicy`, given the custom functions defined
# above.
AsyncPPOTorchPolicy = build_policy_class(
    name="AsyncPPOTorchPolicy",
    framework="torch",
    loss_fn=appo_surrogate_loss,
    stats_fn=stats,
    postprocess_fn=postprocess_trajectory,
    extra_action_out_fn=add_values,
    extra_grad_process_fn=apply_grad_clipping,
    optimizer_fn=choose_optimizer,
    # setup_early_mixins runs before, setup_late_mixins after, the base
    # Policy initialization (see their docstrings).
    before_init=setup_early_mixins,
    before_loss_init=setup_late_mixins,
    make_model=make_appo_model,
    mixins=[
        LearningRateSchedule, KLCoeffMixin, TargetNetworkMixin,
        ValueNetworkMixin
    ],
    # Sample batches must divide evenly into rollout fragments.
    get_batch_divisibility_req=lambda p: p.config["rollout_fragment_length"])
| pcmoritz/ray-1 | rllib/agents/ppo/appo_torch_policy.py | Python | apache-2.0 | 13,259 |
#
# Martin Gracik <mgracik@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.commands.zerombr import FC3_ZeroMbr
class ZeroMbr_TestCase(unittest.TestCase):
    """A freshly constructed zerombr command object defaults to disabled."""
    def runTest(self):
        cmd = FC3_ZeroMbr()
        self.assertEqual(cmd.zerombr, False)
class FC3_TestCase(CommandTest):
    """Parsing tests for the FC3-era 'zerombr' kickstart command."""
    command = "zerombr"
    def runTest(self):
        # pass
        self.assert_parse("zerombr", "zerombr\n")
        # ignoring arguments
        # (anything after the command name is dropped from the output)
        self.assert_parse("zerombr arg", "zerombr\n")
class F9_TestCase(FC3_TestCase):
    """From F9 on, 'zerombr' rejects arguments instead of ignoring them."""
    def runTest(self):
        # pass
        self.assert_parse("zerombr", "zerombr\n")
        # fail
        # zerombr does not take any arguments
        self.assert_parse_error("zerombr arg")
        self.assert_parse_error("zerombr --bad-flag")
if __name__ == "__main__":
    unittest.main()
| bcl/pykickstart | tests/commands/zerombr.py | Python | gpl-2.0 | 1,765 |
# Sikuli snippet: press Alt+F4 to close the frontmost window (presumably the
# 1C client, given the Exit1C script name -- confirm), then finish the script
# with exit status 0 (success).
type(Key.F4, KeyModifier.ALT)
exit(0) | silverbulleters/vanessa-behavoir | tools/Sikuli/WaitForStringAllScenariosOK.sikuli/Exit1C.py | Python | apache-2.0 | 37 |
import logging
import json
from six import StringIO
import unittest
from vcardz import \
get_logger, \
set_logger, \
Parser, \
scrub
class TestScrub(unittest.TestCase):
    """Exercise vcardz's scrub() against the bundled vCard fixtures."""
    def run_scrub(self, data):
        """Parse *data* raw via Parser, then scrub() it, logging both counts.

        There are no assertions yet; the value lies in the logged numbers
        and in scrub() not raising.
        """
        logger = get_logger()
        with open(data) as stream:
            logger.warning('file => %s', data)
            engine = Parser(stream)
            count = 0
            for card in engine:
                count += 1
            logger.warning('raw count => %d', count)
            # Rewind so scrub() re-reads the same stream from the start.
            stream.seek(0)
            # NOTE(review): 'subway' looks like a secondary/rejected bucket
            # returned by scrub() -- confirm against the vcardz API.
            result,subway = scrub(stream, clean_results=True)
            logger.warning('scrub count => %d', len(result))
    def test_scrub_1(self):
        self.run_scrub('./data/test1.vcf')
    def test_name_nonmatches(self):
        self.run_scrub('./data/test2.vcf')
if __name__ == '__main__':
    # WARNING level keeps the per-file count messages visible when run
    # directly.
    set_logger(logging.WARNING)
    unittest.main()
| IdahoDataEngineers/vcardz | tests/test_scrub.py | Python | gpl-2.0 | 890 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import fubai22
class TestFubai22Functions(unittest.TestCase):
    """Placeholder suite for the fubai22 acupoint module (no real tests yet)."""
    def setUp(self):
        pass
    def test_xxx(self):
        # TODO: add real assertions for meridian.acupoints.fubai22.
        pass
if __name__ == '__main__':
    unittest.main()
| sinotradition/meridian | meridian/tst/acupoints/test_fubai22.py | Python | apache-2.0 | 295 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.