| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 | stringlengths 0–8.16k | stringlengths 3–512 | stringlengths 0–8.17k |
zheller/flake8-quotes | test/test_checks.py | Python | mit | 10,373 | 0.00617
from flake8_quotes import QuoteChecker
import os
import subprocess
from unittest import TestCase
class TestChecks(TestCase):
def test_get_noqa_lines(self):
checker = QuoteChecker(None, filename=get_absolute_path('data/no_qa.py'))
self.assertEqual(checker.get_noqa_lines(checker.get_file_contents()), [2])
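# (Illustrative note, not part of the original test file: for get_noqa_lines to
# return [2], the data/no_qa.py fixture presumably carries a "# noqa" comment on
# its second line.)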
class TestFlake8Stdin(TestCase):
def test_stdin(self):
"""Test using stdin."""
filepath = get_absolute_path('data/doubles.py')
with open(filepath, 'rb') as f:
p = subprocess.Popen(['flake8', '--select=Q', '-'], stdin=f,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout_lines = stdout.splitlines()
self.assertEqual(stderr, b'')
self.assertEqual(len(stdout_lines), 3)
self.assertRegex(
stdout_lines[0],
b'stdin:1:(24|25): Q000 Double quotes found but single quotes preferred')
self.assertRegex(
stdout_lines[1],
b'stdin:2:(24|25): Q000 Double quotes found but single quotes preferred')
self.assertRegex(
stdout_lines[2],
b'stdin:3:(24|25): Q000 Double quotes found but single quotes preferred')
class DoublesTestChecks(TestCase):
def setUp(self):
class DoublesOptions():
inline_quotes = "'"
multiline_quotes = "'"
QuoteChecker.parse_options(DoublesOptions)
def test_multiline_string(self):
doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_multiline_string.py'))
self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
{'col': 4, 'line': 1, 'message': 'Q001 Double quote multiline found but single quotes preferred'},
])
def test_multiline_string_using_lines(self):
with open(get_absolute_path('data/doubles_multiline_string.py')) as f:
lines = f.readlines()
doubles_checker = QuoteChecker(None, lines=lines)
self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
{'col': 4, 'line': 1, 'message': 'Q001 Double quote multiline found but single quotes preferred'},
])
def test_wrapped(self):
doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_wrapped.py'))
self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])
def test_doubles(self):
doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles.py'))
self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
{'col': 24, 'line': 1, 'message': 'Q000 Double quotes found but single quotes preferred'},
{'col': 24, 'line': 2, 'message': 'Q000 Double quotes found but single quotes preferred'},
{'col': 24, 'line': 3, 'message': 'Q000 Double quotes found but single quotes preferred'},
])
def test_noqa_doubles(self):
checker = QuoteChecker(None, get_absolute_path('data/doubles_noqa.py'))
self.assertEqual(list(checker.run()), [])
def test_escapes(self):
doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_escaped.py'))
self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
{'col': 25, 'line': 1, 'message': 'Q003 Change outer quotes to avoid escaping inner quotes'},
])
def test_escapes_allowed(self):
class Options():
inline_quotes = "'"
avoid_escape = False
QuoteChecker.parse_options(Options)
doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_escaped.py'))
self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])
class DoublesAliasTestChecks(TestCase):
def setUp(self):
class DoublesAliasOptions():
inline_quotes = 'single'
multiline_quotes = 'single'
QuoteChecker.parse_options(DoublesAliasOptions)
def test_doubles(self):
doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_wrapped.py'))
self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])
doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles.py'))
self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
{'col': 24, 'line': 1, 'message': 'Q000 Double quotes found but single quotes preferred'},
{'col': 24, 'line': 2, 'message': 'Q000 Double quotes found but single quotes preferred'},
{'col': 24, 'line': 3, 'message': 'Q000 Double quotes found but single quotes preferred'},
])
class SinglesTestChecks(TestCase):
def setUp(self):
class SinglesOptions():
inline_quotes = '"'
multiline_quotes = '"'
QuoteChecker.parse_options(SinglesOptions)
def test_multiline_string(self):
singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_multiline_string.py'))
self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
{'col': 4, 'line': 1, 'message': 'Q001 Single quote multiline found but double quotes preferred'},
])
def test_wrapped(self):
singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_wrapped.py'))
self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [])
def test_singles(self):
singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles.py'))
self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
{'col': 24, 'line': 1, 'message': 'Q000 Single quotes found but double quotes preferred'},
{'col': 24, 'line': 2, 'message': 'Q000 Single quotes found but double quotes preferred'},
{'col': 24, 'line': 3, 'message': 'Q000 Single quotes found but double quotes preferred'},
])
def test_noqa_singles(self):
checker = QuoteChecker(None, get_absolute_path('data/singles_noqa.py'))
self.assertEqual(list(checker.run()), [])
def test_escapes(self):
singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_escaped.py'))
self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
{'col': 25, 'line': 1, 'message': 'Q003 Change outer quotes to avoid escaping inner quotes'},
])
def test_escapes_allowed(self):
class Options():
inline_quotes = '"'
avoid_escape = False
QuoteChecker.parse_options(Options)
singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_escaped.py'))
self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [])
class SinglesAliasTestChecks(TestCase):
def setUp(self):
class SinglesAliasOptions():
inline_quotes = 'double'
multiline_quotes = 'double'
QuoteChecker.parse_options(SinglesAliasOptions)
def test_singles(self):
singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_wrapped.py'))
self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [])
singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles.py'))
self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
{'col': 24, 'line': 1, 'message': 'Q000 Single quotes found but double quotes preferred'},
{'col': 24, 'line': 2, 'message': 'Q000 Single quotes found but double quotes preferred'},
{'col': 24, 'line': 3, 'message': 'Q000 Single quotes found but double quotes
nbargnesi/proxme | setup.py | Python | mit | 1,716 | 0.001166
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
# TODO: put package requirements here
]
setup_requirements = [
# TODO(nbargnesi): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='proxme',
version='0.1.0',
description="Serves your proxy auto-config (PAC) content.",
long_description=readme + '\n\n' + history,
author="Nick Bargnesi",
author_email='nick@den-4.com',
url='https://github.com/nbargnesi/proxme',
packages=find_packages(include=['proxme']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='proxme',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=setup_requirements,
entry_points = {
'console_scripts': [
'proxme = proxme.__main__:main'
],
}
)
DayGitH/Python-Challenges | DailyProgrammer/DP20161109B.py | Python | mit | 1,742 | 0.004018
"""
[2016-11-09] Challenge #291 [Intermediate] Reverse Polish Notation Calculator
https://www.reddit.com/r/dailyprogrammer/comments/5c5jx9/20161109_challenge_291_intermediate_reverse/
A little while back we had a programming
[challenge](https://www.reddit.com/r/dailyprogrammer/comments/2yquvm/20150311_challenge_205_intermediate_rpn/) to
convert an infix expression (also known as "normal" math) to a postfix expression (also known as [Reverse Polish
Notation](https://en.wikipedia.org/wiki/Reverse_Polish_notation)). Today we'll do something a little different: We will
write a calculator that takes RPN input, and outputs the result.
# Formal input
The input will be a whitespace-delimited RPN expression. The supported operators will be:
* `+` - addition
* `-` - subtraction
* `*`, `x` - multiplication
* `/` - division (floating point, e.g. `3/2=1.5`, not `3/2=1`)
* `//` - integer division (e.g. `3/2=1`)
* `%` - modulus, or "remainder" division (e.g. `14%3=2` and `21%7=0`)
* `^` - power
* `!` - factorial (unary operator)
**Sample input:**
0.5 1 2 ! * 2 1 ^ + 10 + *
# Formal output
The output is a single number: the result of the calculation. The output should also indicate if the input is not a
valid RPN expression.
**Sample output:**
7
Explanation: the sample input translates to `0.5 * ((1 * 2!) + (2 ^ 1) + 10)`, which comes out to `7`.
## Challenge 1
**Input:** `1 2 3 4 ! + - / 100 *`
**Output:** `-4`
## Challenge 2
**Input:** `100 807 3 331 * + 2 2 1 + 2 + * 5 ^ * 23 10 558 * 10 * + + *`
# Finally...
Hope you enjoyed today's challenge! Have a fun problem or challenge of your own?
Drop by /r/dailyprogrammer_ideas and share it with everyone!
"""
def main():
pass
if __name__ == "__main__":
main()
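# Illustrative sketch only, not part of the dataset row above: the challenge text
# describes stack-based evaluation of a whitespace-delimited RPN expression. A
# minimal evaluator under those rules (operator set and error handling assumed)
# could look like this:
import math
import operator

def eval_rpn(expression):
    """Evaluate a whitespace-delimited RPN expression and return the result."""
    binary = {
        '+': operator.add, '-': operator.sub,
        '*': operator.mul, 'x': operator.mul,
        '/': operator.truediv, '//': operator.floordiv,
        '%': operator.mod, '^': operator.pow,
    }
    stack = []
    for token in expression.split():
        if token in binary:
            b, a = stack.pop(), stack.pop()  # right operand is popped first
            stack.append(binary[token](a, b))
        elif token == '!':
            stack.append(math.factorial(int(stack.pop())))
        else:
            stack.append(float(token))
    if len(stack) != 1:
        raise ValueError('not a valid RPN expression')
    return stack[0]

# eval_rpn('0.5 1 2 ! * 2 1 ^ + 10 + *') == 7.0, matching the sample output above.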
aronsky/home-assistant | homeassistant/components/vicare/sensor.py | Python | apache-2.0 | 16,333 | 0.000735
"""Viessmann ViCare sensor device."""
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass
import logging
from PyViCare.PyViCareUtils import (
PyViCareInvalidDataError,
PyViCareNotSupportedFeatureError,
PyViCareRateLimitError,
)
import requests
from homeassistant.components.sensor import (
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TIME_HOURS,
)
import homeassistant.util.dt as dt_util
from . import ViCareRequiredKeysMixin
from .const import DOMAIN, VICARE_API, VICARE_DEVICE_CONFIG, VICARE_NAME
_LOGGER = logging.getLogger(__name__)
SENSOR_OUTSIDE_TEMPERATURE = "outside_temperature"
SENSOR_SUPPLY_TEMPERATURE = "supply_temperature"
SENSOR_RETURN_TEMPERATURE = "return_temperature"
# gas sensors
SENSOR_BOILER_TEMPERATURE = "boiler_temperature"
SENSOR_BURNER_MODULATION = "burner_modulation"
SENSOR_BURNER_STARTS = "burner_starts"
SENSOR_BURNER_HOURS = "burner_hours"
SENSOR_BURNER_POWER = "burner_power"
SENSOR_DHW_GAS_CONSUMPTION_TODAY = "hotwater_gas_consumption_today"
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK = "hotwater_gas_consumption_heating_this_week"
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH = "hotwater_gas_consumption_heating_this_month"
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR = "hotwater_gas_consumption_heating_this_year"
SENSOR_GAS_CONSUMPTION_TODAY = "gas_consumption_heating_today"
SENSOR_GAS_CONSUMPTION_THIS_WEEK = "gas_consumption_heating_this_week"
SENSOR_GAS_CONSUMPTION_THIS_MONTH = "gas_consumption_heating_this_month"
SENSOR_GAS_CONSUMPTION_THIS_YEAR = "gas_consumption_heating_this_year"
# heatpump sensors
SENSOR_COMPRESSOR_STARTS = "compressor_starts"
SENSOR_COMPRESSOR_HOURS = "compressor_hours"
SENSOR_COMPRESSOR_HOURS_LOADCLASS1 = "compressor_hours_loadclass1"
SENSOR_COMPRESSOR_HOURS_LOADCLASS2 = "compressor_hours_loadclass2"
SENSOR_COMPRESSOR_HOURS_LOADCLASS3 = "compressor_hours_loadclass3"
SENSOR_COMPRESSOR_HOURS_LOADCLASS4 = "compressor_hours_loadclass4"
SENSOR_COMPRESSOR_HOURS_LOADCLASS5 = "compressor_hours_loadclass5"
# fuelcell sensors
SENSOR_POWER_PRODUCTION_CURRENT = "power_production_current"
SENSOR_POWER_PRODUCTION_TODAY = "power_production_today"
SENSOR_POWER_PRODUCTION_THIS_WEEK = "power_production_this_week"
SENSOR_POWER_PRODUCTION_THIS_MONTH = "power_production_this_month"
SENSOR_POWER_PRODUCTION_THIS_YEAR = "power_production_this_year"
@dataclass
class ViCareSensorEntityDescription(SensorEntityDescription, ViCareRequiredKeysMixin):
"""Describes ViCare sensor entity."""
GLOBAL_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
ViCareSensorEntityDescription(
key=SENSOR_OUTSIDE_TEMPERATURE,
name="Outside Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
value_getter=lambda api: api.getOutsideTemperature(),
device_class=DEVICE_CLASS_TEMPERATURE,
),
ViCareSensorEntityDescription(
key=SENSOR_RETURN_TEMPERATURE,
name="Return Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
value_getter=lambda api: api.getReturnTemperature(),
device_class=DEVICE_CLASS_TEMPERATURE,
),
ViCareSensorEntityDescription(
key=SENSOR_BOILER_TEMPERATURE,
name="Boiler Temperature",
native_unit_of_measurement=TEMP_CELSIUS,
value_getter=lambda api: api.getBoilerTemperature(),
device_class=DEVICE_CLASS_TEMPERATURE,
),
ViCareSensorEntityDescription(
key=SENSOR_DHW_GAS_CONSUMPTION_TODAY,
name="Hot water gas consumption today",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getGasConsumptionDomesticHotWaterToday(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK,
name="Hot water gas consumption this week",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisWeek(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH,
name="Hot water gas consumption this month",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisMonth(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR,
name="Hot water gas consumption this year",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisYear(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_GAS_CONSUMPTION_TODAY,
name="Heating gas consumption today",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getGasConsumptionHeatingToday(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_GAS_CONSUMPTION_THIS_WEEK,
name="Heating gas consumption this week",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getGasConsumptionHeatingThisWeek(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_GAS_CONSUMPTION_THIS_MONTH,
name="Heating gas consumption this month",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getGasConsumptionHeatingThisMonth(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_GAS_CONSUMPTION_THIS_YEAR,
name="Heating gas consumption this year",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getGasConsumptionHeatingThisYear(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_POWER_PRODUCTION_CURRENT,
name="Power production current",
native_unit_of_measurement=POWER_WATT,
value_getter=lambda api: api.getPowerProductionCurrent(),
device_class=DEVICE_CLASS_POWER,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_POWER_PRODUCTION_TODAY,
name="Power production today",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getPowerProductionToday(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_POWER_PRODUCTION_THIS_WEEK,
name="Power production this week",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getPowerProductionThisWeek(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_POWER_PRODUCTION_THIS_MONTH,
name="Power production this month",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getPowerProductionThisMonth(),
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
),
ViCareSensorEntityDescription(
key=SENSOR_POWER_PRODUCTION_THIS_YEAR,
name="Power production this year",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value_getter=lambda api: api.getPowerProductionThisYear(),
psikon/pitft-scripts | src/mainscreen.py | Python | mit | 2,347 | 0.007669
#!/usr/bin/env python
'''
Generate the main window for the pi-gui program. The interface shows the last
played item (interactive, with cover, title and supplemental information) and
two buttons to open the library screen and to exit the program itself.
'''
#@author: Philipp Sehnert
#@contact: philipp.sehnert[a]gmail.com
# python imports
import sys, os
import pygame
# internal imports
from interfaces import Interface
YELLOW = (255, 255, 0)
class MainMenu():
''' generate the start interface for accessing all other screens'''
def __init__(self, screen, funcs, hardware_instance, book):
# declare important variables
self.screen = screen
# important for framerate
self.clock = pygame.time.Clock()
# contain all interface methods
self.interface = Interface()
# functions for the menu items
self.funcs = funcs
# cached book for last played window
self.book = book
#define function that checks for mouse location
def on_click(self):
click_pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
# select last played item
if 10 <= click_pos[0] <= 310 and 120 <= click_pos[1] <= 185:
self.funcs['Continue'](self.book)
# go to library screen
if 10 <= click_pos[0] <= 205 and 190 <= click_pos[1] <= 230:
self.funcs['Select Book']()
# exit gui
if 265 <= click_pos[0] <= 315 and 190 <= click_pos[1] <= 230:
self.interface.exit_interface(self.screen)
def run(self):
'''run method for drawing the screen to display'''
mainloop = True
# use infinity loop for showing the screen
while mainloop:
# Limit frame speed to 30 FPS
self.clock.tick(30)
self.interface.main_interface(self.screen, self.book)
# wait for a pressed button or exit infinity loop
for event in pygame.event.get():
# recognize mouse and touchscreen activity
if event.type == pygame.MOUSEBUTTONDOWN:
pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
pygame.draw.circle(self.screen, YELLOW, pos, 10, 0)
self.on_click()
# update the screen
pygame.display.flip()
vlegoff/tsunami | src/abstraits/obase/__init__.py | Python | bsd-3-clause | 9,968 | 0.002834
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit la classe BaseObj définie plus bas."""
import sys
import traceback
import time
from collections import OrderedDict
from bases.collections.dictionnaire import *
from bases.collections.liste import Liste
objets_base = {} # dictionnaire des différents BaseObj {nom_cls:cls}
# Objets chargés
objets = {}
objets_par_type = {}
ids = {}
statut_gen = 0 # 0 => OK, 1 => en cours
classes_base = {}
class MetaBaseObj(type):
"""Métaclasse des objets de base.
Cette métaclasse est là pour gérer les versions des différents objets
BaseObj :
Si un objet BaseObj change de structure, pour X raison (par exemple
un attribut change de nom ou de type), à la récupération l'objet sera
mis à jour grâce à une fonction définie dans le convertisseur
(voir BaseObj.update).
La fonction se trouvera dans un fichier identifiant le nom de la
classe. On s'assure grâce à cette métaclasse que deux classes
héritées de BaseObj n'ont pas un nom identique et on attribut
un numéro de version (0) par défaut aux objets issus de ces
classes hérités.
"""
def __init__(cls, nom, bases, contenu):
"""Constructeur de la métaclasse"""
type.__init__(cls, nom, bases, contenu)
classes_base[cls.__module__ + "." + cls.__name__] = cls
# Si on trouve les attributs _nom et _version,
# c'est que la classe est versionnée
if "_nom" in contenu and "_version" in contenu:
cls._version = contenu["_version"]
cls._nom = contenu["_nom"]
# Pas de doublons !
if cls._nom in objets_base:
if objets_base[cls._nom] == cls:
return
raise RuntimeError("La classe {0} héritée de BaseObj " \
"possède le même nom que la classe {1}".format( \
str(cls), str(objets_base[cls._nom])))
objets_base[cls._nom] = cls
# On décore la méthode __init__ de la classe
ancien_init = cls.__init__
def new_init(self, *args, **kwargs):
ancien_init(self, *args, **kwargs)
self.set_version(cls, cls._version)
cls.__init__ = new_init
else:
cls._version = None
cls._nom = None
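# Illustrative only (hypothetical subclass, not part of the original module): a
# versioned BaseObj subclass declares both _nom and _version, e.g.
#
#     class Salle(BaseObj):
#         _nom = "salle"
#         _version = 1
#
# MetaBaseObj then registers the class in objets_base, rejects duplicate _nom
# values, and wraps __init__ so each new instance records its class version.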
INIT, CONSTRUIT = 0, 1
class BaseObj(metaclass=MetaBaseObj):
"""Classe devant être héritée de la grande majorité des classes de Kassie.
Le test est simple : si l'objet issu de la classe doit être enregistré,
l'hériter de BaseObj.
"""
importeur = None
enregistrer = False
_nom = "base_obj"
_version = 1
def __init__(self):
"""Instancie un simple statut"""
self._statut = INIT
# On initialise le dictionnaire des versions de l'objet
self._dict_version = {}
self.e_existe = True
self.ajouter_enr()
def __getnewargs__(self):
raise NotImplementedError(
"la classe " + str(type(self)) + " n'a pas de méthode " \
"__getnewargs__")
def ajouter_enr(self):
if self.e_existe and type(self).enregistrer and statut_gen == 0 and \
id(self) not in objets:
objets[id(self)] = self
liste = objets_par_type.get(type(self), [])
liste.append(self)
objets_par_type[type(self)] = liste
def version_actuelle(self, classe):
"""Retourne la version actuelle de l'objet.
Cette version est celle enregistrée dans l'objet. Elle peut
donc être différence de la classe (c'est le cas au chargement d'un
objet à mettre à jour).
"""
if classe._nom in self._dict_version:
return self._dict_version[classe._nom]
else:
return 0
def set_version(self, classe, version):
"""Met le numéro de version dans le dictionnaire de version."""
self._dict_version[classe._nom] = version
def _construire(self):
"""Construit l'objet"""
self._statut = CONSTRUIT
def detruire(self):
"""Marque l'objet comme détruit."""
self.e_existe = False
importeur.supenr.detruire_objet(self)
if id(self) in objets:
del objets[id(self)]
@property
def construit(self):
return hasattr(self, "_statut") and self._statut == CONSTRUIT
def __setstate__(self, dico_attrs):
"""Méthode appelée lors de la désérialisation de l'objet"""
global statut_gen
statut_gen = 1
# On récupère la classe
classe = type(self)
# On appel son constructeur
try:
classe.__init__(self, *self.__getnewargs__())
except NotImplementedError:
print("Méthode __getnewargs__ non définie pour", classe)
sys.exit(1)
except TypeError as err:
print("Erreur lors de l'appel au constructeur de", classe, err)
print(traceback.format_exc())
sys.exit(1)
self.__dict__.update(dico_attrs)
# On vérifie s'il a besoin d'une vraie mis à jour
self._update(classe)
statut_gen = 0
self.ajouter_enr()
def _update(self, classe):
"""Méthode appelée pendant la désérialisation de l'objet,
destinée à vérifier si l'objet doit être mis à jour et, le cas
échéant, le mettre à jour.
"""
# Mise à jour récursive par rapport aux classes-mères
for base in classe.__bases__:
# Inutile d'essayer de mettre à jour 'object'
if base is not object:
base._update(self, base)
if classe._nom in objets_base:
# On importe le convertisseur dédié à la classe en cours
try:
convertisseur = getattr(__import__( \
"primaires.supenr.convertisseurs." + classe._nom, \
globals(), locals(), ["Convertisseur"]), \
"Convertisseur")
except ImportError as error:
print("La classe {0} suivie en version ne possède pas de " \
"fichier de convertisseurs dans primaires.supenr." \
"convertisseurs".format(classe._nom))
exit()
except AttributeError as error:
print("Le fichier {0}.py dans primaires.supenr." \
"convertisseurs ne possède pas de classe " \
"Convertisseur".format(classe._nom))
exit()
# On vérifie la version de
krfkeith/enough | lib/dot.py | Python | gpl-3.0 | 6,472 | 0.004944
# Copyright (c) 2007 Enough Project.
# See LICENSE for details.
## /* Copyright 2007, Eyal Lotem, Noam Lewis, enoughmail@googlegroups.com */
## /*
## This file is part of Enough.
## Enough is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Enough is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
## */
# Parses DOT "plain" output
# graph scale width height
# node name x y width height label style shape color fillcolor
# edge tail head n x1 y1 .. xn yn [label xl yl] style color
# stop
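# Illustrative only (made-up graph, not from the original source): a minimal
# "plain" document matching the format above would be
#
#     graph 1 2.5 2.0
#     node a 0.5 1.5 0.75 0.5 a solid ellipse black lightgrey
#     node b 2.0 0.5 0.75 0.5 b solid ellipse black lightgrey
#     edge a b 4 0.8 1.3 1.1 1.1 1.4 0.9 1.7 0.7 solid black
#     stop
#
# which the _GraphParser below turns into (graph, nodes, edges) dictionaries.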
from twisted.internet import protocol, defer
from twisted.protocols.basic import LineReceiver
class OutOfDate(Exception): pass
class Error(Exception): pass
class _ProtocolWrapper(protocol.ProcessProtocol):
"""
This class wraps a L{Protocol} instance in a L{ProcessProtocol} instance.
"""
def __init__(self, proto):
self.proto = proto
def connectionMade(self):
self.proto.connectionMade()
def outReceived(self, data):
self.proto.dataReceived(data)
def errReceived(self, data):
import sys
sys.stderr.write(data)
sys.stderr.flush()
def processEnded(self, reason):
self.proto.connectionLost(reason)
class _DotProtocol(LineReceiver):
delimiter = '\n'
def __init__(self):
self._waiting = None
self._current_graph_parser = None
self._process = None
def set_process(self, process):
self._process = process
def lineReceived(self, line):
if self._current_graph_parser is None:
raise Error("Dot outputs stuff, we're not expecting it", line)
self._current_graph_parser.lineReceived(line)
def _completed_current(self, result):
self._current_graph_parser = None
if self._waiting:
dot_graph_text, d = self._waiting
self._waiting = None
self._start(dot_graph_text, d)
return result
def get_graph_data(self, dot_graph_text):
d = defer.Deferred()
if self._current_graph_parser:
# Let the current result finish computing, "queue" this
# one.
if self._waiting:
self._waiting[1].errback(OutOfDate())
self._waiting = dot_graph_text, d
else:
self._start(dot_graph_text, d)
return d
def _start(self, dot_graph_text, d):
self._process.write(dot_graph_text + '\n')
d.addBoth(self._completed_current)
self._current_graph_parser = _GraphParser(d)
class _GraphParser(object):
def __init__(self, dresult):
self.dresult = dresult
self.graph = {}
self.nodes = {}
self.edges = {} # by heads
def lineReceived(self, line):
graph, nodes, edges = self.graph, self.nodes, self.edges
words = line.split()
if words[0] == 'graph':
graph['scale'], graph['width'], graph['height'] = map(float, words[1:])
return
if words[0] == 'node':
node = {}
node['name'] = words[1]
start = 2
for i,attr in enumerate(('x', 'y','width', 'height',)):
node[attr] = float(words[i+start])
start += 4
for i,attr in enumerate(('label', 'style',
'shape', 'color',
'fillcolor')):
node[attr] = (words[i+start])
nodes[node['name']] = node
return
if words[0] == 'edge':
edge = {}
edge['tail'] = words[1]
edge['head'] = words[2]
n = int(words[3])
points = []
i = 4
while (i - 4) / 2 < n:
points.append((float(words[i]), float(words[i+1])))
i += 2
edge['points'] = points
if len(words) == 6+n*2:
edge['label'] = edge['lx'] = edge['ly'] = None
elif len(words) == 9+n*2:
edge['label'] = words[-5]
edge['lx'], edge['ly'] = float(words[-4]), float(words[-3])
else:
assert False, "Cannot understand %r" % (line,)
edge['style'] = words[-2]
edge['color'] = words[-1]
edges.setdefault(edge['tail'], []).append(edge)
return
if words[0] == 'stop':
self.dresult.callback((graph, nodes, edges))
return
self.dresult.errback(ValueError("Unexpected statement", line))
class Dot(object):
layout_programs = ('dot', 'neato', 'twopi')
def __init__(self):
from twisted.internet import reactor
self.protocol = _DotProtocol()
self.processes = {}
for prog, command_line in find_dot(self.layout_programs).iteritems():
process = reactor.spawnProcess(_ProtocolWrapper(self.protocol),
command_line, [command_line, '-Tplain', '-y'])
self.processes[prog] = process
self.set_process('dot')
def set_process(self, prog):
self.protocol.set_process(self.processes[prog])
def get_graph_data(self, dot_graph_text):
return self.protocol.get_graph_data(dot_graph_text)
def find_dot(layout_programs):
import sys
import os
if sys.platform == 'win32':
DOT_PATH = r'\program files\att\graphviz\bin'
DOT_SUFFIX = '.exe'
for drive in ('c', 'd'):
if os.path.isdir(drive + ':' + DOT_PATH):
break
else:
raise Exception("Couldn't find DOT installation path")
DOT_PATH = drive + ':' + DOT_PATH
else:
# Assume dot programs have no suffix and are in the PATH
DOT_PATH = ''
DOT_SUFFIX = ''
res_paths = {}
for prog in layout_programs:
res_paths[prog] = os.path.join(DOT_PATH, prog+DOT_SUFFIX)
return res_paths
yochow/autotest | server/hosts/monitors/monitors_util.py | Python | gpl-2.0 | 10,543 | 0.000474
# Shared utility functions across monitors scripts.
import fcntl, os, re, select, signal, subprocess, sys, time
TERM_MSG = 'Console connection unexpectedly lost. Terminating monitor.'
class Error(Exception):
pass
class InvalidTimestampFormat(Error):
pass
def prepend_timestamp(msg, format):
"""Prepend timestamp to a message in a standard way.
Args:
msg: str; Message to prepend timestamp to.
format: str or callable; Either format string that
can be passed to time.strftime or a callable
that will generate the timestamp string.
Returns: str; 'timestamp\tmsg'
"""
if type(format) is str:
timestamp = time.strftime(format, time.localtime())
elif callable(format):
timestamp = str(format())
else:
raise InvalidTimestampFormat
return '%s\t%s' % (timestamp, msg)
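# Illustrative usage (not in the original module): prepend_timestamp('ready', '%H:%M:%S')
# returns something like '12:34:56\tready'; the timestamp depends on the current local time.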
def write_logline(logfile, msg, timestamp_format=None):
"""Write msg, possibly prepended with a timestamp, as a terminated line.
Args:
logfile: file; File object to .write() msg to.
msg: str; Message to write.
timestamp_format: str or callable; If specified will
be passed into prepend_timestamp along with msg.
"""
msg = msg.rstrip('\n')
if timestamp_format:
msg = prepend_timestamp(msg, timestamp_format)
logfile.write(msg + '\n')
def make_alert(warnfile, msg_type, msg_template, timestamp_format=None):
"""Create an alert generation function that writes to warnfile.
Args:
warnfile: file; File object to write msg's to.
msg_type: str; String describing the message type
msg_template: str; String template that function params
are passed through.
timestamp_format: str or callable; If specified will
be passed into prepend_timestamp along with msg.
Returns: function with a signature of (*params);
The format for a warning used here is:
%(timestamp)d\t%(msg_type)s\t%(status)s\n
"""
if timestamp_format is None:
timestamp_format = lambda: int(time.time())
def alert(*params):
formatted_msg = msg_type + "\t" + msg_template % params
timestamped_msg = prepend_timestamp(formatted_msg, timestamp_format)
print >> warnfile, timestamped_msg
return alert
def build_alert_hooks(patterns_file, warnfile):
"""Parse data in patterns file and transform into alert_hook list.
Args:
patterns_file: file; File to read alert pattern definitions from.
warnfile: file; File to configure alert function to write warning to.
Returns:
list; Regex to alert function mapping.
[(regex, alert_function), ...]
"""
pattern_lines = patterns_file.readlines()
# expected pattern format:
# <msgtype> <newline> <regex> <newline> <alert> <newline> <newline>
# msgtype = a string categorizing the type of the message - used for
# enabling/disabling specific categories of warnings
# regex = a python regular expression
# alert = a string describing the alert message
# if the regex matches the line, this displayed warning will
# be the result of (alert % match.groups())
patterns = zip(pattern_lines[0::4], pattern_lines[1::4],
pattern_lines[2::4])
# assert that the patterns are separated by empty lines
if sum(len(line.strip()) for line in pattern_lines[3::4]) > 0:
raise ValueError('warning patterns are not separated by blank lines')
hooks = []
for msgtype, regex, alert in patterns:
regex = re.compile(regex.rstrip('\n'))
alert_function = make_alert(warnfile, msgtype.rstrip('\n'),
alert.rstrip('\n'))
hooks.append((regex, alert_function))
return hooks
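# Illustrative only (hypothetical patterns file, not from the original source):
# following the four-line format documented above, a patterns file could contain
#
#     oops
#     kernel BUG at (.+)
#     Kernel bug hit at %s
#     <blank line>
#
# and build_alert_hooks(open(patterns_path), warnfile) would then yield one
# (compiled_regex, alert_function) pair for use by process_input() below.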
def process_input(
input, logfile, log_timestamp_format=None, alert_hooks=()):
"""Continuously read lines from input stream and:
- Write them to log, possibly prefixed by timestamp.
- Watch for alert patterns.
Args:
input: file; Stream to read from.
logfile: file; Log file to write to
log_timestamp_format: str; Format to use for timestamping entries.
No timestamp is added if None.
alert_hooks: list; Generated from build_alert_hooks.
[(regex, alert_function), ...]
"""
while True:
line = input.readline()
if len(line) == 0:
# this should only happen if the remote console unexpectedly
# goes away. terminate this process so that we don't spin
# forever doing 0-length reads off of input
write_logline(logfile, TERM_MSG, log_timestamp_format)
break
if line == '\n':
# If it's just an empty line we discard and continue.
continue
write_logline(logfile, line, log_timestamp_format)
for regex, callback in alert_hooks:
match = re.match(regex, line.strip())
if match:
callback(*match.groups())
def lookup_lastlines(lastlines_dirpath, path):
"""Retrieve last lines seen for path.
Open corresponding lastline file for path
If there isn't one or isn't a match return None
Args:
lastlines_dirpath: str; Dirpath to store lastlines files to.
path: str; Filepath to source file that lastlines came from.
Returns:
str; Last lines seen if they exist
- Or -
None; Otherwise
"""
underscored = path.replace('/', '_')
try:
lastlines_file = open(os.path.join(lastlines_dirpath, underscored))
except (OSError, IOError):
return
lastlines = lastlines_file.read()
lastlines_file.close()
os.remove(lastlines_file.name)
if not lastlines:
return
try:
target_file = open(path)
except (OSError, IOError):
return
# Load it all in for now
target_data = target_file.read()
target_file.close()
# Get start loc in the target_data string, scanning from right
loc = target_data.rfind(lastlines)
if loc == -1:
return
# Then translate this into a reverse line number
# (count newlines that occur afterward)
reverse_lineno = target_data.count('\n', loc + len(lastlines))
return reverse_lineno
def write_lastlines_file(lastlines_dirpath, path, data):
"""Write data to lastlines file for path.
Args:
lastlines_dirpath: str; Dirpath to store lastlines files to.
path: str; Filepath to source file that data comes from.
data: str;
Returns:
str; Filepath that lastline data was written to.
"""
underscored = path.replace('/', '_')
dest_path = os.path.join(lastlines_dirpath, underscored)
open(dest_path, 'w').write(data)
return dest_path
def nonblocking(pipe):
"""Set python file object to nonblock
|
ing mode.
This allows us to take advantage of pipe.read()
where we don't have to specify a buflen.
Cuts down on a few lines we'd have to maintain.
Args:
pipe: file; File object to modify
Returns: pipe
"""
flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
fcntl.fcntl(pipe, fcntl.F_SETFL, flags| os.O_NONBLOCK)
return pipe
def launch_tails(follow_paths, lastlines_dirpath=None):
"""Launch a tail process for each follow_path.
Args:
follow_paths: list;
lastlines_dirpath: str;
Returns:
tuple; (procs, pipes) or
({path: subprocess.Popen, ...}, {file: path, ...})
"""
if lastlines_dirpath and not os.path.exists(lastlines_dirpath):
os.makedirs(lastlines_dirpath)
tail_cmd = ('/usr/bin/tail', '--retry', '--follow=name')
procs = {} # path -> tail_proc
pipes = {} # tail_proc.stdout -> path
for path in follow_paths:
cmd = list(tail_cmd)
if lastlines_dirpath:
reverse_lineno = lookup_lastlines(lastlines_dirpath, path)
if reverse_lineno is None:
reverse_lineno = 1
cmd.append('--lines=%d' % reverse_lineno)
cmd.append(path)
tail_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
procs[path] =
themadhatterz/cpapi | app/checkpoint.py | Python | mit | 6,936 | 0.001874
import ast
import base64
import itertools
from functools import lru_cache
import cpapilib
from flask import session
from app import app
OBJECTS_DICTIONARY = None
@lru_cache(maxsize=5000)
def uid_name(uid_obj):
for obj in OBJECTS_DICTIONARY:
if uid_obj == obj['uid']:
return obj['name']
class API(cpapilib.Management):
def pre_data(self):
self.all_colors = [
'aquamarine', 'blue', 'crete blue', 'burlywood', 'cyan',
'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
'forest green', 'gold', 'dark gold', 'gray', 'dark gray',
'light green', 'lemon chiffon', 'coral', 'sea green', 'sky blue',
'magenta', 'purple', 'slate blue', 'violet red', 'navy blue',
'olive', 'orange', 'red', 'sienna', 'yellow'
]
self.all_commands = [command['name'] for command in self.show('commands')['commands']]
self.all_targets = [target['name'] for batch in self.show_all('gateways-and-servers') for target in batch['objects']]
self.all_layers = [(layer['name'], layer['uid']) for batch in self.show_all('access-layer') for layer in batch['access-layers']]
def customcommand(self, command, payload):
"""Validate payload and send command to server."""
try:
payload = ast.literal_eval(payload)
except ValueError:
return 'Invalid input provided.'
except Exception as e:
return e
return self._api_call(command, **payload)
def runcommand(self, targets, script):
"""Issue command against Check Point targets, verify task is complete
on each gateway and return the response for each target."""
taskreturn = []
payload = {
'script-name': 'cpapi',
'script': script,
'targets': targets
}
response = self.run('script', **payload)
if 'tasks' in response:
for task in response['tasks']:
target = task['target']
taskid = task['task-id']
taskresponse = self.monitortask(target, taskid)
taskreturn.append(taskresponse)
return taskreturn
@staticmethod
def base64_ascii(base64resp):
"""Converts base64 to ascii for run command/showtask."""
return base64.b64decode(base64resp).decode('utf-8')
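# Illustrative: base64_ascii('aGVsbG8=') returns 'hello'.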
def monitortask(self, target, taskid):
"""Run gettask until task is complete and we can return response."""
if self.monitor_task(taskid, timeout=30):
response = self.show('task', **{'task-id': taskid, 'details-level': 'full'})
if response['tasks'][0]['task-details'][0]['responseMessage']:
base64resp = response['tasks'][0]['task-details'][0]['responseMessage']
asciiresp = self.base64_ascii(base64resp)
taskresponse = {
'target': target,
'status': response['tasks'][0]['status'],
'response': asciiresp
}
else:
taskresponse = {
'target': target,
'status': response['tasks'][0]['status'],
'response': 'Not Available'
}
else:
app.logger.warn('Script did not finish within time limit on {}.'.format(target))
taskresponse = {
'target': target,
'status': 'Task did not complete within 30 seconds.',
'response': 'Unavailable.'
}
return taskresponse
def show_object(self, objuid):
show_obj_response = self.show('object', uid=objuid)
payload = {
'uid': objuid,
'details-level': 'full'
}
type_obj_response = self.show(show_obj_response['object']['type'], **payload)
return type_obj_response
def show_rules(self, **kwargs):
"""Recieves Layer UID, limit, offset."""
all_rules = {'rulebase': []}
app.logger.info('Retrieving rules for - {}'.format(kwargs))
response = self.show('access-rulebase', **kwargs)
all_rules.update({'to': response['to'], 'total': response['total']})
self._filter_rules(all_rules, response)
return all_rules
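# Illustrative call (keyword names assumed, not confirmed by this file):
# self.show_rules(uid=layer_uid, limit=50, offset=0) would page through one
# access layer and return the filtered rulebase dict built above.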
def _filter_rules(self, all_rules, response):
"""Recieves show_rules response and performs logic against whether
rules are sections or rules."""
for rule in response['rulebase']:
if 'type' in rule:
if rule['type'] == 'access-rule':
final = self._filter_rule(rule, response['objects-dictionary'])
all_rules['rulebase'].append(final)
elif rule['type'] == 'access-section':
if 'name' in rule:
section = rule['name']
else:
section = ''
all_rules['rulebase'].append({'type': 'accesssection', 'name': section})
if 'rulebase' in rule:
for subrule in rule['rulebase']:
final = self._filter_rule(subrule, response['objects-dictionary'])
all_rules['rulebase'].append(final)
return all_rules
@staticmethod
def _filter_rule(rule, object_dictionary):
"""Recieves rule and replaces UID with Name."""
global OBJECTS_DICTIONARY
OBJECTS_DICTIONARY = object_dictionary
src = rule['source']
src_all = []
dst = rule['destination']
dst_all = []
srv = rule['service']
srv_all = []
act = rule['action']
trg = rule['install-on']
trg_all = []
if rule['track']['type']:
trc = rule['track']['type']
else:
trc = rule['track']
for srcobj, dstobj, srvobj, trgobj in itertools.zip_longest(src, dst, srv, trg):
if srcobj:
src_all.append((uid_name(srcobj), srcobj))
if dstobj:
dst_all.append((uid_name(dstobj), dstobj))
if srvobj:
srv_all.append((uid_name(srvobj), srvobj))
if trgobj:
trg_all.append((uid_name(trgobj), trgobj))
return {
'type': 'accessrule',
'number': rule['rule-number'],
'name': rule.get('name', ''),
'source': src_all,
'source-negate': rule['source-negate'],
'destination': dst_all,
'destination-negate': rule['destination-negate'],
'service': srv_all,
'service-negate': rule['service-negate'],
'action': uid_name(act),
'track': uid_name(trc),
'target': trg_all,
'enabled': rule['enabled']
}
smallyear/linuxLearn | salt/salt/modules/freebsdports.py | Python | apache-2.0 | 13,305 | 0
# -*- coding: utf-8 -*-
'''
Install software from the FreeBSD ``ports(7)`` system
.. versionadded:: 2014.1.0
This module allows you to install ports using ``BATCH=yes`` to bypass
configuration prompts. It is recommended to use the :mod:`ports state
`<salt.states.freebsdports>` to install ports, but it is also possible to use
this module exclusively from the command line.
.. code-block:: bash
salt minion-id ports.config security/nmap IPV6=off
salt minion-id ports.install security/nmap
'''
from __future__ import absolute_import
# Import python libs
import fnmatch
import os
import re
import logging
# Import salt libs
import salt.utils
from salt.ext.six import string_types
from salt.exceptions import SaltInvocationError, CommandExecutionError
import salt.ext.six as six
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'ports'
def __virtual__():
return __virtualname__ if __grains__.get('os', '') == 'FreeBSD' else False
def _portsnap():
'''
Return 'portsnap --interactive' for FreeBSD 10, otherwise 'portsnap'
'''
return 'portsnap{0}'.format(
' --interactive' if float(__grains__['osrelease']) >= 10
else ''
)
def _check_portname(name):
'''
Check if portname is valid and whether or not the directory exists in the
ports tree.
'''
if not isinstance(name, string_types) or '/' not in name:
raise SaltInvocationError(
'Invalid port name {0!r} (category required)'.format(name)
)
path = os.path.join('/usr/ports', name)
if not os.path.isdir(path):
raise SaltInvocationError('Path {0!r} does not exist'.format(path))
return path
def _options_dir(name):
'''
Retrieve the path to the dir containing OPTIONS file for a given port
'''
_check_portname(name)
_root = '/var/db/ports'
# New path: /var/db/ports/category_portname
new_dir = os.path.join(_root, name.replace('/', '_'))
# Old path: /var/db/ports/portname
old_dir = os.path.join(_root, name.split('/')[-1])
if os.path.isdir(old_dir):
return old_dir
return new_dir
def _options_file_exists(name):
'''
Returns True/False based on whether or not the options file for the
specified port exists.
'''
return os.path.isfile(os.path.join(_options_dir(name), 'options'))
def _write_options(name, configuration):
'''
Writes a new OPTIONS file
'''
_check_portname(name)
pkg = next(iter(configuration))
conf_ptr = configuration[pkg]
dirname = _options_dir(name)
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except OSError as exc:
raise CommandExecutionError(
'Unable to make {0}: {1}'.format(dirname, exc)
)
with salt.utils.fopen(os.path.join(dirname, 'options'), 'w') as fp_:
sorted_options = list(conf_ptr.keys())
sorted_options.sort()
fp_.write(
'# This file was auto-generated by Salt (http://saltstack.com)\n'
'# Options for {0}\n'
'_OPTIONS_READ={0}\n'
'_FILE_COMPLETE_OPTIONS_LIST={1}\n'
.format(pkg, ' '.join(sorted_options))
)
opt_tmpl = 'OPTIONS_FILE_{0}SET+={1}\n'
for opt in sorted_options:
fp_.write(
opt_tmpl.format(
'' if conf_ptr[opt] == 'on' else 'UN',
opt
)
)
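# Illustrative only (hypothetical package name, not from the original source): for
# configuration = {'nmap-6.47': {'DOCS': 'on', 'IPV6': 'off'}} the options file
# written above would contain roughly:
#
#     # This file was auto-generated by Salt (http://saltstack.com)
#     # Options for nmap-6.47
#     _OPTIONS_READ=nmap-6.47
#     _FILE_COMPLETE_OPTIONS_LIST=DOCS IPV6
#     OPTIONS_FILE_SET+=DOCS
#     OPTIONS_FILE_UNSET+=IPV6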
def _normalize(val):
'''
Fix Salt's yaml-ification of on/off, and otherwise normalize the on/off
values to be used in writing the options file
'''
if isinstance(val, bool):
return 'on' if val else 'off'
return str(val).lower()
def install(name, clean=True):
'''
Install a port from the ports tree. Installs using ``BATCH=yes`` for
non-interactive building. To set config options for a given port, use
:mod:`ports.config <salt.modules.freebsdports.config>`.
clean : True
If ``True``, cleans after installation. Equivalent to running ``make
install clean BATCH=yes``.
.. note::
It may be helpful to run this function using the ``-t`` option to set a
higher timeout, since compiling a port may cause the Salt command to
exceed the default timeout.
CLI Example:
.. code-block:: bash
salt -t 1200 '*' ports.install security/nmap
'''
portpath = _check_portname(name)
old = __salt__['pkg.list_pkgs']()
if old.get(name.rsplit('/')[-1]):
deinstall(name)
result = __salt__['cmd.run_all'](
'make install{0} BATCH=yes'.format(' clean' if clean else ''),
cwd=portpath, reset_system_locale=False
)
if result['retcode'] != 0:
__context__['ports.install_error'] = result['stderr']
__context__.pop('pkg.list_pkgs', None)
new = __salt__['pkg.list_pkgs']()
ret = salt.utils.compare_dicts(old, new)
if not ret and result['retcode'] == 0:
# No change in package list, but the make install was successful.
# Assume that the installation was a recompile with new options, and
# set return dict so that changes are detected by the ports.installed
# state.
ret = {name: {'old': old.get(name, ''),
'new': new.get(name, '')}}
return ret
def deinstall(name):
'''
De-install a port.
CLI Example:
.. code-block:: bash
salt '*' ports.deinstall security/nmap
'''
portpath = _check_portname(name)
old = __salt__['pkg.list_pkgs']()
__salt__['cmd.run']('make deinstall BATCH=yes', cwd=portpath)
__context__.pop('pkg.list_pkgs', None)
new = __salt__['pkg.list_pkgs']()
return salt.utils.compare_dicts(old, new)
def rmconfig(name):
'''
Clear the cached options for the specified port; run a ``make rmconfig``
name
The name of the port to clear
CLI Example:
.. code-block:: bash
salt '*' ports.rmconfig security/nmap
'''
portpath = _check_portname(name)
return __salt__['cmd.run']('make rmconfig', cwd=portpath)
def showconfig(name, default=False, dict_return=False):
'''
Show the configuration options for a given port.
default : False
Show the default options for a port (not necessarily the same as the
current configuration)
dict_return : False
Instead of returning the output of ``make showconfig``, return the data
in a dictionary
CLI Example:
.. code-block:: bash
salt '*' ports.showconfig security/nmap
salt '*' ports.showconfig security/nmap default=True
'''
portpath = _check_portname(name)
if default and _options_file_exists(name):
saved_config = showconfig(name, default=False, dict_return=True)
rmconfig(name)
if _options_file_exists(name):
raise CommandExecutionError('Unable to get default configuration')
default_config = showconfig(name, default=False,
dict_return=dict_return)
_write_options(name, saved_config)
return default_config
try:
result = __salt__['cmd.run_all']('make showconfig', cwd=portpath)
output = result['stdout'].splitlines()
if result['retcode'] != 0:
error = result['stderr']
else:
error = ''
except TypeError:
error = result
if error:
msg = ('Error running \'make showconfig\' for {0}: {1}'
.format(name, error))
log.error(msg)
raise SaltInvocationError(msg)
if not dict_return:
return '\n'.join(output)
if (not output) or ('configuration options' not in output[0]):
return {}
try:
pkg = output[0].split()[-1].rstrip(':')
except (IndexError, AttributeError, TypeError) as exc:
log.error(
'Unable to get pkg-version string: {0}'.format(exc)
)
return {}
ret = {pkg: {}}
output = output[1:]
for line in output:
try:
opt, val, desc = re.match(
r'\s+([^=]+)=(off|on): (.+)', line
|
fokusov/moneyguru | core/plugin/stale_currency_provider.py | Python | gpl-3.0 | 3,715 | 0.004307
# Copyright 2016 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
# This plugin subclasses CurrencyProviderPlugin to provide additional currencies, whose rates are
# stale, and thus never updated. If you want to add your own fancy weird currency, this is the
# best place.
from datetime import date
from core.plugin import CurrencyProviderPlugin
class StaleProviderPlugin(CurrencyProviderPlugin):
NAME = 'Stale currencies provider'
AUTHOR = "Virgil Dupras"
def register_currencies(self):
self.register_currency(
'ATS', 'Austrian schilling',
start_date=date(1998, 1, 2), start_rate=0.1123, stop_date=date(2001, 12, 31), latest_rate=0.10309)
self.register_currency(
'BEF', 'Belgian franc',
start_date=date(1998, 1, 2), start_rate=0.03832, stop_date=date(2001, 12, 31), latest_rate=0.03516)
self.register_currency(
'DEM', 'German deutsche mark',
start_date=date(1998, 1, 2), start_rate=0.7904, stop_date=date(2001, 12, 31), latest_rate=0.7253)
self.register_currency(
'ESP', 'Spanish peseta',
exponent=0, start_date=date(1998, 1, 2), start_rate=0.009334,
stop_date=date(2001, 12, 31), latest_rate=0.008526)
self.register_currency(
'FIM', 'Finnish markka',
start_date=date(1998, 1, 2), start_rate=0.2611, stop_date=date(2001, 12, 31), latest_rate=0.2386)
self.register_currency(
'FRF', 'French franc',
start_date=date(1998, 1, 2), start_rate=0.2362, stop_date=date(2001, 12, 31), latest_rate=0.2163)
self.register_currency(
'GHC', 'Ghanaian cedi (old)',
start_date=date(1998, 1, 2), start_rate=0.00063, stop_date=date(2007, 6, 29), latest_rate=0.000115)
self.register_currency(
'GRD', 'Greek drachma',
start_date=date(1998, 1, 2), start_rate=0.005, stop_date=date(2001, 12, 31), latest_rate=0.004163)
self.register_currency(
'IEP', 'Irish pound',
start_date=date(1998, 1, 2), start_rate=2.0235, stop_date=date(2001, 12, 31), latest_rate=1.8012)
self.register_currency(
'ITL', 'Italian lira',
exponent=0, start_date=date(1998, 1, 2), start_rate=0.000804,
stop_date=date(2001, 12, 31), latest_rate=0.000733)
self.register_currency(
'NLG', 'Netherlands guilder',
start_date=date(1998, 1, 2), start_rate=0.7013, stop_date=date(2001, 12, 31), latest_rate=0.6437)
self.register_currency(
'PTE', 'Portuguese escudo',
exponent=0, start_date=date(1998, 1, 2), start_rate=0.007726,
stop_date=date(2001, 12, 31), latest_rate=0.007076)
self.register_currency(
'SIT', 'Slovenian tolar',
start_date=date(2002, 3, 1), start_rate=0.006174, stop_date=date(2006, 12, 29), latest_rate=0.006419)
self.register_currency(
'TRL', 'Turkish lira',
exponent=0, start_date=date(1998, 1, 2), start_rate=7.0e-06,
stop_date=date(2004, 12, 31), latest_rate=8.925e-07)
self.register_currency(
'VEB', 'Venezuelan bolivar',
exponent=0, start_date=date(1998, 1, 2), start_rate=0.002827,
stop_date=date(2007, 12, 31), latest_rate=0.00046)
self.register_currency(
'SKK', 'Slovak koruna',
start_date=date(2002, 3, 1), start_rate=0.03308, stop_date=date(2008, 12, 31), latest_rate=0.05661)
jpvantuyl/python_koans | python2/koans/about_method_bindings.py | Python | mit | 2,996 | 0.000668
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
def function():
return "pineapple"
def function2():
return "tractor"
class Class(object):
def method(self):
return "parrot"
class AboutMethodBindings(Koan):
def test_methods_are_bound_to_an_object(self):
obj = Class()
self.assertEqual(True, obj.method.im_self == obj)
def test_methods_are_also_bound_to_a_function(self):
obj = Class()
self.assertEqual('parrot', obj.method())
self.assertEqual('parrot', obj.method.im_func(obj))
def test_functions_have_attributes(self):
self.assertEqual(31, len(dir(function)))
self.assertEqual(True, dir(function) == dir(Class.method.im_func))
def test_bound_methods_have_different_attributes(self):
obj = Class()
self.assertEqual(23, len(dir(obj.method)))
def test_setting_attributes_on_an_unbound_function(self):
function.cherries = 3
self.assertEqual(3, function.cherries)
def test_setting_attributes_on_a_bound_method_directly(self):
obj = Class()
try:
obj.method.cherries = 3
except AttributeError as ex:
self.assertMatch('object has no attribute', ex[0])
def test_setting_attributes_on_methods_by_accessing_the_inner_function(self):
obj = Class()
obj.method.im_func.cherries = 3
self.assertEqual(3, obj.method.cherries)
def test_functions_can_have_inner_functions(self):
function2.get_fruit = function
self.assertEqual('pineapple', function2.get_fruit())
def test_inner_functions_are_unbound(self):
function2.get_fruit = function
try:
cls = function2.get_fruit.im_self
except AttributeError as ex:
self.assertMatch('object has no attribute', ex[0])
# ------------------------------------------------------------------
class BoundClass(object):
def __get__(self, obj, cls):
return (self, obj, cls)
binding = BoundClass()
def test_get_descriptor_resolves_attribute_binding(self):
bound_obj, binding_owner, owner_type = self.binding
# Look at BoundClass.__get__():
# bound_obj = self
# binding_owner = obj
# owner_type = cls
self.assertEqual('BoundClass', bound_obj.__class__.__name__)
self.assertEqual('AboutMethodBindings', binding_owner.__class__.__name__)
self.assertEqual(AboutMethodBindings, owner_type)
# ------------------------------------------------------------------
class SuperColor(object):
def __init__(self):
self.choice = None
def __set__(self, obj, val):
self.choice = val
color = SuperColor()
def test_set_descriptor_changes_behavior_of_attribute_assignment(self):
self.assertEqual(None, self.color.choice)
self.color = 'purple'
self.assertEqual('purple', self.color.choice)
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/SGIX/shadow.py
|
Python
|
lgpl-3.0
| 750
| 0.024
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_SGIX_shadow'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_SGIX_shadow',error_checker=_errors._error_checker)
GL_TEXTURE_COMPARE_OPERATOR_SGIX=_C('GL_TEXTURE_COMPARE_OPERATOR_SGIX',0x819B)
GL_TEXTURE_COMPARE_SGIX=_C('GL_TEXTURE_COMPARE_SGIX',0x819A)
GL_TEXTURE_GEQUAL_R_SGIX=_C('GL_TEXTURE_GEQUAL_R_SGIX',0x819D)
GL_TEXTURE_LEQUAL_R_SGIX=_C('GL_TEXTURE_LEQUAL_R_SGIX',0x819C)
|
EdinburghGenomics/clarity_scripts
|
scripts/generate_hamilton_input_UPL.py
|
Python
|
mit
| 4,957
| 0.00585
|
#!/usr/bin/env python
import csv
import sys
from EPPs.common import StepEPP
class GenerateHamiltonInputUPL(StepEPP):
"""Generate a CSV containing the necessary information to batch up to 9 User Prepared Library receipt plates into
one DCT plate. The Hamilton requires input and output plate containers and well positions from the LIMS as well as
the volume to be pipetted, which is taken from the step UDF "DNA Volume (uL)" - this is a constant and can only be
updated with a LIMS configuration change."""
# additional argument required for the location of the Hamilton input file so def __init__ customised
def __init__(self, argv=None):
super().__init__(argv)
self.hamilton_input = self.cmd_args.hamilton_input
@staticmethod
def add_args(argparser):
argparser.add_argument(
'-i', '--hamilton_input', type=str, required=True, help='Hamilton input file generated by the LIMS'
)
def _run(self):
# csv_dict will be a dictionary that consists of the lines to be present in the Hamilton input file. These are
# then sorted into correct order and added to the csv_array which is used to write the file
csv_dict = {}
csv_array = []
# define the column headers that will be used in the Hamilton input file and add to the csv_array to be
# used to write the file
csv_column_headers = ['Input Plate', 'Input Well', 'Output Plate', 'Output Well', 'DNA Volume', 'TE Volume']
csv_array.append(csv_column_headers)
# define the sets for listing the unique input and output containers
unique_input_containers = set()
unique_output_containers = set()
# obtain all of the inputs for the step
all_inputs = self.process.all_inputs()
# find all the inputs for the step that are analytes (i.e. samples and not associated files)
for artifact in all_inputs:
if artifact.type == 'Analyte':
output = self.process.outputs_per_input(artifact.id, Analyte=True)
# the script is only compatible with 1 output for each input i.e. replicates are not allowed
if len(output) > 1:
print('Multiple outputs found for an input %s. This step is not compatible with replicates.' % artifact.name)
sys.exit(1)
# build a list of the unique input containers for checking that no more than 9 are present (this is due
# to a deck limit on the Hamilton) and for sorting the sample locations by input plate. Build a list of
                # unique output containers as no more than 1 output plate is permitted
unique_input_containers.add(artifact.container.name)
unique_output_containers.add(output[0].container.name)
# assemble each line of the Hamilton input file in the correct structure for the Hamilton
csv_line = [artifact.container.name, artifact.location[1], output[0].container.name, output[0].location[1],
self.process.udf['DNA Volume (uL)'], '0']
# build a dictionary of the lines for the Hamilton input file with a key that facilitates the lines
                # being sorted by input container, then column, then row
csv_dict[artifact.container.name + artifact.location[1]] = csv_line
# check the number of input containers
if len(unique_input_containers) > 9:
            print('Maximum number of input plates is 9. There are %s input plates in the step.' % len(unique_input_containers))
sys.exit(1)
# check the number of output containers
if len(unique_output_containers) > 1:
            print('Maximum number of output plates is 1. There are %s output plates in the step.' % len(unique_output_containers))
sys.exit(1)
# define the rows and columns in the input plate (standard 96 well plate pattern)
rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
columns = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
# add the lines to the csv_array that will be used to write the Hamilton input file
for unique_input_container in sorted(unique_input_containers):
for column in columns:
for row in rows:
if unique_input_container + row + ":" + column in csv_dict.keys():
csv_array.append(csv_dict[unique_input_container + row + ":" + column])
# create and write the Hamilton input file, this must have the hamilton_input argument as the prefix as this is
# used by Clarity LIMS to recognise the file and attach it to the step
with open(self.hamilton_input + '-hamilton_input.csv', 'w',newline='') as f:
writer = csv.writer(f)
writer.writerows(csv_array)
if __name__ == '__main__':
GenerateHamiltonInputUPL().run()
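# Illustrative sketch only -- the plate names below are hypothetical, and the DNA volume
# comes from the step UDF 'DNA Volume (uL)'. For two samples in wells A:1 and B:1 of input
# plate 'UPL0001' going to output plate 'DCT0001' with a UDF value of 25, the generated
# '<hamilton_input>-hamilton_input.csv' would look like:
#
#   Input Plate,Input Well,Output Plate,Output Well,DNA Volume,TE Volume
#   UPL0001,A:1,DCT0001,A:1,25,0
#   UPL0001,B:1,DCT0001,B:1,25,0
#
# Lines are ordered by input container, then column, then row, as built in csv_dict above.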
|
knuu/competitive-programming
|
atcoder/corp/codefes2014qb_c_2.py
|
Python
|
mit
| 203
| 0.137931
|
from collections import Counter as C
i,s=lambda:C(input()),lambda t:sum(t.values());a,b,c=i(),i(),i();a,b,N=a&c,b&c,s(c);print('NO'if any((a+b)[k]<v for k,v in c.items())|(s(a)*2<N)|(s(b)*2<N)else'YES')
|
EthanBlackburn/sync-engine
|
migrations/versions/006_add_search_tokens.py
|
Python
|
agpl-3.0
| 797
| 0.002509
|
"""Add search tokens.
Revision ID: 482338e7a7d6
Revises: 41a7e825d108
Create Date: 2014-03-18 00:16:49.525732
"""
# revision identifiers, used by Alembic.
revision = '482338e7a7d6'
down_revision = 'adc646e1f11'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'searchtoken',
        sa.Column('id', sa.Integer(), nullable=False),
sa.Column('token', sa.String(length=255), nullable=True),
sa.Column('source', sa.Enum('name', 'email_address'), nullable=True),
sa.Column('contact_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('searchtoken')
|
randomshinichi/QRL
|
src/qrl/core/txs/TokenTransaction.py
|
Python
|
mit
| 9,390
| 0.002556
|
from pyqrllib.pyqrllib import bin2hstr, QRLHelper
from qrl.core import config
from qrl.core.AddressState import AddressState
from qrl.core.misc import logger
from qrl.core.txs.Transaction import Transaction
class TokenTransaction(Transaction):
"""
TokenTransaction to create new Token.
"""
def __init__(self, protobuf_transaction=None):
super(TokenTransaction, self).__init__(protobuf_transaction)
@property
def symbol(self):
return self._data.token.symbol
@property
def name(self):
return self._data.token.name
@property
def owner(self):
return self._data.token.owner
@property
def decimals(self):
return self._data.token.decimals
@property
def initial_balances(self):
return self._data.token.initial_balances
def get_data_bytes(self):
data_bytes = (self.master_addr +
self.fee.to_bytes(8, byteorder='big', signed=False) +
self.symbol +
self.name +
self.owner +
self._data.token.decimals.to_bytes(8, byteorder='big', signed=False))
for initial_balance in self._data.token.initial_balances:
data_bytes += initial_balance.address
data_bytes += initial_balance.amount.to_bytes(8, byteorder='big', signed=False)
return data_bytes
@staticmethod
def create(symbol: bytes,
name: bytes,
owner: bytes,
decimals: int,
initial_balances: list,
fee: int,
xmss_pk: bytes,
master_addr: bytes = None):
transaction = TokenTransaction()
if master_addr:
transaction._data.master_addr = master_addr
transaction._data.public_key = bytes(xmss_pk)
transaction._data.token.symbol = symbol
transaction._data.token.name = name
transaction._data.token.owner = owner
transaction._data.token.decimals = decimals
for initial_balance in initial_balances:
transaction._data.token.initial_balances.extend([initial_balance])
transaction._data.fee = int(fee)
transaction.validate_or_raise(verify_signature=False)
return transaction
def _validate_custom(self):
if len(self.symbol) > config.dev.max_token_symbol_length:
logger.warning('Token Symbol Length exceeds maximum limit')
logger.warning('Found Symbol Length %s', len(self.symbol))
logger.warning('Expected Symbol length %s', config.dev.max_token_symbol_length)
return False
if len(self.name) > config.dev.max_token_name_length:
logger.warning('Token Name Length exceeds maximum limit')
            logger.warning('Found Name Length %s', len(self.name))
logger.warning('Expected Name length %s', config.dev.max_token_name_length)
return False
if len(self.symbol) == 0:
logger.warning('Missing Token Symbol')
return False
if len(self.name) == 0:
logger.warning('Missing Token Name')
return False
if len(self.initial_balances) == 0:
logger.warning('Invalid Token Transaction, without any initial balance')
return False
sum_of_initial_balances = 0
for initial_balance in self.initial_balances:
sum_of_initial_balances += initial_balance.amount
if initial_balance.amount <= 0:
logger.warning('Invalid Initial Amount in Token Transaction')
logger.warning('Address %s | Amount %s', initial_balance.address, initial_balance.amount)
return False
allowed_decimals = self.calc_allowed_decimals(sum_of_initial_balances // 10 ** self.decimals)
if self.decimals > allowed_decimals:
logger.warning('Decimal is greater than maximum allowed decimal')
logger.warning('Allowed Decimal %s', allowed_decimals)
logger.warning('Decimals Found %s', self.decimals)
return False
if self.fee < 0:
raise ValueError('TokenTransaction [%s] Invalid Fee = %d', bin2hstr(self.txhash), self.fee)
return True
# checks new tx validity based upon node statedb and node mempool.
def validate_extended(self, addr_from_state: AddressState, addr_from_pk_state: AddressState):
if not self.validate_slave(addr_from_state, addr_from_pk_state):
return False
tx_balance = addr_from_state.balance
if not AddressState.address_is_valid(self.addr_from):
logger.warning('Invalid address addr_from: %s', bin2hstr(self.addr_from))
return False
if not AddressState.address_is_valid(self.owner):
logger.warning('Invalid address owner_addr: %s', bin2hstr(self.owner))
return False
for address_balance in self.initial_balances:
if not AddressState.address_is_valid(address_balance.address):
logger.warning('Invalid address in initial_balances: %s', bin2hstr(address_balance.address))
return False
if tx_balance < self.fee:
logger.info('TokenTxn State validation failed for %s because: Insufficient funds', bin2hstr(self.txhash))
logger.info('balance: %s, Fee: %s', tx_balance, self.fee)
return False
if addr_from_pk_state.ots_key_reuse(self.ots_key):
logger.info('TokenTxn State validation failed for %s because: OTS Public key re-use detected',
bin2hstr(self.txhash))
return False
return True
def apply_state_changes(self, addresses_state):
addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
owner_processed = False
addr_from_processed = False
addr_from_pk_processed = False
for initial_balance in self.initial_balances:
if initial_balance.address == self.owner:
owner_processed = True
if initial_balance.address == self.addr_from:
addr_from_processed = True
if initial_balance.address == addr_from_pk:
addr_from_pk_processed = True
if initial_balance.address in addresses_state:
addresses_state[initial_balance.address].update_token_balance(self.txhash, initial_balance.amount)
addresses_state[initial_balance.address].transaction_hashes.append(self.txhash)
if self.owner in addresses_state and not owner_processed:
addresses_state[self.owner].transaction_hashes.append(self.txhash)
if self.addr_from in addresses_state:
addresses_state[self.addr_from].balance -= self.fee
if not addr_from_processed and self.addr_from != self.owner:
addresses_state[self.addr_from].transaction_hashes.append(self.txhash)
if addr_from_pk in addresses_state:
if self.addr_from != addr_from_pk and addr_from_pk != self.owner:
if not addr_from_pk_processed:
addresses_state[addr_from_pk].transaction_hashes.append(self.txhash)
addresses_state[addr_from_pk].increase_nonce()
addresses_state[addr_from_pk].set_ots_key(self.ots_key)
def revert_state_changes(self, addresses_state, chain_manager):
addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
owner_processed = False
addr_from_processed = False
addr_from_pk_processed = False
        for initial_balance in self.initial_balances:
if initial_balance.address == self.owner:
owner_processed = True
if initial_balance.address == self.addr_from:
addr_from_processed = True
            if initial_balance.address == addr_from_pk:
addr_from_pk_processed = True
if initial_balance.address in addresses_state:
addresses_state[initial_balance.address].update_token_balance(self.txhash,
|
ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert
|
client/client_actions/file_fingerprint.py
|
Python
|
apache-2.0
| 2,271
| 0.010128
|
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Action to fingerprint files on the client."""
import hashlib
from grr.parsers import fingerprint
from grr.client import vfs
from grr.client.client_actions import standard
from grr.lib import rdfvalue
class FingerprintFile(standard.ReadBuffer):
"""Apply a set of fingerprinting methods to a file."""
in_rdfvalue = rdfvalue.FingerprintRequest
out_rdfvalue = rdfvalue.FingerprintResponse
_hash_types = {
rdfvalue.FingerprintTuple.Hash.MD5: hashlib.md5,
rdfvalue.FingerprintTuple.Hash.SHA1: hashlib.sha1,
rdfvalue.FingerprintTuple.Hash.SHA256: hashlib.sha256,
}
_fingerprint_types = {
rdfvalue.FingerprintTuple.Type.FPT_GENERIC: (
fingerprint.Fingerprinter.EvalGeneric),
      rdfvalue.FingerprintTuple.Type.FPT_PE_COFF: (
fingerprint.Fingerprinter.EvalPecoff),
}
def Run(self, args):
"""Fingerprint a file."""
with vfs.VFSOpen(args.pathspec,
progress_callback=self.Progress) as file_obj:
fingerprinter = fingerprint.Fingerprinter(file_obj)
response = rdfvalue.FingerprintResponse()
response.pathspec = file_obj.pathspec
if args.tuples:
tuples = args.tuples
else:
# There are none selected -- we will cover everything
tuples = list()
for k in self._fingerprint_types.iterkeys():
tuples.append(rdfvalue.FingerprintTuple(fp_type=k))
for finger in tuples:
hashers = [self._hash_types[h] for h in finger.hashers] or None
if finger.fp_type in self._fingerprint_types:
invoke = self._fingerprint_types[finger.fp_type]
res = invoke(fingerprinter, hashers)
if res:
response.matching_types.append(finger.fp_type)
else:
raise RuntimeError("Encountered unknown fingerprint type. %s" %
finger.fp_type)
# Structure of the results is a list of dicts, each containing the
# name of the hashing method, hashes for enabled hash algorithms,
      # and auxiliary data where present (e.g. signature blobs).
# Also see Fingerprint:HashIt()
response.results = fingerprinter.HashIt()
self.SendReply(response)
|
jaak-s/chemblnet
|
models/vaffl.py
|
Python
|
mit
| 7,837
| 0.016843
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mat", type=str, help="mat file with observations X and side info", required=True)
parser.add_argument("--epochs", type=int, help="number of epochs", default = 2000)
parser.add_argument("--hsize", type=int, help="size of the hidden layer", default = 30)
parser.add_argument("--batch-size", type=int, help="batch size", default = 512)
args = parser.parse_args()
import tensorflow as tf
import scipy.io
import numpy as np
import chemblnet as cn
import chemblnet.vbutils as vb
data = scipy.io.matlab.loadmat(args.mat)
label = data["X"]
Fu = data["Fu"].todense()
Fv = data["Fv"].todense()
# 109, 167, 168, 204, 214, 215
Ytrain, Ytest = cn.make_train_test(label, 0.5)
Ytrain = Ytrain.tocsr()
Ytest = Ytest.tocsr()
# learning parameters
Y_prec = 1.5
h1_size = args.hsize
batch_size = args.batch_size
lrate = 1e-1
lrate_decay = 1.0
print("Data file: %s" % args.mat)
print("Y size: [%d, %d]" % (label.shape[0], label.shape[1]))
print("Num row feat: %d" % Fu.shape[1])
print("Num col feat: %d" % Fv.shape[1])
print("Test stdev: %.4f" % np.std( Ytest.data ))
print("-----------------------")
print("Num epochs: %d" % args.epochs)
print("Hidden size: %d" % args.hsize)
print("Learning rate: %.1e" % lrate)
print("Batch size: %d" % batch_size)
print("-----------------------")
extra_info = False
## y_val is a vector of values and y_coord gives their coordinates
y_val = tf.placeholder(tf.float32, name="y_val")
y_coord = tf.placeholder(tf.int32, shape=[None, 2], name="y_coord")
#y_idx_u = tf.placeholder(tf.int64)
#y_idx_v = tf.placeholder(tf.int64)
x_u = tf.placeholder(tf.float32, shape=[None, Fu.shape[1]], name="x_u")
x_v = tf.placeholder(tf.float32, shape=[None, Fv.shape[1]], name="x_v")
u_idx = tf.placeholder(tf.int64, name="u_idx")
#v_idx = tf.placeholder(tf.int64, name="v_idx")
learning_rate = tf.placeholder(tf.float32, name = "learning_rate")
## ratio of total training points to mini-batch training points, for the current batch
tb_ratio = tf.placeholder(tf.float32, name = "tb_ratio")
bsize = tf.placeholder(tf.float32, name = "bsize")
## model
#beta_u = vb.NormalGammaUni("beta_u", shape = [Fu.shape[1], h1_size], initial_stdev = 0.1, fixed_prec = False)
#beta_v = vb.NormalGammaUni("beta_v", shape = [Fv.shape[1], h1_size], initial_stdev = 0.1, fixed_prec = False)
U = vb.NormalGammaUni("U", shape = [Ytrain.shape[0], h1_size], initial_stdev = 1.0, fixed_prec = False)
V = vb.NormalGammaUni("V", shape = [Ytrain.shape[1], h1_size], initial_stdev = 1.0, fixed_prec = False)
global_mean = tf.constant(Ytrain.data.mean(), dtype=tf.float32)
## means
Umean_b = tf.gather(U.mean, u_idx)
Vmean_b = V.mean
#h_u = tf.matmul(x_u, beta_u.mean) + Umean_b
#h_u = tf.matmul(x_u, beta_u.mean) + Umean_b
h_u = Umean_b
h_v = Vmean_b
y_pred = tf.matmul(h_u, h_v, transpose_b=True)
y_pred_b = global_mean + tf.gather_nd(y_pred, y_coord)
y_sse = tf.reduce_sum( tf.square(y_val - y_pred_b) )
y_loss = Y_prec / 2.0 * y_sse
## variance
Uvar_b = tf.exp(tf.gather(U.logvar, u_idx))
Vvar_b = V.var
#h_u_var = tf.matmul(tf.square(x_u), beta_u.var) + Uvar_b
#h_v_var = tf.matmul(tf.square(x_v), beta_v.var) + Vvar_b
h_u_var = Uvar_b
h_v_var = Vvar_b
y_var = Y_prec / 2.0 * tf.matmul(h_u_var, h_v_var + tf.square(h_v), transpose_b=True) + Y_prec / 2.0 * tf.matmul(tf.square(h_u), h_v_var, transpose_b=True)
var_loss = tf.gather_nd(y_var, y_coord)
L_D = tb_ratio * (y_loss + var_loss)
#L_prior = beta_u.prec_div() + beta_v.prec_div() + U.prec_div() + V.prec_div() + beta_u.normal_div() + beta_v.normal_div() + U.normal_div_partial(Umean_b, Uvar_b, bsize) + V.normal_div()
L_prior = U.prec_div() + V.prec_div() + U.normal_div() + V.normal_div()
loss = L_D + L_prior
train_op = tf.train.AdagradOptimizer(learning_rate).minimize(loss)
#train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#train_op = tf.train.MomentumOptimizer(1e-7, 0.90).minimize(loss)
######################################################
def select_y(X, row_idx):
Xtmp = X[row_idx]
return np.column_stack(Xtmp.nonzero()), Xtmp.data.astype(np.float32), [0, 0]
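# Illustrative example with hypothetical data: for a CSR matrix X whose only nonzeros
# are (0, 1) = 2.0 and (1, 2) = 5.0, select_y(X, np.arange(2)) returns
#   (array([[0, 1], [1, 2]]), array([2., 5.], dtype=float32), [0, 0])
# i.e. the coordinates of the observed entries, their values, and a dummy shape.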
rIdx = np.random.permutation(Ytrain.shape[0])
# ---------- test data ------------- #
Yte_coord, Yte_values, Yte_shape = select_y(Ytest, np.arange(Ytest.shape[0]))
# ------- train data (all) --------- #
Ytr_coord, Ytr_values, Ytr_shape = select_y(Ytrain, np.arange(Ytrain.shape[0]))
sess = tf.Session()
if True:
sess.run(tf.global_variables_initializer())
for epoch in range(args.epochs):
rIdx = np.random.permutation(Ytrain.shape[0])
## mini-batch loop
for start in np.arange(0, Ytrain.shape[0], batch_size):
if start + batch_size > Ytrain.shape[0]:
break
idx = rIdx[start : start + batch_size]
by_coord, by_values, by_shape = select_y(Ytrain, idx)
sess.run(train_op, feed_dict={x_u: Fu[idx,:],
x_v: Fv,
y_coord: by_coord,
y_val: by_values,
u_idx: idx,
tb_ratio: Ytrain.shape[0] / float(len(idx)),#Ytrain.nnz / float(by_values.shape[0]),
learning_rate: lrate,
bsize: batch_size
})
## TODO: check from here
## epoch's Ytest error
if epoch % 1 == 0:
test_y_pred = sess.run(y_pred_b,
feed_dict = {x_u: Fu,
x_v: Fv,
y_coord: Yte_coord,
y_val: Yte_values,
u_idx: np.arange(Ytrain.shape[0])})
test_rmse = np.sqrt(np.mean(np.square(test_y_pred - Yte_values)))
train_y_pred = sess.run(y_pred_b,
feed_dict = {x_u: Fu,
x_v: Fv,
y_coord: Ytr_coord,
y_val: Ytr_values,
u_idx: np.arange(Ytrain.shape[0])})
train_rmse = np.sqrt(np.mean(np.square(train_y_pred - Ytr_values)))
#L_D_tr, loss_tr, beta_u, beta_v = sess.run([L_D, loss, beta.prec_div(), beta.normal_div()],
# feed_dict={x_indices: Xi,
# x_shape: Xs,
            #                                        x_ids_val: Xv,
# x_idx_comp: Xindices,
# y_idx_comp: Ytr_idx_comp,
# y_idx_prot: Ytr_idx_prot,
# y_val: Ytr_val,
            #                                        tb_ratio: 1.0,
# bsize: Ytrain.shape[0]
# })
# beta_l2 = np.sqrt(sess.run(tf.nn.l2_loss(beta.mean)))
# beta_std_min = np.sqrt(sess.run(tf.reduce_min(beta.var)))
# beta_prec = sess.run(beta.prec)
# V_prec = sess.run(V.prec)
# V_l2 = np.sqrt(sess.run(tf.nn.l2_loss(V.mean)))
# Z_prec = sess.run(Z.prec)
# #W2_l2 = sess.run(tf.nn.l2_loss(W2))
# test_rmse = np.sqrt( test_sse / Yte_val.shape[0])
# train_rmse = np.sqrt( train_sse / Ytr_val.shape[0])
if epoch % 20 == 0:
print("Epoch\tRMSE(te, tr)\t\t|")
print("%3d.\t%.5f %.5f\t|" % (epoch, test_rmse, train_rmse))
if extra_info:
#print("beta: [%s]" % beta.summarize(sess))
#print("Z: [%s]" % Z.summarize(sess))
print("V: [%s]" % V.summarize(sess))
|
btenaglia/hpc-historias-clinicas
|
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py
|
Python
|
bsd-3-clause
| 1,309
| 0.002292
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('historias', '0006_auto_20150413_0001'),
]
operations = [
migrations.AlterField(
model_name='historias',
name='fecha_ingreso',
field=models.DateField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468359), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='historias',
name='hora_ingreso',
field=models.TimeField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468307), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='ubicaciones',
name='sala',
field=models.CharField(max_length=10, choices=[(b'SALA 1', b'SALA 1'), (b'SALA 2', b'SALA 2'), (b'SALA 3', b'SALA 3'), (b'SALA 4', b'SALA 4'), (b'SALA 5', b'SALA 5'), (b'GAURDIA', b'GAURDIA'), (b'NEO', b'NEO'), (b'UTI', b'UTI'), (b'UCO', b'UCO'), (b'PRE PARTO', b'PRE PARTO')]),
preserve_default=True,
),
]
|
oskyar/test-TFG
|
TFG/apps/subject/models.py
|
Python
|
gpl-2.0
| 2,996
| 0.000334
|
__author__ = 'oskyar'
from django.db import models
from django.utils.translation import ugettext as _
from s3direct.fields import S3DirectField
from smart_selects.db_fields import ChainedManyToManyField
# Manager de Asignatura
class SubjectManager(models.Manager):
def owner(self, pk_subject):
return self.get(pk=pk_subject).teacher
def by_owner(self, userProfile):
return self.filter(teacher=userProfile)
def get_num_questions(self, subject, type=None):
num_questions = 0
for topic in subject.topics.all():
if type:
for subtopic in topic.subtopics.all():
num_questions += subtopic.questions.filter(type=type).count()
else:
for subtopic in topic.subtopics.all():
num_questions += subtopic.questions.all().count()
return num_questions
def get_all_questions(self, subject, type=None):
questions = list()
for topic in subject.topics.all():
if type:
for subtopic in topic.subtopics.all():
questions += subtopic.questions.filter(type=type)
else:
for subtopic in topic.subtopics.all():
questions += subtopic.questions.all()
return questions
# Asignatura.
class Subject(models.Model):
# id = Id creada por defecto por django
teacher = models.ForeignKey(
'user.UserProfile',
related_name='subjects')
students = ChainedManyToManyField(
'user.UserProfile',
chained_field='student',
chained_model_field='user',
auto_choose=True,
related_name="my_subjects")
name = models.CharField(
max_length=128,
blank=False,
null=False,
verbose_name=_("Nombre de la asignatura"))
description = models.CharField(
max_length=512,
blank=False,
null=False,
verbose_name=_("Breve descripción, máximo 512 caracteres"))
category = models.CharField(
max_length=75,
blank=False,
null=False,
verbose_name=_("Categoría"))
test_opt = models.BooleanField(
blank=False,
null=False,
verbose_name=_("Examen final directo"))
capacity = models.IntegerField(
null=True,
verbose_name=_("Nº máx. alumnos"))
image = S3DirectField(
dest='subjects',
blank=True,
null=True,
verbose_name="Imagen de la asignatura")
created_on = models.DateTimeField(blank=True, null=False)
# pos_image = models.CharField(blank=True, null=True, max_length=250)
objects = SubjectManager()
class Meta:
        permissions = (
('view_subject', 'View detail Subject'),
('register_subject', 'Student registers of subject'),
('unregister_subject', 'Student unregisters of subject')
)
def __str__(self):
        return self.name + " (" + self.category + ")"
|
LowerSilesians/geo-squizzy
|
tests/gs_socket/__init__.py
|
Python
|
mit
| 19
| 0
|
__author__ = 'ing'
| |
arnaudfr/echec
|
tests/test_players.py
|
Python
|
gpl-3.0
| 1,120
| 0.001786
|
# -*- coding: utf-8 -*-
from src.constant import *
import unittest
from src.game import Game
class TestPlayers(unittest.TestCase):
# Init a player
def test_initPlayer(self):
game = Game()
player = game.createPlayer()
self.assertEqual(player._id, 0)
# Get a valid player
def test_getPlayer(self):
game = Game()
player0 = game.createPlayer()
player1 = game.createPlayer()
self.assertEqual(player0._id, 0)
        self.assertEqual(player1._id, 1)
playerN = game.getPlayer(0)
self.assertEqual(playerN._id, 0)
playerN = game.getPlayer(1)
self.assertEqual(playerN._id, 1)
# Get an invalid player
def test_getUnvalidPlayer(self):
game = Game()
player = game.getPlayer(0)
self.assertIsNone(player)
    # Set too many players
def test_tooManyPlayers(self):
game = Game()
for i in range(1,5):
game.createPlayer()
player = game.getPlayer(2)
self.assertEqual(player._id, 2)
player = game.getPlayer(5)
self.assertIsNone(player)
|
mlperf/training_results_v0.7
|
Google/benchmarks/resnet/implementations/resnet-cloud-TF2.0-tpu-v3-32/tf2_common/modeling/activations/swish.py
|
Python
|
apache-2.0
| 2,452
| 0.00367
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
"""Computes the Swish activation function.
The tf.nn.swish operation uses a custom gradient to reduce memory usage.
Since saving custom gradients in SavedModel is currently not supported, and
one would not be able to use an exported TF-Hub module for fine-tuning, we
  provide this wrapper that allows selecting whether to use the native
  TensorFlow swish operation, or a customized operation that uses the
  default TensorFlow gradient computation.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.sigmoid(features)
@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
"""Computes a hard version of the swish function.
This operation can be used to reduce computational cost and improve
quantization for edge devices.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.relu6(features + tf.constant(3.)) * (1. / 6.)
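# Minimal usage sketch (illustrative values only, not part of the original module):
#   x = tf.constant([-3.0, 0.0, 3.0])
#   simple_swish(x)  # ~[-0.142, 0.0, 2.858], i.e. x * sigmoid(x)
#   hard_swish(x)    # [0.0, 0.0, 3.0], i.e. x * relu6(x + 3) / 6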
@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
"""Computes the identity function.
Useful for helping in quantization.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return tf.identity(features)
|
igordejanovic/textx-tools
|
txtools/templates/lang/copy/pack_name/__init__.py
|
Python
|
mit
| 25
| 0
|
__version__ = "0.1.dev0"
| |
praus/shapy
|
tests/netlink/test_htb_class.py
|
Python
|
mit
| 1,941
| 0.014426
|
import unittest
import socket
import os
from shapy.framework.netlink.constants import *
from shapy.framework.netlink.message import *
from shapy.framework.netlink.tc import *
from shapy.framework.netlink.htb import *
from shapy.framework.netlink.connection import Connection
from tests import TCTestCase
class TestClass(TCTestCase):
def test_add_class(self):
self.qhandle = 0x1 << 16 # | 0x1 # major:minor, 1:
self.add_htb_qdisc()
handle = 0x1 << 16 | 0x1
rate = 256*1000
mtu = 1600
this_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(this_dir, 'htb_add_class.data'), 'rb') as f:
data = f.read()
#init = Attr(TCA_HTB_INIT, HTBParms(rate, rate).pack()+data[36+8+4+48:])
init = Attr(TCA_HTB_INIT,
HTBParms(rate, rate).pack() +
RTab(rate, mtu).pack() + CTab(rate, mtu).pack())
tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, handle, self.qhandle, 0,
[Attr(TCA_KIND, 'htb\0'), init])
msg = Message(type=RTM_NEWTCLASS,
flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK,
service_template=tcm)
self.conn.send(msg)
self.check_ack(self.conn.recv())
self.delete_root_qdisc()
def add_htb_qdisc(self):
tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, self.qhandle, TC_H_ROOT, 0,
[Attr(TCA_KIND, 'htb\0'), HTBQdiscAttr(defcls=0x1ff)])
msg = Message(type=RTM_NEWQDISC,
flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK,
service_template=tcm)
self.conn.send(msg)
r = self.conn.recv()
self.check_ack(r)
return r
|
FreeCodeCampRoma/precision_school-management
|
precision/pages/apps.py
|
Python
|
mit
| 122
| 0
|
from django.apps import AppConfig
class PagesConfig(AppConfig):
name = 'precision.pages'
    verbose_name = "Pages"
|
openego/ego.io
|
egoio/db_tables/demand.py
|
Python
|
agpl-3.0
| 6,745
| 0
|
# coding: utf-8
from sqlalchemy import Column, Float, Integer, Numeric, String, Table, Text
from geoalchemy2.types import Geometry
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class EgoDemandFederalstate(Base):
__tablename__ = 'ego_demand_federalstate'
__table_args__ = {'schema': 'demand'}
eu_code = Column(String(7), primary_key=True)
federal_states = Column(String)
elec_consumption_households = Column(Float(53))
elec_consumption_industry = Column(Float(53))
elec_consumption_tertiary_sector = Column(Float(53))
population = Column(Integer)
elec_consumption_households_per_person = Column(Float(53))
class EgoDpLoadarea(Base):
__tablename__ = 'ego_dp_loadarea'
__table_args__ = {'schema': 'demand'}
version = Column(Text, primary_key=True, nullable=False)
id = Column(Integer, primary_key=True, nullable=False)
subst_id = Column(Integer)
area_ha = Column(Float(53))
nuts = Column(String(5))
rs_0 = Column(String(12))
ags_0 = Column(String(12))
otg_id = Column(Integer)
un_id = Column(Integer)
zensus_sum = Column(Integer)
zensus_count = Column(Integer)
zensus_density = Column(Float(53))
ioer_sum = Column(Float(53))
ioer_count = Column(Integer)
ioer_density = Column(Float(53))
sector_area_residential = Column(Float(53))
sector_area_retail = Column(Float(53))
sector_area_industrial = Column(Float(53))
sector_area_agricultural = Column(Float(53))
sector_area_sum = Column(Float(53))
sector_share_residential = Column(Float(53))
sector_share_retail = Column(Float(53))
sector_share_industrial = Column(Float(53))
sector_share_agricultural = Column(Float(53))
    sector_share_sum = Column(Float(53))
sector_count_residential = Column(Integer)
sector_count_retail = Column(Integer)
sector_count_industrial = Column(Integer)
sector_count_agricultural = Column(Integer)
sector_count_sum = Column(Integer)
sector_consumption_residential = Column(Float(53))
sector_consumption_retail = Column(Float(53))
sector_consumption_industrial = Column(Float(53))
sector_consumption_agricultural = Column(Float(53))
sector_consumption_sum = Column(Float(53))
    sector_peakload_retail = Column(Float(53))
sector_peakload_residential = Column(Float(53))
sector_peakload_industrial = Column(Float(53))
sector_peakload_agricultural = Column(Float(53))
geom_centroid = Column(Geometry('POINT', 3035))
geom_surfacepoint = Column(Geometry('POINT', 3035))
geom_centre = Column(Geometry('POINT', 3035))
geom = Column(Geometry('POLYGON', 3035), index=True)
t_ego_dp_loadarea_v0_4_3_mview = Table(
'ego_dp_loadarea_v0_4_3_mview', metadata,
Column('version', Text),
Column('id', Integer, unique=True),
Column('subst_id', Integer),
Column('area_ha', Numeric),
Column('nuts', String(5)),
Column('rs_0', String(12)),
Column('ags_0', String(12)),
Column('otg_id', Integer),
Column('un_id', Integer),
Column('zensus_sum', Integer),
Column('zensus_count', Integer),
Column('zensus_density', Numeric),
Column('ioer_sum', Numeric),
Column('ioer_count', Integer),
Column('ioer_density', Numeric),
Column('sector_area_residential', Numeric),
Column('sector_area_retail', Numeric),
Column('sector_area_industrial', Numeric),
Column('sector_area_agricultural', Numeric),
Column('sector_area_sum', Numeric),
Column('sector_share_residential', Numeric),
Column('sector_share_retail', Numeric),
Column('sector_share_industrial', Numeric),
Column('sector_share_agricultural', Numeric),
Column('sector_share_sum', Numeric),
Column('sector_count_residential', Integer),
Column('sector_count_retail', Integer),
Column('sector_count_industrial', Integer),
Column('sector_count_agricultural', Integer),
Column('sector_count_sum', Integer),
Column('sector_consumption_residential', Float(53)),
Column('sector_consumption_retail', Float(53)),
Column('sector_consumption_industrial', Float(53)),
Column('sector_consumption_agricultural', Float(53)),
Column('sector_consumption_sum', Float(53)),
Column('sector_peakload_retail', Float(53)),
Column('sector_peakload_residential', Float(53)),
Column('sector_peakload_industrial', Float(53)),
Column('sector_peakload_agricultural', Float(53)),
Column('geom_centroid', Geometry('POINT', 3035)),
Column('geom_surfacepoint', Geometry('POINT', 3035)),
Column('geom_centre', Geometry('POINT', 3035)),
Column('geom', Geometry('POLYGON', 3035), index=True),
schema='demand'
)
t_ego_dp_loadarea_v0_4_5_mview = Table(
'ego_dp_loadarea_v0_4_5_mview', metadata,
Column('version', Text),
Column('id', Integer, unique=True),
Column('subst_id', Integer),
Column('area_ha', Numeric),
Column('nuts', String(5)),
Column('rs_0', String(12)),
Column('ags_0', String(12)),
Column('otg_id', Integer),
Column('un_id', Integer),
Column('zensus_sum', Integer),
Column('zensus_count', Integer),
Column('zensus_density', Numeric),
Column('ioer_sum', Numeric),
Column('ioer_count', Integer),
Column('ioer_density', Numeric),
Column('sector_area_residential', Numeric),
Column('sector_area_retail', Numeric),
Column('sector_area_industrial', Numeric),
Column('sector_area_agricultural', Numeric),
Column('sector_area_sum', Numeric),
Column('sector_share_residential', Numeric),
Column('sector_share_retail', Numeric),
Column('sector_share_industrial', Numeric),
Column('sector_share_agricultural', Numeric),
Column('sector_share_sum', Numeric),
Column('sector_count_residential', Integer),
Column('sector_count_retail', Integer),
Column('sector_count_industrial', Integer),
Column('sector_count_agricultural', Integer),
Column('sector_count_sum', Integer),
Column('sector_consumption_residential', Float(53)),
Column('sector_consumption_retail', Float(53)),
Column('sector_consumption_industrial', Float(53)),
Column('sector_consumption_agricultural', Float(53)),
Column('sector_consumption_sum', Float(53)),
Column('sector_peakload_retail', Float(53)),
Column('sector_peakload_residential', Float(53)),
Column('sector_peakload_industrial', Float(53)),
Column('sector_peakload_agricultural', Float(53)),
Column('geom_centroid', Geometry('POINT', 3035)),
Column('geom_surfacepoint', Geometry('POINT', 3035)),
Column('geom_centre', Geometry('POINT', 3035)),
Column('geom', Geometry('POLYGON', 3035), index=True),
schema='demand'
)
|
0xalen/opencaster_isdb-tb
|
libs/dvbobjects/dvbobjects/utils/MJD.py
|
Python
|
gpl-2.0
| 1,057
| 0.011353
|
#! /usr/bin/env python
# This file is part of the dvbobjects library.
#
# Copyright © 2005-2013 Lorenzo Pallara l.pallara@avalpa.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from math import floor
def MJD_convert(year, month, day):
if (month == 1) or (month == 2):
l = 1
else:
l = 0
return 14956 + day + (floor((year - l) * 365.25)) + (floor((month + 1 + l * 12) * 30.6001))
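# Worked example, assuming the caller passes the year as years since 1900 (the
# two-digit year used by the ETSI EN 300 468 Annex C formula): 13 October 1993 gives
#   MJD_convert(93, 10, 13) == 14956 + 13 + 33968 + 336 == 49273 (0xC079)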
|
hipnusleo/laserjet
|
resource/pypi/cryptography-1.7.1/tests/hazmat/primitives/test_idea.py
|
Python
|
apache-2.0
| 2,825
| 0
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import os
import pytest
from cryptography.hazmat.backends.interfaces import CipherBackend
from cryptography.hazmat.primitives.ciphers import algorithms, modes
from .utils import generate_encrypt_test
from ...utils import load_nist_vectors
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
algorithms.IDEA(b"\x00" * 16), modes.ECB()
),
skip_message="Does not support IDEA ECB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeECB(object):
test_ECB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "IDEA"),
["idea-ecb.txt"],
lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
lambda **kwargs: modes.ECB(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.IDEA(b"\x00" * 16), modes.CBC(b"\x00" * 8)
),
skip_message="Does not support IDEA CBC",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeCBC(object):
test_CBC = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "IDEA"),
["idea-cbc.txt"],
lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv))
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.IDEA(b"\x00" * 16), modes.OFB(b"\x00" * 8)
),
skip_message="Does not support IDEA OFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeOFB(object):
test_OFB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "IDEA"),
["idea-ofb.txt"],
lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv))
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.IDEA(b"\x00" * 16), modes.CFB(b"\x00" * 8)
),
skip_message="Does not support IDEA CFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeCFB(object):
test_CFB = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "IDEA"),
["idea-cfb.txt"],
lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv))
)
|
agiliq/fundraiser
|
authentication/forms.py
|
Python
|
bsd-3-clause
| 626
| 0
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class RegistrationForm(UserCreationForm):
email = forms.EmailField(help_text='Enter a valid email address')
address = forms.CharField()
website = forms.URLField()
def clean_email(self):
email = self.cleaned_data['email']
try:
User.objects.get(email=email)
except User.DoesNotExist:
return self.cleaned_data['email']
raise forms.ValidationError(_("Email already exists"))
|
bahattincinic/arguman.org
|
web/nouns/utils.py
|
Python
|
mit
| 1,877
| 0
|
# -*- coding:utf-8 -*-
import operator
import string
import operator
import itertools
import snowballstemmer
from textblob import TextBlob, Word
LOWER_MAP = {
'tr': {
ord('I'): u'ı'
}
}
STEMMERS = {
'en': snowballstemmer.stemmer('english'),
'tr': snowballstemmer.stemmer('turkish'),
}
def noun_phrases(text):
blob = TextBlob(text)
    return blob.tokenize()
def get_synsets(text):
return Word(to_lemma(text)).synsets
def get_lemmas(text):
word = Word(to_lemma(text))
sets = map(set, [synset.lemma_names()
for synset in word.synsets])
return map(from_lemma, reduce(operator.or_, sets))
def to_lemma(text):
return text.replace(' ', '_')
def from_lemma(text):
return text.replace('_', ' ')
def stem_word(word, language):
stemmer = STEMMERS.get(language)
if stemmer is None:
        return word
return (stemmer
.stemWord(word)
.strip(string.punctuation))
def tokenize(wordlist, language, stem=True):
return ' '.join((stem_word(word, language) if stem else word)
for word in wordlist)
def lower(text, language):
if language in LOWER_MAP:
text = text.translate(LOWER_MAP[language])
return text.lower()
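# Illustrative behaviour (hypothetical input): Turkish dotted/dotless i handling,
#   lower(u'ISPARTA', 'tr')  # -> u'ısparta' ('I' is mapped to u'ı' before lowercasing)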
def build_ngrams(text, language='en'):
blob = TextBlob(lower(text, language))
ngrams = [blob.ngrams(n=n) for n in (3, 2, 1)]
wordlists = reduce(operator.add, ngrams)
tokenized = (
tokenize(wordlist, language, stem=True)
for wordlist in wordlists)
pure = (
tokenize(wordlist, language, stem=False)
for wordlist in wordlists)
return itertools.chain(tokenized, pure)
def is_subsequence(sequence, parent):
for i in xrange(1 + len(parent) - len(sequence)):
if sequence == parent[i:i + len(sequence)]:
return True
return False
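# Illustrative behaviour (hypothetical inputs): is_subsequence checks for a contiguous run,
#   is_subsequence(['b', 'c'], ['a', 'b', 'c', 'd'])  # True
#   is_subsequence(['b', 'd'], ['a', 'b', 'c', 'd'])  # False (not contiguous)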
|
rarmknecht/SimpleQuine
|
mkarray.py
|
Python
|
mit
| 111
| 0.027027
|
#!/usr/bin/python
file = open('prog.txt','r')
s = ""
for b in file.read():
s+="%d," % ord(b)
print(s[:-1])
|
avelino/Flask-Quik
|
tests/example.py
|
Python
|
mit
| 315
| 0.003175
|
from flask import Flask
from flask.ext.quik import FlaskQuik
from flask.ext.quik import render_template
app = Flask(__name__)
quik = FlaskQuik(app)
@app.route('/', methods=['GET', 'POST'] )
def hello_quik():
return render_template('hello.html', name='quik')
app.run(host='0.0.0.0', debug=True, port=5000)
|
|
qwergram/data-structures
|
src/test_linked_list.py
|
Python
|
mit
| 5,503
| 0
|
# -*- coding=utf-8 -*-
"""Test LinkedList for random inputs."""
import pytest
def test_linkedlist_tail_default():
"""Test LinkedList contstructor for functionality."""
from linked_list import LinkedList
assert LinkedList.tail is None
def test_linkedlist_construct_empty_list():
"""Test LinkedList insert command works with empty list."""
    from linked_list import LinkedList
input_ = []
linked_list_instance = LinkedList(input_)
assert linked_list_instance.tail is None
def test_linkedlist_construct_integer():
"""Test LinkedList insert command works with empty list."""
from linked_list import LinkedList
input_ = 5
linked_list_instance = LinkedList(input_)
assert linked_list_instance.tail.value == 5
def test_linkedlist_constructor_list_isnode():
"""Test LinkedList contstructor for functionality."""
from linked_list import LinkedList, Node
input_ = [1, 2]
linked_list_instance = LinkedList(input_)
assert isinstance(linked_list_instance.tail, Node)
def test_linkedlist_constructor_nodeval():
"""Test LinkedList contstructor for functionality."""
from linked_list import LinkedList, Node
input_ = [1, 2]
ll_inst = LinkedList(input_)
assert ll_inst.tail.pointer.value == Node(2, Node(1, None)).pointer.value
def test_linkedlist_constructor_nodeterm():
"""Test LinkedList contstructor for functionality."""
from linked_list import LinkedList
input_ = [1, 2]
linked_list_instance = LinkedList(input_)
assert linked_list_instance.tail.pointer.pointer is None
def test_linkedlist_insert_integer():
"""Test LinkedList insert command works correctly."""
from linked_list import LinkedList, Node
input_ = [1, 2]
ll_inst = LinkedList(input_)
ll_inst.insert(3)
assert ll_inst.tail.pointer.pointer.value == (Node(2, Node(1, Node(3,
None))).pointer.pointer.value
)
def test_linkedlist_insert_string():
"""Test LinkeList.insert for tail addition to Node list."""
from linked_list import LinkedList
input_ = [1, 2, 3]
linked_list_instance = LinkedList(input_)
linked_list_instance.insert("Nadia")
assert linked_list_instance.tail.pointer.pointer.pointer.value == "Nadia"
def test_linkedlist_insert_empty():
"""Test LinkedList.insert from an empty list."""
from linked_list import LinkedList
input_ = []
linked_list_instance = LinkedList(input_)
linked_list_instance.insert('a')
assert linked_list_instance.size() == 1
def test_linkedlist_pop():
"""Test LinkedList.pop for head removal."""
from linked_list import LinkedList
input_ = [1]
linked_list_instance = LinkedList(input_)
assert linked_list_instance.pop() == 1
def test_linkedlist_pop_empty():
"""Test LinkedList.pop from an empty list."""
from linked_list import LinkedList
input_ = []
linked_list_instance = LinkedList(input_)
with pytest.raises(IndexError):
linked_list_instance.pop()
def test_linkedlist_size_long():
"""Test LinkedList.size for proper length return."""
from linked_list import LinkedList
input2_ = list(range(75))
linked_list_instance2 = LinkedList(input2_)
assert linked_list_instance2.size() == len(input2_)
def test_linkedlist_size_empty():
"""Test LinkedList.size for proper length return."""
from linked_list import LinkedList
input3_ = []
linked_list_instance3 = LinkedList(input3_)
assert linked_list_instance3.size() == len(input3_)
@pytest.fixture(scope='function')
def linked_list_instance():
"""Fixture for linkedlist search test."""
from linked_list import LinkedList
input_ = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split()
return LinkedList(input_)
def test_linkedlist_search_mid(linked_list_instance):
"""Test LinkedList.search for value match and return."""
assert linked_list_instance.search("d").value == "d"
def test_linkedlist_search_head(linked_list_instance):
"""Test LinkedList.search for value match and return."""
assert linked_list_instance.search("a").value == "a"
def test_linkedlist_search_missing(linked_list_instance):
"""Test LinkedList.search for value match and return."""
assert linked_list_instance.search("norton is amazing") is None
def test_linkedlist_remove(linked_list_instance):
"""Test LinkedList.remove for proper mid-list Node removal."""
from linked_list import Node
linked_list_instance.remove(Node('y'))
assert linked_list_instance.tail.pointer.value == 'x'
def test_linkedlist_remove_tail(linked_list_instance):
"""Test LinkedList.remove for proper first Node removal."""
from linked_list import Node
linked_list_instance.remove(Node('z'))
assert linked_list_instance.tail.pointer.value == 'x'
def test_linkedlist_remove_head():
"""Test LinkedList.remove for proper last Node removal."""
from linked_list import LinkedList, Node
input_ = "a b c".split()
linked_list_instance = LinkedList(input_)
linked_list_instance.remove(Node('a'))
assert linked_list_instance.tail.pointer.pointer is None
def test_linkedlist_display():
"""Test LinkedList.display for proper string formatting."""
from linked_list import LinkedList
input_ = "a b c".split()
linked_list_instance = LinkedList(input_)
assert linked_list_instance.display() == "('c', 'b', 'a')"
|
marteinn/Skeppa
|
skeppa/ext/__init__.py
|
Python
|
mit
| 400
| 0
|
active_extensions = []
class Extension(object):
def register(self):
pass
def dispatch(event, *args, **kwargs):
for extension in active_extensions:
        if not hasattr(extension, event):
continue
getattr(extension, event)(*args, **kwargs)
def register(extension):
instance = extension()
active_extensions.append(instance)
instance.register()
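# Minimal usage sketch (hypothetical extension, not part of the package):
#   class DeployLogger(Extension):
#       def before_deploy(self, *args, **kwargs):
#           print('about to deploy')
#   register(DeployLogger)
#   dispatch('before_deploy')  # calls DeployLogger.before_deploy()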
|
siyanew/Siarobo
|
plugins/soundcloud.py
|
Python
|
mit
| 3,323
| 0.003912
|
import asyncio
import demjson
from bot import user_steps, sender, get, downloader
from message import Message
client_id = ''#YOUR CLIENT ID
async def search(query):
global guest_client_id
search_url = 'https://api.soundcloud.com/search?q=%s&facet=model&limit=30&offset=0&linked_partitioning=1&client_id='+client_id
url = search_url % query
response = await get(url)
r = demjson.decode(response)
res = []
for entity in r['collection']:
if entity['kind'] == 'track':
res.append([entity['title'], entity['permalink_url']])
return res
async def getfile(url):
response = await get(
"https://api.soundcloud.com/resolve?url={}&client_id="+client_id.format(url))
r = demjson.decode(response)
return r['stream_url'] + "?client_id="+client_id
@asyncio.coroutine
async def run(message, matches, chat_id, step):
from_id = message['from']['id']
if step == 0:
await sender(
Message(chat_id).set_text("*Please Wait*\nI'm Searching all Music with this name", parse_mode="markdown"))
user_steps[from_id] = {"name": "Soundcloud", "step": 1, "data": {}}
i = 0
show_keyboard = {'keyboard': [], "selective": True}
matches = matches.replace(" ", "+")
for song in await search(matches):
title, link = song[0], song[1]
user_steps[from_id]['data'][title] = link
show_keyboard['keyboard'].append([title])
i += 1
if i == 20:
break
if len(show_keyboard['keyboard']) in [0, 1]:
            hide_keyboard = {'hide_keyboard': True, 'selective': True}
del user_steps[from_id]
return [Message(chat_id).set_text("*Not Found*",
reply_to_message_id=message['message_id'], reply_markup=hide_keyboard,
parse_mode="markdown")]
return [Message(chat_id).set_text("Select One Of these :", reply_to_message_id=message['message_id'],
reply_markup=show_keyboard)]
elif step == 1:
try:
hide_keyboard = {'hide_keyboard': True, "selective": True}
await sender(Message(chat_id).set_text("*Please Wait*\nLet me Save this Music For You",
reply_to_message_id=message['message_id'],
reply_markup=hide_keyboard, parse_mode="markdown"))
await downloader(await getfile(user_steps[from_id]['data'][message['text']]),
"tmp/{}.mp3".format(message['text']))
del user_steps[from_id]
return [Message(chat_id).set_audio("tmp/{}.mp3".format(message['text']), title=message['text'],
performer="@Siarobot")]
except Exception as e:
del user_steps[from_id]
return [Message(chat_id).set_text("*Wrong Input*\n_Try Again_", parse_mode="markdown")]
plugin = {
"name": "Soundcloud",
"desc": "Download a Music From Sound Cloud\n\n"
"*For Start :*\n`/sc michael jackson billie jean`",
"usage": ["/sc \\[`Search`]"],
"run": run,
"sudo": False,
"patterns": ["^[/!#]sc (.*)$"]
}
|
kcarnold/autograd
|
tests/test_complex.py
|
Python
|
mit
| 1,428
| 0.016106
|
from __future__ import absolute_import
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.util import *
from autograd import grad
npr.seed(1)
def test_real_type():
fun = lambda x: np.sum(np.real(x))
df = grad(fun)
assert type(df(1.0)) == float
assert type(df(1.0j)) == complex
def test_real_if_close_type():
fun = lambda x: np.sum(np.real(x))
df = grad(fun)
assert type(df(1.0)) == float
assert type(df(1.0j)) == complex
def test_imag_type():
fun = lambda x: np.sum(np.imag(x))
df = grad(fun)
assert base_class(type(df(1.0 ))) == float
assert base_class(type(df(1.0j))) == complex
# TODO: real times imag
def test_angle_real():
fun = lambda x : to_scalar(np.angle(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
check_grads(fun, npr.rand())
check_grads(d_fun, npr.rand())
def test_angle_complex():
fun = lambda x : to_scalar(np.angle(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
check_grads(fun, npr.rand() + 1j*npr.rand())
check_grads(d_fun, npr.rand() + 1j*npr.rand())
def test_abs_real():
fun = lambda x : to_scalar(np.abs(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
check_grads(fun, 1.1)
check_grads(d_fun, 2.1)
def test_abs_complex():
fun = lambda x : to_scalar(np.abs(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
    check_grads(fun, 1.1 + 1.2j)
check_grads(d_fun, 1.1 + 1.3j)
|
amir343/ansible-modules-extras
|
network/f5/bigip_monitor_tcp.py
|
Python
|
gpl-3.0
| 16,428
| 0.006148
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_monitor_tcp
short_description: "Manages F5 BIG-IP LTM tcp monitors"
description:
- "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
version_added: "1.4"
author: "Serge van Ginderachter (@srvg)"
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
user:
description:
- BIG-IP username
required: true
default: null
password:
description:
- BIG-IP password
required: true
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 2.0
state:
description:
- Monitor state
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Monitor name
required: true
default: null
aliases: ['monitor']
partition:
description:
- Partition for the monitor
required: false
default: 'Common'
type:
description:
- The template type of this monitor template
required: false
default: 'tcp'
choices: [ 'TTYPE_TCP', 'TTYPE_TCP_ECHO', 'TTYPE_TCP_HALF_OPEN']
parent:
description:
- The parent template of this monitor template
required: false
default: 'tcp'
choices: [ 'tcp', 'tcp_echo', 'tcp_half_open']
parent_partition:
description:
- Partition for the parent monitor
required: false
default: 'Common'
send:
description:
- The send string for the monitor call
required: true
default: none
receive:
description:
- The receive string for the monitor call
required: true
default: none
ip:
description:
            - IP address part of the ipport definition. The default API setting
is "0.0.0.0".
required: false
default: none
port:
description:
            - port address part of the ipport definition. The default API
setting is 0.
required: false
default: none
interval:
description:
- The interval specifying how frequently the monitor instance
of this template will run. By default, this interval is used for up and
down states. The default API setting is 5.
required: false
default: none
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. The default API setting
is 16.
required: false
default: none
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. The default API setting is 0.
required: false
default: none
'''
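# A quick worked example of the timeout guideline documented above (an illustrative note,
# not part of the original module): with the default interval of 5 seconds, the recommended
# timeout is 3 * 5 + 1 = 16 seconds, which matches the API default of 16 quoted in the docs.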
EXAMPLES = '''
- name: BIGIP F5 | Create TCP Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
type: tcp
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-tcp
- name: BIGIP F5 | Create TCP half open Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
type: tcp
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-halftcp
- name: BIGIP F5 | Remove TCP Monitor
local_action:
module: bigip_monitor_tcp
state: absent
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ monitorname }}"
with_flattened:
- f5monitors-tcp
- f5monitors-halftcp
'''
TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open']
DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower()
def check_monitor_exists(module, api, monitor, parent):
# hack to determine if monitor exists
result = False
try:
ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
if ttype == TEMPLATE_TYPE and parent == parent2:
result = True
else:
module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_monitor(api, monitor, template_attributes):
try:
api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
except bigsuds.OperationFailed, e:
if "already exists" in str(e):
return False
else:
# genuine exception
raise
return True
def delete_monitor(api, monitor):
try:
api.LocalLB.Monitor.delete_template(template_names=[monitor])
except bigsuds.OperationFailed, e:
# maybe it was deleted since we checked
if "was not found" in str(e):
return False
else:
# genuine exception
raise
return True
def check_string_property(api, monitor, str_property):
try:
return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
return True
def set_s
|
unchartedsoftware/PLUTO
|
src/rules/validateFilename.py
|
Python
|
apache-2.0
| 1,114
| 0.027828
|
from __future__ import print_function
import os.path
import re
import imp
import sys
from shutil import copyfile
import PythonAPI as api
class ValidateFilename(api.PythonAPIRule):
def __init__(self, config):
super(ValidateFilename, self).__init__(config)
def run(self, inputFile, outputFile, encoding):
# NOTE: dot syntax doesn't work for dereferencing fields on self.config because the properties are defined using UTF-8 strings.
if not "regex" in self.config:
self.error("No regex specified.")
elif not "importConfig" in self.config:
self.error("No importConfig specified in the rule config.
|
")
elif not "file" in self.config["importConfig"]:
self.error("No file specified in the rule config.importConfig.")
else:
filename = os.path.basename(self.config["importConfig"]["file"])
prog = re.compile(self.config["regex"], re.UNICODE)
if prog.match(filename) is None:
self.error(filename + " does not match the regular expression " + self.config["regex"])
# Copy the file to the output for the next rule
copyfile(inputFile, outputFile)
api.process(ValidateFilename)
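# A minimal sketch of the kind of config this rule expects, inferred from the keys read in
# run() above; the regex and file path below are hypothetical, not from the original source.
#
#     example_config = {
#         "regex": r"^data_\d{8}\.csv$",
#         "importConfig": {"file": "/incoming/data_20170101.csv"},
#     }
#
# With such a config, run() checks os.path.basename of importConfig["file"] against the
# regex and reports an error when it does not match.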
|
smellman/sotmjp-website
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,580
| 0.007459
|
# -*- coding: utf-8 -*-
#
# Pinax Symposion documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 5 17:31:13 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pinax Symposion'
copyright = u'2012, Eldarion Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PinaxSymposiondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'PinaxSymposion.tex', u'Pinax Symposion Documentation',
u'Eldarion Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pinaxsymposion', u'Pinax Symposion Documentation',
[u'Eldarion Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PinaxSymposion', u'Pinax Symposion Documentation',
u'Eldarion Team', 'PinaxSymposion', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Additional config for Django ----------------------------------------------
# Arrange for importing pycon modules to work okay given that they'll
# try to pull in Django
# See http://techblog.ironfroggy.com/2012/06/how-to-use-sphinx-autodoc-on.html
#sys.path.app
|
ooovector/qtlab_replacement
|
tunable_coupling_transmons/Misis_two_qubit_July_2019_setup.py
|
Python
|
gpl-3.0
| 12,967
| 0.003915
|
from qsweepy.instruments import *
from qsweepy import *
from qsweepy import awg_iq_multi
import numpy as np
device_settings = {'vna_address': 'TCPIP0::10.20.61.48::inst0::INSTR',
'lo1_address': 'TCPIP0::10.20.61.59::inst0::INSTR',
'lo1_timeout': 5000, 'rf_switch_address': '10.20.61.224',
'use_rf_switch': True,
'pxi_chassis_id': 0,
'hdawg_address': 'hdawg-dev8108',
'awg_tek_address': 'TCPIP0::10.20.61.186::inst0::INSTR',
'use_awg_tek': True,
'sa_address': 'TCPIP0::10.20.61.56::inst0::INSTR',
'adc_timeout': 10,
'adc_trig_rep_period': 50 * 125, # 10 kHz rate period
'adc_trig_width': 2, # 80 ns trigger length
}
cw_settings = {}
pulsed_settings = {'lo1_power': 18,
'vna_power': 16,
'ex_clock': 1000e6, # 1 GHz - clocks of some devices
'rep_rate': 20e3, # 10 kHz - pulse sequence repetition rate
# 500 ex_clocks - all waves is shorten by this amount of clock cycles
# to verify that M3202 will not miss next trigger
# (awgs are always missing trigger while they are still outputting waveform)
'global_num_points_delta': 400,
'hdawg_ch0_amplitude': 0.3,
'hdawg_ch1_amplitude': 0.3,
'hdawg_ch2_amplitude': 0.8,
                   'hdawg_ch3_amplitude': 0.8,
'hdawg_ch4_amplitude': 0.8,
'hdawg_ch5_amplitude': 0.8,
'hdawg_ch6_amplitude': 0.8,
'hdawg_ch7_amplitude': 0.8,
'awg_tek_ch1_amplitude': 1.0,
'awg_tek_ch2_amplitude': 1.0,
'awg_tek_ch3_amplitude': 1.0,
'awg_tek_ch4_amplitude': 1.0,
'awg_tek_ch1_offset': 0.0,
'awg_tek_ch2_offset': 0.0,
'awg_tek_ch3_offset': 0.0,
'awg_tek_ch4_offset': 0.0,
'lo1_freq': 3.41e9,
'pna_freq': 6.07e9,
#'calibrate_delay_nop': 65536,
'calibrate_delay_nums': 200,
'trigger_readout_channel_name': 'ro_trg',
'trigger_readout_length': 200e-9,
'modem_dc_calibration_amplitude': 1.0,
'adc_nop': 1024,
'adc_nums': 50000, ## Do we need control over this? Probably, but not now... WUT THE FUCK MAN
}
class hardware_setup():
def __init__(self, device_settings, pulsed_settings):
self.device_settings = device_settings
self.pulsed_settings = pulsed_settings
self.cw_settings = cw_settings
self.hardware_state = 'undefined'
self.pna = None
self.lo1 = None
self.rf_switch = None
self.awg_tek = None
self.sa = None
self.coil_device = None
self.hdawg = None
self.adc_device = None
self.adc = None
self.ro_trg = None
self.coil = None
self.iq_devices = None
def open_devices(self):
# RF switch for making sure we know what sample we are measuring
self.pna = Agilent_N5242A('pna', address=self.device_settings['vna_address'])
self.lo1 = Agilent_E8257D('lo1', address=self.device_settings['lo1_address'])
self.lo1._visainstrument.timeout = self.device_settings['lo1_timeout']
if self.device_settings['use_rf_switch']:
self.rf_switch = nn_rf_switch('rf_switch', address=self.device_settings['rf_switch_address'])
if self.device_settings['use_awg_tek']:
self.awg_tek = Tektronix_AWG5014('awg_tek', address=self.device_settings['awg_tek_address'])
self.sa = Agilent_N9030A('pxa', address=self.device_settings['sa_address'])
self.coil_device = self.awg_tek
self.hdawg = Zurich_HDAWG1808(self.device_settings['hdawg_address'])
self.adc_device = TSW14J56_evm()
self.adc_device.timeout = self.device_settings['adc_timeout']
self.adc = TSW14J56_evm_reducer(self.adc_device)
self.adc.output_raw = True
self.adc.last_cov = False
self.adc.avg_cov = False
self.adc.resultnumber = False
self.adc_device.set_trig_src_period(self.device_settings['adc_trig_rep_period']) # 10 kHz period rate
self.adc_device.set_trig_src_width(self.device_settings['adc_trig_width']) # 80 ns trigger length
# self.hardware_state = 'undefined'
def set_pulsed_mode(self):
self.lo1.set_status(1) # turn on lo1 output
self.lo1.set_power(self.pulsed_settings['lo1_power'])
self.lo1.set_frequency(self.pulsed_settings['lo1_freq'])
self.pna.set_power(self.pulsed_settings['vna_power'])
self.pna.write("OUTP ON")
self.pna.write("SOUR1:POW1:MODE ON")
self.pna.write("SOUR1:POW2:MODE OFF")
self.pna.set_sweep_mode("CW")
self.pna.set_frequency(self.pulsed_settings['pna_freq'])
self.hdawg.stop()
self.awg_tek.stop()
        self.awg_tek.set_clock(self.pulsed_settings['ex_clock']) # clock for all the AWGs
self.hdawg.set_clock(self.pulsed_settings['ex_clock'])
self.hdawg.set_clock_source(1)
# setting repetition period for slave devices
# 'global_num_points_delay' is needed to verify that M3202A and other slave devices will be free
# when next trigger arrives.
global_num_points = int(np.round(
self.pulsed_settings['ex_clock'] / self.pulsed_settings['rep_rate'] - self.pulsed_settings[
'global_num_points_delta']))
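        # Illustrative worked example (not in the original source), using the pulsed_settings
        # above: ex_clock = 1000e6 and rep_rate = 20e3 give 1000e6 / 20e3 = 50000 clock cycles
        # per trigger period; subtracting global_num_points_delta = 400 leaves 49600 points.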
# global_num_points = 20000
self.hdawg.set_nop(global_num_points)
self.hdawg.clear()
        # the sample length, which is effectively the same value, has to be set on every AWG;
        # fortunately, there is only one of them at the moment.
        # this is an embarrassing hack
self.hdawg.set_trigger_impedance_1e3()
self.hdawg.set_dig_trig1_source([0, 0, 0, 0])
self.hdawg.set_dig_trig1_slope([1, 1, 1, 1]) # 0 - Level sensitive trigger, 1 - Rising edge trigger,
# 2 - Falling edge trigger, 3 - Rising or falling edge trigger
self.hdawg.set_dig_trig1_source([0, 0, 0, 0])
self.hdawg.set_dig_trig2_slope([1, 1, 1, 1])
self.hdawg.set_trig_level(0.6)
for sequencer in range(4):
self.hdawg.send_cur_prog(sequencer=sequencer)
self.hdawg.set_marker_out(channel=np.int(2 * sequencer), source=4) # set marker 1 to awg mark out 1 for sequencer
self.hdawg.set_marker_out(channel=np.int(2 * sequencer + 1),
source=7) # set marker 2 to awg mark out 2 for sequencer
for channel in range(8):
self.hdawg.set_amplitude(channel=channel, amplitude=self.pulsed_settings['hdawg_ch%d_amplitude'%channel])
self.hdawg.set_offset(channel=channel, offset=0 * 1.0)
self.hdawg.set_digital(channel=channel, marker=[0]*(global_num_points))
self.hdawg.set_all_outs()
self.hdawg.run()
self.awg_tek._visainstrument.write('AWGC:RMOD TRIG')
self.awg_tek._visainstrument.write('TRIG:WVAL LAST')
self.awg_tek._visainstrument.write('TRIG:IMP 1kohm')
self.awg_tek._visainstrument.write('TRIG:SLOP POS')
self.awg_tek._visainstrument.write('TRIG:LEV 0.5')
self.awg_tek._visainstrument.write('SOUR1:ROSC:FREQ 10MHz')
self.awg_tek._visainstrument.write('SOUR1:ROSC:SOUR EXT')
# awg_tek.set_trigger_mode('CONT')
        self.awg_tek.set_nop(global_num_points) # properly speaking, the repetition rate only needs to be set on the master device
self.awg_tek.check_cached = True
for channel in range(1, 5):
self.awg_tek.set_amplitude(self.pulsed_settings['awg_tek_ch{}_amplitude'.format(channel)], channel=channel)
self.awg_tek.set_offset(self.p
|
hinnerk/py-djbdnslog
|
src/djbdnslog/scripts/__init__.py
|
Python
|
bsd-3-clause
| 197
| 0.015228
|
import sys
def check_args(argv):
if len(argv) != 2:
print ("Help:\n"
"%s filename.log\n"
"filename.log = name of logfile") % argv[0]
        sys.exit(1)
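# A minimal usage sketch (the entry-point wiring below is an assumption, not from the
# original package):
#
#     if __name__ == '__main__':
#         check_args(sys.argv)
#
# i.e. the script prints the help text and exits with status 1 unless exactly one logfile
# name is passed on the command line.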
|
pythonitalia/pycon_site
|
p3/management/commands/partner_events.py
|
Python
|
bsd-2-clause
| 1,898
| 0.002634
|
# -*- coding: UTF-8 -*-
import haystack
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models
from conference.templatetags.conference import fare_blob
from collections import defaultdict
from datetime import datetime
from xml.sax.saxutils import escape
class Command(BaseCommand):
"""
"""
@transaction.commit_on_success
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference missing')
partner_events = defaultdict(list)
for f in models.Fare.objects.available(conference=conference).filter(ticket_type='partner'):
try:
date = datetime.strptime(fare_blob(f, 'data').split(',')[0][:-2] + ' 2011', '%B %d %Y').date()
time = datetime.strptime(fare_blob(f, 'departure'), '%H:%M').time()
except ValueError:
continue
            partner_events[date].append((f, time))
for sch in models.Schedule.objects.filter(conference=conference):
            events = list(models.Event.objects.filter(schedule=sch))
for fare, time in partner_events[sch.date]:
track_id = 'f%s' % fare.id
for e in events:
if track_id in e.get_all_tracks_names():
event = e
break
else:
event = models.Event(schedule=sch, talk=None)
event.track = 'partner-program ' + track_id
event.custom = escape(fare.name)
event.start_time = time
if time.hour < 13:
d = (13 - time.hour) * 60
else:
d = (19 - time.hour) * 60
event.duration = d
event.save()
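# Illustrative note (not part of the original command): the fare_blob parsing above turns a
# hypothetical 'data' blob such as "July 4th, ..." into "July 4" + " 2011" and parses it with
# "%B %d %Y", giving date(2011, 7, 4), while the 'departure' blob (e.g. "09:30") is parsed
# with "%H:%M"; fares whose blobs do not parse are skipped by the ValueError handler.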
|
rorito/django-squeezemail
|
squeezemail/management/commands/run_steps_task.py
|
Python
|
mit
| 197
| 0
|
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
        from squeezemail.tasks import run_steps
run_steps.delay()
|
procool/mygw
|
globals/utils/server/http_client.py
|
Python
|
bsd-2-clause
| 1,122
| 0.017825
|
import json
from urllib2 import urlopen, HTTPError
from urllib import urlencode
import logging
class HTTPClient(object):
def __init__(self, host='localhost', port=90):
self.host = host
self.port = port
def get_serv_addr (self):
return 'http://%s:%s/' % ( self.host, self.port, )
def call_handler(self, handler, *args, **kwargs):
        url = '%s%s/' % (self.get_serv_addr(), handler)
try: postdata = kwargs.pop('postdata')
except: postdata=None
for arg in args:
url += '%s/' % arg
params = urlencode(kwargs)
url = '%s?%s'% (url, params)
logging.debug("Request url: %s" % url)
try: response = urlopen(url, postdata)
except HTTPError as err:
raise(err)
except:
return None
## Reading data:
try:
response = response.read()
except:
return None
## Decoding to JSON:
try:
return json.loads(response)
except:
return response
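# A minimal usage sketch (host, port and handler name here are hypothetical, not from the
# source):
#
#     client = HTTPClient(host='localhost', port=90)
#     status = client.call_handler('status', 42, verbose=1)
#
# This requests http://localhost:90/status/42/?verbose=1 and returns the decoded JSON if the
# body parses, the raw body otherwise, or None when the request fails with a non-HTTP error.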
|
airbnb/airflow
|
airflow/contrib/hooks/gcp_container_hook.py
|
Python
|
apache-2.0
| 1,567
| 0.002553
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`."""
import warnings
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`",
DeprecationWarning,
stacklevel=2,
)
class GKEClusterHook(GKEHook):
"""This class is deprecated. Please use `airflow.providers.google.cloud.hooks.container.GKEHook
|
`."""
def __init__(self, *args, **kwargs):
warnings.warn(
"This class is deprecated. Please use `airflow.providers.google.cloud.hooks.container.GKEHook`.",
DeprecationWarning,
stacklevel=2,
)
        super().__init__(*args, **kwargs)
|
Smashman/mods.tf
|
app/utils/utils.py
|
Python
|
gpl-3.0
| 11,564
| 0.002248
|
import ntpath
import os
import steam
import zipfile
import shutil
from subprocess import check_call, CalledProcessError
from flask import flash, current_app, abort
from PIL import Image
from io import BytesIO
from werkzeug.utils import secure_filename
from ..tf2.models import all_classes, TF2BodyGroup, TF2EquipRegion
from ..mods.models import ModClassModel, ModImage
from ..models import get_or_create
from app import db, sentry
def list_from_vdf_dict(dictionary):
return_list = []
for dict_item, number in dictionary.items():
if number is not None and number > 0:
return_list.append(dict_item)
return return_list
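# Illustrative example (not part of the original source):
# list_from_vdf_dict({'hat': 1, 'beard': 0, 'scarf': None}) returns ['hat'], i.e. only keys
# whose counts are present and greater than zero survive.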
def extract_and_image(zip_in, db_record):
"""
    Extracts the uploaded zip files and generates an image and a thumbnail from the given files.
:param zip_in:
:return:
"""
input_folder = current_app.config['UPLOADED_WORKSHOPZIPS_DEST']
output_folder = current_app.config['OUTPUT_FOLDER_LOCATION']
mod_id = db_record.id
print "Starting conversion: {}".format(zip_in)
zip_filename = os.path.join(input_folder, zip_in)
# If we have a zip file, grab the manifest
if zipfile.is_zipfile(zip_filename):
with zipfile.ZipFile(zip_filename, "r") as zip_file:
if sum(f.file_size for f in zip_file.infolist()) < 105000000:
try:
print "Opening manifest"
manifest_stream = zip_file.open('manifest.txt')
manifest_str = BytesIO(manifest_stream.read())
manifest = steam.vdf.load(manifest_str).get('asset')
except KeyError:
flash("No manifest, please upload a Workshop zip.", "danger")
return
except zipfile.BadZipfile:
flash("Archive is corrupt, please try repackaging your item before trying again.", "danger")
return
print "Converting manifest. vdf -> dict"
else:
flash("Zip is too large when extracted, min size is ~100MB", "danger")
return
else:
flash('Not a zip: {}'.format(zip_filename), "danger")
return
name = manifest['name']
try:
icon = manifest['ImportSession']['icon']
except KeyError:
icon = None
if icon:
# 'icon' can contain a lot of backslashes for reasons unknown to man, we'll get rid of them here.
icon = ntpath.normpath(icon.replace('\\', ntpath.sep))
iconUnix = os.path.normpath(icon.replace('\\', os.path.sep))
# List of files we want to extract and later pack into a VPK
to_extract = []
# Start extracting
print "Start extracting"
with zipfile.ZipFile(zip_filename) as zip_open:
for infile in zip_open.namelist():
# Only extract the contents of the game, materials or models folder
allowed_extracts = ['game', 'materials', 'models']
if '..' in infile or infile.startswith('/'):
flash("Error", "danger")
return
if ntpath.dirname(infile).split(ntpath.sep)[0] in allowed_extracts:
to_extract.append(infile)
# How many to extract
print "{} files to extract.".format(len(to_extract))
# Do extractings
print "Extracting."
safe_name = secure_filename(name)
folder_name = "{mod_id}".format(mod_id=mod_id)
os.path.altsep = '\\'
zip_open.extractall(os.path.join(output_folder, folder_name), to_extract)
if icon:
# Load the icon into a byte stream
print "Reading TGA image."
try:
tga_f = BytesIO(zip_open.read(iconUnix))
except KeyError:
tga_f = BytesIO(zip_open.read(icon))
img = Image.open(tga_f)
# Save the image as a PNG
print "Saving large PNG image"
filename = "backpack_icon_large.png"
img.save(os.path.join(output_folder, folder_name, filename))
backpack_icon_large = ModImage(filename, db_record.id, 0)
db.session.add(backpack_icon_large)
# Resize the image to make a thumbnail
print "Resizing image"
img.thumbnail((128, 128), Image.ANTIALIAS)
# Save the thumbnail
print "Saving small PNG image"
filename = "backpack_icon.png"
img.save(os.path.join(output_folder, folder_name, filename))
backpack_icon = ModImage(filename, db_record.id, 1)
db.session.add(backpack_icon)
# Fetch desired item info from manifest
items_game_info = manifest['ImportSession']['ItemSchema']
equip_regions = []
equip_region = items_game_info.get('equip_region')
if equip_region:
equip_regions.append(equip_region)
else:
equip_region_dict = items_game_info.get('equip_regions')
if equip_region_dict:
equip_regions += list_from_vdf_dict(equip_region_dict)
visuals = items_game_info.get('visuals')
bodygroups = []
if visuals:
bodygroups_dict = visuals.get('player_bodygroups')
if bodygroups_dict:
bodygroups += list_from_vdf_dict(bodygroups_dict)
used_by_classes = items_game_info.get('used_by_classes')
used_by_classes = list_from_vdf_dict(used_by_classes)
used_by_classes = [i.lower() for i in used_by_classes]
model_player = items_game_info.get('model_player')
class_models = {}
    if used_by_classes and len(used_by_classes) == 1:
if model_player:
class_models.update({used_by_classes[0].lower(): model_player})
else:
return
elif not used_by_classes or len(used_by_classes) > 1:
if not used_by_classes:
used_by_classes = all_classes
model_player_per_class = items_game_info.get('model_player_per_class')
model_player_per_class = dict((k.lower(), v) for k, v in model_player_per_class.iteritems())
for tf2_class in used_by_classes:
if tf2_class.title() in all_classes:
if model_player_per_class:
class_model = model_player_per_class.get(tf2_class)
elif model_player:
class_model = model_player
else:
continue
class_and_model = {tf2_class: class_model}
class_models.update(class_and_model)
# Update database record
db_record.name = safe_name
db_record.pretty_name = manifest['ImportSession']['name']
db_record.manifest_steamid = int(manifest['steamid'], 16)
db_record.item_slot = "misc" # Only miscs have Workshop zips to date
db_record.image_inventory = items_game_info.get('image_inventory')
if bodygroups:
for bodygroup in bodygroups:
bg_db = TF2BodyGroup.query.get(bodygroup)
if bg_db:
db_record.bodygroups.append(bg_db)
if equip_regions:
for er in equip_regions:
er_db = TF2EquipRegion.query.get(er)
if er_db:
db_record.equip_regions.append(er_db)
if class_models:
for class_name, model in class_models.items():
db_record.class_model[class_name] = (get_or_create(db.session, ModClassModel, mod_id=mod_id,
class_name=class_name, model_path=model))
# And we're fin
print "Done: {}".format(db_record.zip_file)
db_record.completed = True
return db_record
def vpk_package(folder):
try:
check_call([os.path.abspath(current_app.config['VPK_BINARY_PATH']), folder])
except CalledProcessError:
sentry.captureException()
abort(500)
shutil.rmtree(folder)
def rename_copy(ext_list, dest_format):
for extension in ext_list:
for mod_path, replacement_path in dest_format.items():
to_rename = mod_path.format(ext=extension)
rename_dest = replacement_path.format(ext=extension)
dest_directory = os.path.dirname(rename_dest)
if not os.path.exists(dest_directory):
|
tempbottle/elliptics
|
tests/pytests/test_session_parameters.py
|
Python
|
lgpl-3.0
| 7,542
| 0.000928
|
# =============================================================================
# 2013+ Copyright (c) Kirill Smorodinnikov <shaitkir@gmail.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# =============================================================================
import sys
sys.path.insert(0, "") # for running from cmake
import pytest
from conftest import set_property, raises, make_session
import elliptics
io_flags = set((elliptics.io_flags.default,
elliptics.io_flags.append,
elliptics.io_flags.prepare,
elliptics.io_flags.commit,
elliptics.io_flags.overwrite,
elliptics.io_flags.nocsum,
elliptics.io_flags.plain_write,
elliptics.io_flags.nodata,
elliptics.io_flags.cache,
elliptics.io_flags.cache_only,
elliptics.io_flags.cache_remove_from_disk))
command_flags = set((elliptics.command_flags.default,
elliptics.command_flags.direct,
elliptics.command_flags.nolock))
exceptions_policy = set((elliptics.exceptions_policy.no_exceptions,
elliptics.exceptions_policy.throw_at_start,
elliptics.exceptions_policy.throw_at_wait,
elliptics.exceptions_policy.throw_at_get,
elliptics.exceptions_policy.throw_at_iterator_end,
elliptics.exceptions_policy.default_exceptions))
filters = set((elliptics.filters.positive,
elliptics.filters.positive,
elliptics.filters.positive_with_ack,
elliptics.filters.positive_final,
elliptics.filters.negative,
elliptics.filters.negative_with_ack,
elliptics.filters.negative_final,
elliptics.filters.all,
elliptics.filters.all_with_ack,
elliptics.filters.all_final))
checkers = set((elliptics.checkers.no_check,
elliptics.checkers.at_least_one,
elliptics.checkers.all,
elliptics.checkers.quorum))
class TestSession:
def test_flags(self):
assert set(elliptics.io_flags.values.values()) == io_flags
assert set(elliptics.command_flags.values.values()) == command_flags
assert set(elliptics.exceptions_policy.values.values()) == exceptions_policy
assert set(elliptics.filters.values.values()) == filters
assert set(elliptics.checkers.values.values()) == checkers
@pytest.mark.parametrize("prop, value", [
('timeout', 5),
('groups', []),
('exceptions_policy', elliptics.exceptions_policy.default_exceptions),
('cflags', 0),
('ioflags', 0),
('timestamp', elliptics.Time(2 ** 64 - 1, 2 ** 64 - 1)),
('trace_id', 0),
('user_flags', 0)])
def test_properties_default(self, server, simple_node, prop, value):
session = elliptics.Session(node=simple_node)
        assert getattr(session, prop) == value
@pytest.mark.parametrize('prop, setter, getter, values', [
('groups', 'set_groups', 'get_groups', (
[],
range(1, 100),
range(1, 100000),
range(10, 10000))),
('cflags', 'set_cflags', 'get_cflags', command_flags),
('ioflags', 'set_ioflags', 'get_ioflags', io_flags),
        ('exceptions_policy', 'set_exceptions_policy',
'get_exceptions_policy', tuple(exceptions_policy) + (
elliptics.exceptions_policy.throw_at_start |
elliptics.exceptions_policy.throw_at_wait,
elliptics.exceptions_policy.throw_at_start |
elliptics.exceptions_policy.throw_at_wait |
elliptics.exceptions_policy.throw_at_get |
elliptics.exceptions_policy.throw_at_iterator_end)),
('timeout', 'set_timeout', 'get_timeout', (
28376487,
2 ** 63 - 1)),
('timestamp', 'set_timestamp', 'get_timestamp', (
elliptics.Time(0, 0),
elliptics.Time(2 ** 64 - 1, 2 ** 64 - 1),
elliptics.Time(238689126897, 1723861827))),
('trace_id', None, None, (
0,
32423946,
2 ** 32 - 1)),
('user_flags', 'set_user_flags', 'get_user_flags', (
0,
438975345,
2 ** 64 - 1))])
def test_properties(self, server, simple_node,
prop, setter, getter, values):
session = elliptics.Session(node=simple_node)
assert type(session) == elliptics.Session
for value in values:
set_property(session, prop, value,
setter=setter,
getter=getter)
def test_resetting_timeout(self, server, simple_node):
session = make_session(node=simple_node,
test_name='TestSession.test_resetting_timeout')
assert session.timeout == 5 # check default timeout value
session.timeout = 1 # set different value
assert session.timeout == 1 # check that the value has been set
session.timeout = 0 # set timeout to 0 which should reset to default
assert session.timeout == 5 # check default timeout value
@pytest.mark.parametrize("prop, value", [
('cflags', 2 ** 64),
('ioflags', 2 ** 32),
('exceptions_policy', 2 ** 32),
('timeout', 2 ** 63),
('trace_id', 2 ** 64),
('user_flags', 2 ** 64)])
def test_properties_out_of_limits(self, server, simple_node, prop, value):
session = elliptics.Session(simple_node)
pytest.raises(OverflowError,
"set_property(session, '{0}', {1})"
.format(prop, value))
def test_clone(self, server, simple_node):
orig_s = make_session(node=simple_node,
test_name='TestSession.test_clone')
orig_s.groups = [1, 2, 3]
orig_s.timeout = 13
orig_s.exceptions_policy = elliptics.exceptions_policy.throw_at_wait
orig_s.cflags = elliptics.command_flags.direct
orig_s.ioflags = elliptics.io_flags.overwrite
orig_s.timestamp = elliptics.Time(213, 415)
orig_s.trace_id = 731
orig_s.user_flags = 19731
clone_s = orig_s.clone()
assert clone_s.groups == orig_s.groups == [1, 2, 3]
assert clone_s.timeout == orig_s.timeout == 13
assert clone_s.exceptions_policy == orig_s.exceptions_policy == \
elliptics.exceptions_policy.throw_at_wait
assert clone_s.cflags == orig_s.cflags == elliptics.command_flags.direct
assert clone_s.ioflags == orig_s.ioflags == elliptics.io_flags.overwrite
assert clone_s.timestamp == orig_s.timestamp == elliptics.Time(213, 415)
assert clone_s.trace_id == orig_s.trace_id == 731
assert clone_s.user_flags == orig_s.user_flags == 19731
|
wogsland/QSTK
|
build/lib.linux-x86_64-2.7/QSTK/qstkutil/qsdateutil.py
|
Python
|
bsd-3-clause
| 9,008
| 0.01099
|
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: dbratcher@gatech.edu
@summary: Contains tutorial for backtester and report.
'''
import datetime as dt
from datetime import timedelta
import time as t
import numpy as np
import os
import pandas as pd
def _cache_dates():
''' Caches dates '''
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure you have NYSE_dates.txt in the qstkutil directory"
datestxt = np.loadtxt(filename, dtype=str)
dates = []
for i in datestxt:
dates.append(dt.datetime.strptime(i, "%m/%d/%Y"))
return pd.TimeSeries(index=dates, data=dates)
GTS_DATES = _cache_dates()
def getMonthNames():
return(['JAN','FEB','MAR','APR','MAY','JUN','JUL','AUG','SEP','OCT','NOV','DEC'])
def getYears(funds):
years=[]
for date in funds.index:
        if(not(date.year in years)):
years.append(date.year)
return(years)
def getMonths(funds,year):
months=[]
for date in funds.index:
if((date.year==year) and not(date.month in months)):
months.append(date.month)
return(months)
def getDays(funds,year,month):
days=[]
for date in funds.index:
if((date.year==year) and (date.month==month)):
days.append(date)
return(days)
def getDaysBetween(ts_start, ts_end):
days=[]
    for i in range(0,(ts_end-ts_start).days):
days.append(ts_start+timedelta(days=1)*i)
return(days)
def getFirstDay(funds,year,month):
for date in funds.index:
if((date.year==year) and (date.month==month)):
return(date)
return('ERROR')
def getLastDay(funds,year,month):
return_date = 'ERROR'
for date in funds.index:
if((date.year==year) and (date.month==month)):
return_date = date
return(return_date)
def getNextOptionClose(day, trade_days, offset=0):
#get third friday in month of day
#get first of month
year_off=0
if day.month+offset > 12:
year_off = 1
offset = offset - 12
first = dt.datetime(day.year+year_off, day.month+offset, 1, hour=16)
#get weekday
day_num = first.weekday()
#get first friday (friday - weekday) add 7 if less than 1
dif = 5 - day_num
if dif < 1:
dif = dif+7
#move to third friday
dif = dif + 14
friday = first+dt.timedelta(days=(dif-1))
#if friday is a holiday, options expire then
if friday in trade_days:
month_close = first + dt.timedelta(days=dif)
else:
month_close = friday
#if day is past the day after that
if month_close < day:
return_date = getNextOptionClose(day, trade_days, offset=1)
else:
return_date = month_close
return(return_date)
def getLastOptionClose(day, trade_days):
start = day
while getNextOptionClose(day, trade_days)>=start:
day= day - dt.timedelta(days=1)
return(getNextOptionClose(day, trade_days))
def getNYSEoffset(mark, offset):
''' Returns NYSE date offset by number of days '''
mark = mark.replace(hour=0, minute=0, second=0, microsecond=0)
i = GTS_DATES.index.searchsorted(mark, side='right')
# If there is no exact match, take first date in past
if GTS_DATES[i] != mark:
i -= 1
ret = GTS_DATES[i + offset]
ret = ret.replace(hour=16)
return ret
def getNYSEdays(startday = dt.datetime(1964,7,5), endday = dt.datetime(2020,12,31),
timeofday = dt.timedelta(0)):
"""
@summary: Create a list of timestamps between startday and endday (inclusive)
that correspond to the days there was trading at the NYSE. This function
depends on a separately created a file that lists all days since July 4,
1962 that the NYSE has been open, going forward to 2020 (based
on the holidays that NYSE recognizes).
@param startday: First timestamp to consider (inclusive)
@param endday: Last day to consider (inclusive)
@return list: of timestamps between startday and endday on which NYSE traded
@rtype datetime
"""
start = startday - timeofday
end = endday - timeofday
dates = GTS_DATES[start:end]
ret = [x + timeofday for x in dates]
return(ret)
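# Illustrative usage sketch (the dates are invented, not from the source): NYSE trading
# timestamps for January 2010, stamped at the 16:00 close, would be requested as
# getNYSEdays(dt.datetime(2010, 1, 1), dt.datetime(2010, 1, 31), dt.timedelta(hours=16)).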
def getNextNNYSEdays(startday, days, timeofday):
"""
@summary: Create a list of timestamps from startday that is days days long
that correspond to the days there was trading at NYSE. This function
depends on the file used in getNYSEdays and assumes the dates within are
in order.
@param startday: First timestamp to consider (inclusive)
@param days: Number of timestamps to return
@return list: List of timestamps starting at startday on which NYSE traded
@rtype datetime
"""
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
dates=[]
for i in datestxt:
if(len(dates)<days):
if((dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)>=startday):
dates.append(dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)
return(dates)
def getPrevNNYSEday(startday, timeofday):
"""
@summary: This function returns the last valid trading day before the start
day, or returns the start day if it is a valid trading day. This function
depends on the file used in getNYSEdays and assumes the dates within are
in order.
@param startday: First timestamp to consider (inclusive)
@param days: Number of timestamps to return
@return list: List of timestamps starting at startday on which NYSE traded
@rtype datetime
"""
try:
# filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
except KeyError:
print "Please be sure to set the value for QS in config.sh or\n"
print "in local.sh and then \'source local.sh\'.\n"
datestxt = np.loadtxt(filename,dtype=str)
#''' Set return to first day '''
dtReturn = dt.datetime.strptime( datestxt[0],"%m/%d/%Y")+timeofday
#''' Loop through all but first '''
for i in datestxt[1:]:
dtNext = dt.datetime.strptime(i,"%m/%d/%Y")
#''' If we are > startday, then use previous valid day '''
if( dtNext > startday ):
break
dtReturn = dtNext + timeofday
return(dtReturn)
def ymd2epoch(year, month, day):
"""
@summary: Convert YMD info into a unix epoch value.
@param year: The year
@param month: The month
@param day: The day
@return epoch: number of seconds since epoch
"""
return(t.mktime(dt.date(year,month,day).timetuple()))
def epoch2date(ts):
"""
@summary Convert seconds since epoch into date
@param ts: Seconds since epoch
@return thedate: A date object
"""
tm = t.gmtime(ts)
return(dt.date(tm.tm_year,tm.tm_mon,tm.tm_mday))
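# Illustrative note (not part of the original module): ymd2epoch(2011, 1, 1) returns the epoch
# seconds for 2011-01-01 interpreted in local time (mktime), while epoch2date() maps seconds
# back to a date via UTC (gmtime), so the round trip is only exact when the local timezone
# is UTC.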
def _trade_dates(dt_start, dt_end, s_period):
'''
@summary: Generate dates on which we need to trade
@param c_strat: Strategy config class
@param dt_start: Start date
@param dt_end: End date
'''
ldt_timestamps = getNYSEdays(dt_start,
dt_end, dt.timedelta(hours=16) )
# Use pandas reindex method instead
# Note, dates are index as well as values, we select based on index
# but return values since it is a numpy array of datetimes instead of
# pandas specific.
ts_dates = pd.TimeSeries(index=ldt_timestamps, data=ldt_timestamps)
# These are the dates we want
if s_period[:2] == 'BW':
# special case for biweekly
dr_range = pd.DateRange(dt_start, dt_end,
|
gabrielhurley/django-block-comment
|
block_comment/models.py
|
Python
|
bsd-3-clause
| 2,966
| 0.001011
|
'''Custom models for the block_comment app.'''
import difflib
from django.contrib.comments.models import Comment
from django.db import models
from django.utils.translation import ugettext as _
from block_comment.diff_match_patch import diff_match_patch
class BlockComment(Comment):
'''
``BlockComment`` extends Django's comments framework to store information
about the block of text the comment relates to.
'''
# Position in the full text that the block the comment relates to begins at
index = models.PositiveIntegerField(null=True, blank=True)
# The text of the block, used for determining diffs/orphans
regarding = models.TextField(blank=True)
def get_match_index(self, haystack):
''' Returns the index of the closest match to needle within
the haystack. '''
def get_block_index(i):
''' ``haystack`` and ``blocks`` are accessible by closure. '''
return haystack.index(blocks[i])
needle = self.regarding.strip()
matches = []
blocks = haystack.split("\n")
block_index = None
# Check for an exact match first
if needle in blocks:
return get_block_index(blocks.index(needle))
# If that didn't work, do a basic diff comparison block-by-block
for p in blocks:
comp = difflib.SequenceMatcher(None, needle, p)
if comp.ratio() > .85:
matches.append(blocks.index(comp.b))
if len(matches) == 1:
block_index = matches.pop()
elif len(matches) == 0:
# No matches, can we find a potential match with a smarter
# matching algorithm?
matcher = diff_match_patch()
index = matcher.match_main(haystack, needle, 0)
if index > -1:
return index
else:
# We've got multiple options, let's narrow them down with
            # a smarter matching algorithm.
matcher = diff_match_patch()
for i in tuple(matches):
if matcher.match_main(blocks[i], needle, self.index) < 0:
# No match, discard this option
matches.remove(i)
# Unless we've only got one match left, we'll fall through to -1
if len(matches) == 1:
                block_index = matches[0]
        if block_index is not None:
return get_block_index(block_index)
# If we can't find anything, return -1
return -1
def relink_comment(self, haystack, save=True):
index = self.get_match_index(haystack)
if index == self.index:
return None
elif index > -1:
self.index = index
else:
self.index = None
if save:
self.save()
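# A minimal usage sketch (the field values are hypothetical; a real BlockComment also carries
# the usual django.contrib.comments fields):
#
#     comment.regarding = "Second paragraph of the draft."
#     comment.relink_comment(updated_body_text, save=False)
#
# relink_comment() re-runs get_match_index() against the new text and updates comment.index
# in place, setting it to None when the block can no longer be located.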
|
charlesll/RamPy
|
legacy_code/IR_dec_comb.py
|
Python
|
gpl-2.0
| 6,585
| 0.027183
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 22 07:54:05 2014
@author: charleslelosq
Carnegie Institution for Science
"""
import sys
sys.path.append("/Users/charleslelosq/Documents/RamPy/lib-charles/")
import os
import csv
import numpy as np
import scipy
import matplotlib
import matplotlib.gridspec as gridspec
from pylab import *
from StringIO import StringIO
from scipy import interpolate
# to fit spectra we use the lmfit software of Matt Newville, CARS, university of Chicago, available on the web
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit, fit_report
from spectratools import * #Charles' libraries and functions
from Tkinter import *
import tkMessageBox
from tkFileDialog import askopenfilename
#### We define a set of functions that will be used for fitting data
#### unfortunatly, as we use lmfit (which is convenient because it can fix or release
#### easily the parameters) we are not able to use arrays for parameters...
#### so it is a little bit long to write all the things, but in a way quite robust also...
#### gaussian and pseudovoigt functions are available in spectratools
#### if you need a voigt, fix the gaussian-to-lorentzian ratio to 1 in the parameter definition before
#### doing the data fit
def residual(pars, x, data=None, eps=None):
# unpack parameters:
# extract .value attribute for each parameter
a1 = pars['a1'].value
a2 = pars['a2'].value
f1 = pars['f1'].value
f2 = pars['f2'].value
l1 = pars['l1'].value
l2 = pars['l2'].value
# Gaussian model
peak1 = gaussian(x,a1,f1,l1)
peak2 = gaussian(x,a2,f2,l2)
model = peak1 + peak2
if data is None:
return model, peak1, peak2
if eps is None:
return (model - data)
return (model - data)/eps
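# Illustrative note (not in the original script): called as residual(params, x) the function
# returns the summed model plus the two individual Gaussian components; with data (and
# optionally eps) it instead returns the (weighted) misfit vector that lmfit's minimize()
# drives towards zero.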
##### CORE OF THE CALCULATION BELOW
#### CALLING THE DATA NAMES
tkMessageBox.showinfo(
"Open file",
"Please open the list of spectra")
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
with open(filename) as inputfile:
results = list(csv.reader(inputfile)) # we read the data list
#### LOOP FOR BEING ABLE TO TREAT MULTIPLE DATA
#### WARNING: OUTPUT ARE AUTOMATICALLY GENERATED IN A DIRECTORY CALLED "DECONV"
#### (see end) THAT SHOULD BE PRESENT !!!!!!!!!!
for lg in range(len(results)):
name = str(results[lg]).strip('[]')
name = name[1:-1] # to remove unwanted ""
sample = np.genfromtxt(name) # get the sample to deconvolute
# we set here the lower and higher bonds for the interest region
lb = 4700 ### MAY NEED TO AJUST THAT
hb = 6000
interestspectra = sample[np.where((sample[:,0] > lb)&(sample[:,0] < hb))]
ese0 = interestspectra[:,2]/abs(interestspectra[:,1]) #take ese as a percentage, we assume that the treatment was made correctly for error determination... if not, please put sigma = None
interestspectra[:,1] = interestspectra[:,1]/np.amax(interestspectra[:,1])*100 # normalise spectra to maximum, easier to handle after
sigma = abs(ese0*interestspectra[:,1]) #calculate good ese
#sigma = None # you can activate that if you are not sure about the errors
xfit = interestspectra[:,0] # region to be fitted
data = interestspectra[:,1] # region to be fitted
params = Parameters()
####################### FOR MELT:
####################### COMMENT IF NOT WANTED
# (Name, Value, Vary, Min, Max, Expr)
params.add_many(('a1', 1, True, 0, None, None),
('f1', 5200, True, 750, None, None),
('l1', 1, True, 0, None, None),
('a2', 1, True, 0, None, None),
('f2', 5400, True, None, None, None),
('l2', 1, True, None, None, None))
    result = minimize(residual, params, args=(xfit, data)) # fit data with leastsq model from scipy
model = fit_report(params) # the report
    yout, peak1, peak2 = residual(params, xfit) # the different peaks
#### We just calculate the different areas up to 4700 cmm-1 and those of the gaussians
# Select interest areas for calculating the areas of OH and H2Omol peaks
intarea45 = sample[np.where((sample[:,0]> 4100) & (sample[:,0]<4700))]
area4500 = np.trapz(intarea45[:,1],intarea45[:,0])
esearea4500 = 1/sqrt(area4500) # We assume that RELATIVE errors on areas are globally equal to 1/sqrt(Area)
# now for the gaussians
# unpack parameters:
# extract .value attribute for each parameter
    a1 = params['a1'].value
    a2 = params['a2'].value
    l1 = params['l1'].value
    l2 = params['l2'].value
AireG1 = gaussianarea(a1,l1)
AireG2 = gaussianarea(a2,l2)
##### WE DO A NICE FIGURE THAT CAN BE IMPROVED FOR PUBLICATION
fig = figure()
plot(sample[:,0],sample[:,1],'k-')
plot(xfit,yout,'r-')
plot(xfit,peak1,'b-')
plot(xfit,peak2,'b-')
xlim(lb,hb)
ylim(0,np.max(sample[:,1]))
xlabel("Wavenumber, cm$^{-1}$", fontsize = 18, fontweight = "bold")
ylabel("Absorption, a. u.", fontsize = 18, fontweight = "bold")
text(4000,np.max(intarea45[:,1])+0.03*np.max(intarea45[:,1]),('Area OH: \n'+'%.1f' % area4500),color='b',fontsize = 16)
text(4650,a1 + 0.05*a1,('Area pic 1$: \n'+ '%.1f' % AireG1),color='b',fontsize = 16)
text(5000,a2 + 0.05*a2,('OH/H$_2$O$_{mol}$: \n'+'%.3f' % ratioOH_H2O+'\n+/-'+'%.3f' % eseratioOH_H2O),color='r',fontsize = 16)
##### output of data, fitted peaks, parameters, and the figure in pdf
##### all goes into the ./deconv/ folder
name.rfind('/')
nameout = name[name.rfind('/')+1::]
namesample = nameout[0:nameout.find('.')]
pathint = str('/deconv/') # the output folder
ext1 = '_ydec.txt'
ext2 = '_params.txt'
ext3 = '.pdf'
pathout1 = pathbeg+pathint+namesample+ext1
pathout2 = pathbeg+pathint+namesample+ext2
pathout3 = pathbeg+pathint+namesample+ext3
matout = np.vstack((xfit,data,yout,peak1,peak2))
matout = np.transpose(matout)
np.savetxt(pathout1,matout) # saving the arrays of spectra
fd = os.open( pathout2, os.O_RDWR|os.O_CREAT ) # Open a file and create it if it do not exist
fo = os.fdopen(fd, "w+") # Now get a file object for the above file.
fo.write(model) # write the parameters in it
fo.close()
savefig(pathout3) # save the figure
|
rwightman/pytorch-image-models
|
timm/models/sknet.py
|
Python
|
apache-2.0
| 8,742
| 0.004004
|
""" Selective Kernel Networks (ResNet base)
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268)
and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer
to the original paper with some modifications of my own to better balance param count vs accuracy.
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from torch import nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import SelectiveKernel, ConvBnAct, create_attn
from .registry import register_model
from .resnet import ResNet
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
default_cfgs = {
'skresnet18': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'),
'skresnet34': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'),
'skresnet50': _cfg(),
'skresnet50d': _cfg(
first_conv='conv1.0'),
'skresnext50_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'),
}
class SelectiveKernelBasic(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
super(SelectiveKernelBasic, self).__init__()
sk_kwargs = sk_kwargs or {}
conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
assert base_width == 64, 'BasicBlock doest not support changing base width'
first_planes = planes // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = SelectiveKernel(
inplanes, first_planes, stride=stride, dilation=first_dilation, **conv_kwargs, **sk_kwargs)
conv_kwargs['act_layer'] = None
self.conv2 = ConvBnAct(
first_planes, outplanes, kernel_size=3, dilation=dilation, **conv_kwargs)
self.se = create_attn(attn_layer, outplanes)
self.act = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_block = drop_block
self.drop_path = drop_path
def zero_init_last_bn(self):
nn.init.zeros_(self.conv2.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act(x)
return x
class SelectiveKernelBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None,
act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None,
drop_block=None, drop_path=None):
super(SelectiveKernelBottleneck, self).__init__()
sk_kwargs = sk_kwargs or {}
        conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
width = int(math.floor(planes * (base_width / 64)) * cardinality)
first_planes = width // reduce_first
outplanes = planes * self.expansion
first_dilation = first_dilation or dilation
self.conv1 = ConvBnAct(inplanes, first_planes, kernel_size=1, **conv_kwargs)
self.conv2 = SelectiveKernel(
            first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality,
**conv_kwargs, **sk_kwargs)
conv_kwargs['act_layer'] = None
self.conv3 = ConvBnAct(width, outplanes, kernel_size=1, **conv_kwargs)
self.se = create_attn(attn_layer, outplanes)
self.act = act_layer(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.drop_block = drop_block
self.drop_path = drop_path
def zero_init_last_bn(self):
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
if self.se is not None:
x = self.se(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act(x)
return x
def _create_skresnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ResNet, variant, pretrained,
default_cfg=default_cfgs[variant],
**kwargs)
@register_model
def skresnet18(pretrained=False, **kwargs):
"""Constructs a Selective Kernel ResNet-18 model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
model_args = dict(
block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last_bn=False, **kwargs)
return _create_skresnet('skresnet18', pretrained, **model_args)
@register_model
def skresnet34(pretrained=False, **kwargs):
"""Constructs a Selective Kernel ResNet-34 model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True)
model_args = dict(
block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last_bn=False, **kwargs)
return _create_skresnet('skresnet34', pretrained, **model_args)
@register_model
def skresnet50(pretrained=False, **kwargs):
"""Constructs a Select Kernel ResNet-50 model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(split_input=True)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs),
zero_init_last_bn=False, **kwargs)
return _create_skresnet('skresnet50', pretrained, **model_args)
@register_model
def skresnet50d(pretrained=False, **kwargs):
"""Constructs a Select Kernel ResNet-50-D model.
Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this
variation splits the input channels to the selective convolutions to keep param count down.
"""
sk_kwargs = dict(split_input=True)
model_args = dict(
block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs)
    return _create_skresnet('skresnet50d', pretrained, **model_args)
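# Usage sketch (hedged): with this module importable as part of timm, the
# registered entrypoints above are normally instantiated through the factory,
# e.g.
#
#   import torch, timm
#   model = timm.create_model('skresnet18', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))
#
# The exact factory name and pretrained URLs depend on the surrounding library
# version; nothing here is specific to this file beyond the registered names.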
|
a1fred/git-barry
|
gitbarry/reasons/__init__.py
|
Python
|
mit
| 177
| 0
|
from gitbarry.reasons import start, finish, switch  # , switch, publish
REASONS = {
'start': start,
'finish': finish,
'switch': switch,
# 'publish': publish,
}
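# Illustrative lookup (a sketch; how each reason module is invoked is defined
# elsewhere in gitbarry, not in this file):
#
#   handler = REASONS['start']   # resolves to the imported `start` module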
|
theguardian/LazyLibrarian_Old
|
lazylibrarian/notifiers/tweet.py
|
Python
|
gpl-3.0
| 5,477
| 0.012416
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/lazylibrarian/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import lazylibrarian
from lazylibrarian import logger, common, formatter
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl #@UnusedImport
except:
from cgi import parse_qsl #@Reimport
import lib.oauth2 as oauth
import lib.pythontwitter as twitter
class TwitterNotifier:
consumer_key = "208JPTMMnZjtKWA4obcH8g"
consumer_secret = "BKaHzaQRd5PK6EH8EqPZ1w8mz6NSk9KErArarinHutk"
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def notify_snatch(self, title):
if lazylibrarian.TWITTER_NOTIFY_ONSNATCH:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH]+': '+title)
def notify_download(self, title):
if lazylibrarian.TWITTER_NOTIFY_ONDOWNLOAD:
self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD]+': '+title)
def test_notify(self):
return self._notifyTwitter("This is a test notification from LazyLibrarian / " + formatter.now(), force=True)
def _get_authorization(self):
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
logger.info('Requesting temp token from Twitter')
resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
            logger.info('Invalid response from Twitter requesting temp token: %s' % resp['status'])
else:
request_token = dict(parse_qsl(content))
lazylibrarian.TWITTER_USERNAME = request_token['oauth_token']
lazylibrarian.TWITTER_PASSWORD = request_token['oauth_token_secret']
return self.AUTHORIZATION_URL+"?oauth_token="+ request_token['oauth_token']
def _get_credentials(self, key):
request_token = {}
request_token['oauth_token'] = lazylibrarian.TWITTER_USERNAME
request_token['oauth_token_secret'] = lazylibrarian.TWITTER_PASSWORD
request_token['oauth_callback_confirmed'] = 'true'
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(key)
        logger.info('Generating and signing request for an access token using key '+key)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
logger.info('oauth_consumer: '+str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
logger.info('oauth_client: '+str(oauth_client))
resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
logger.info('resp, content: '+str(resp)+','+str(content))
access_token = dict(parse_qsl(content))
logger.info('access_token: '+str(access_token))
logger.info('resp[status] = '+str(resp['status']))
if resp['status'] != '200':
            logger.error('The request for a token did not succeed: '+str(resp['status']))
return False
else:
logger.info('Your Twitter Access Token key: %s' % access_token['oauth_token'])
logger.info('Access Token secret: %s' % access_token['oauth_token_secret'])
lazylibrarian.TWITTER_USERNAME = access_token['oauth_token']
lazylibrarian.TWITTER_PASSWORD = access_token['oauth_token_secret']
return True
def _send_tweet(self, message=None):
username=self.consumer_key
password=self.consumer_secret
access_token_key=lazylibrarian.TWITTER_USERNAME
access_token_secret=lazylibrarian.TWITTER_PASSWORD
logger.info(u"Sending tweet: "+message)
api = twitter.Api(username, password, access_token_key, access_token_secret)
try:
api.PostUpdate(message)
except Exception, e:
logger.error(u"Error Sending Tweet: %s" %e)
return False
return True
def _notifyTwitter(self, message='', force=False):
prefix = lazylibrarian.TWITTER_PREFIX
if not lazylibrarian.USE_TWITTER and not force:
return False
return self._send_tweet(prefix+": "+message)
notifier = TwitterNotifier
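# Usage sketch (assumes LazyLibrarian's configuration has been loaded so the
# USE_TWITTER / TWITTER_* settings referenced above are populated):
#
#   n = notifier()
#   n.test_notify()               # always sends, because force=True internally
#   n.notify_snatch('Some Book')  # only fires when TWITTER_NOTIFY_ONSNATCH is set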
|
arbakker/yt-daemon
|
search_yt.py
|
Python
|
mit
| 380
| 0.005263
|
import urllib
import urllib2
from bs4 import BeautifulSoup
textToSearch = 'gorillaz'
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html)
for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}):
    print 'https://www.youtube.com' + vid['href']
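# Python 3 equivalent (a sketch; assumes `requests` and `bs4` are installed and
# that YouTube still serves this markup, which is not guaranteed):
#
#   import requests
#   from bs4 import BeautifulSoup
#   html = requests.get('https://www.youtube.com/results',
#                       params={'search_query': 'gorillaz'}).text
#   soup = BeautifulSoup(html, 'html.parser')
#   for vid in soup.find_all(attrs={'class': 'yt-uix-tile-link'}):
#       print('https://www.youtube.com' + vid['href'])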
|
trungdtbk/faucet
|
tests/generative/integration/mininet_tests.py
|
Python
|
apache-2.0
| 17,678
| 0.001923
|
#!/usr/bin/env python3
import random
import unittest
import networkx
from mininet.topo import Topo
from clib.mininet_test_watcher import TopologyWatcher
from clib.mininet_test_base_topo import FaucetTopoTestBase
class FaucetFaultToleranceBaseTest(FaucetTopoTestBase):
"""
Generate a topology of the given parameters (using build_net & TopoBaseTest)
and then call network function to test the network and then slowly tear out bits
until the expected host connectivity does not match the real host connectivity.
===============================================================================================
INSTANT_FAIL:
        When set to true, the test fails as soon as a pair of hosts cannot establish a
        connection. When set to false, the suite keeps testing connectivity after a fault
        so that the full connectivity graph for the current fault can still be built.
ASSUME_SYMMETRIC_PING:
A simplification can assume that (h1 -> h2) implies (h2 -> h1).
Set to true to assume that host connectivity is symmetric.
INTERVLAN_ONLY:
        Set to true to test only inter-VLAN connectivity and ignore connections between hosts
        on the same VLAN. This speeds up inter-VLAN testing by skipping the intra-VLAN cases
        for tests that inherit from an intra-VLAN test, under the assumption that inter-VLAN
        faults do not disrupt intra-VLAN connectivity.
===============================================================================================
TODO: Add the following options
PROTECTED_NODES/EDGES: Prevent desired nodes/edges from being destroyed
ASSUME_TRANSITIVE_PING: Assume for (h1 -> h2) & (h2 -> h3) then (h1 -> h3)
IGNORE_SUBGRAPH: Assume for a topology with subgraphs, the subgraphs do not need to be tested
(if they have already been tested)
"""
INSTANT_FAIL = True
ASSUME_SYMMETRIC_PING = True
INTERVLAN_ONLY = False
    # Watches the faults and host connectivity
topo_watcher = None
# List of fault events
fault_events = None
# Number of faults to occur before recalculating connectivity
num_faults = 1
# Fault-tolerance tests will only work in software
SOFTWARE_ONLY = True
# Randomization variables
seed = 1
rng = None
# Number of VLANs to create, if >= 2 then routing will be applied
NUM_VLANS = None
# Number of DPs in the network
NUM_DPS = None
# Number of links between switches
N_DP_LINKS = None
host_links = None
switch_links = None
routers = None
stack_roots = None
def setUp(self):
pass
def set_up(self, network_graph, stack_roots, host_links=None, host_vlans=None):
"""
Args:
network_graph (networkx.MultiGraph): Network topology for the test
stack_roots (dict): The priority values for the stack roots
host_links (dict): Links for each host to switches
host_vlans (dict): VLAN for each host
"""
super().setUp()
switch_links = list(network_graph.edges()) * self.N_DP_LINKS
link_vlans = {edge: None for edge in switch_links}
if not host_links or not host_vlans:
# Setup normal host links & vlans
host_links = {}
host_vlans = {}
host_n = 0
for dp_i in network_graph.nodes():
for v in range(self.NUM_VLANS):
host_links[host_n] = [dp_i]
host_vlans[host_n] = v
host_n += 1
dp_options = {}
for i in network_graph.nodes():
dp_options.setdefault(i, {
'group_table': self.GROUP_TABLE,
                'ofchannel_log': self.debug_log_path + str(i) if self.debug_log_path else None,
'hardware': 'Open vSwitch'
})
if i in stack_roots:
dp_options[i]['stack'] = {'priority': stack_roots[i]}
vlan_options = {}
routers = {}
if self.NUM_VLANS >= 2:
# Setup options for routing
routers = {0: list(range(self.NUM_VLANS))}
for i in range(self.NUM_VLANS):
vlan_options[i] = {
'faucet_mac': self.faucet_mac(i),
'faucet_vips': [self.faucet_vip(i)],
'targeted_gw_resolution': False
}
for i in network_graph.nodes():
dp_options[i]['arp_neighbor_timeout'] = 2
dp_options[i]['max_resolve_backoff_time'] = 2
dp_options[i]['proactive_learn_v4'] = True
self.host_links = host_links
self.switch_links = switch_links
self.routers = routers
self.stack_roots = stack_roots
self.build_net(
host_links=host_links,
host_vlans=host_vlans,
switch_links=switch_links,
link_vlans=link_vlans,
n_vlans=self.NUM_VLANS,
dp_options=dp_options,
vlan_options=vlan_options,
routers=routers
)
self.start_net()
def host_connectivity(self, host, dst):
"""Ping to a destination, return True if the ping was successful"""
try:
self._ip_ping(host, dst, 5, timeout=50, count=5, require_host_learned=False)
except AssertionError:
return False
return True
def calculate_connectivity(self):
"""Ping between each set of host pairs to calculate host connectivity"""
connected_hosts = self.topo_watcher.get_connected_hosts(
two_way=not self.ASSUME_SYMMETRIC_PING, strictly_intervlan=self.INTERVLAN_ONLY)
for src, dsts in connected_hosts.items():
src_host = self.host_information[src]['host']
for dst in dsts:
dst_host = self.host_information[dst]['host']
dst_ip = self.host_information[dst]['ip']
result = self.host_connectivity(src_host, dst_ip.ip)
self.topo_watcher.add_network_info(src_host.name, dst_host.name, result)
self.assertTrue(not self.INSTANT_FAIL or result, 'Pair connection failed')
def create_controller_fault(self, *args):
"""
Set controller down (disconnects all switches from the controller)
Args:
index: The index to the controller to take down
"""
index = args[0]
controller = self.net.controllers[index]
controller.stop()
self.net.controllers.remove(controller)
self.topo_watcher.add_fault('Controller %s DOWN' % controller.name)
def create_random_controller_fault(self, *args):
"""Randomly create a fault for a controller"""
controllers = [c for c in self.net.controllers if c.name != 'gauge']
i = random.randrange(len(controllers))
c_name = controllers[i].name
controller = next((cont for cont in self.net.controllers if cont.name == c_name), None)
if controller is None:
return
self.create_controller_fault(self.net.controllers.index(controller))
def create_switch_fault(self, *args):
"""
Set switch down (Deletes the OVS switch bridge)
Args:
index: Index of the switch dpid to take out
"""
index = args[0]
dpid = self.dpids[index]
switch_name = self.topo.switches_by_id[index]
switch = next((switch for switch in self.net.switches if switch.name == switch_name), None)
if switch is None:
return
self.dump_switch_flows(switch)
name = '%s:%s DOWN' % (self.topo.switches_by_id[index], self.dpids[index])
self.topo_watcher.add_switch_fault(index, name)
switch.stop()
switch.cmd(self.VSCTL, 'del-controller', switch.name, '|| true')
self.assertTrue(
self.wait_for_prometheus_var(
'of_dp_disconnections_total', 1, dpid=dpid), 'DP %s not detected as DOWN' % dpid)
self.net.switches.remove(switch)
def random_switch_fault(self, *args):
"""Randomly take out an available s
|
denis-vilyuzhanin/selenium-fastview
|
py/test/selenium/webdriver/common/element_attribute_tests.py
|
Python
|
apache-2.0
| 12,180
| 0.00312
|
#!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
class ElementAttributeTests(unittest.TestCase):
def testShouldReturnNullWhenGettingTheValueOfAnAttributeThatIsNotListed(self):
self._loadSimplePage()
head = self.driver.find_element_by_xpath("/html")
attribute = head.get_attribute("cheese")
self.assertTrue(attribute is None)
def testShouldReturnNullWhenGettingSrcAttributeOfInvalidImgTag(self):
self._loadSimplePage()
img = self.driver.find_element_by_id("invalidImgTag")
img_attr = img.get_attribute("src")
self.assertTrue(img_attr is None)
def testShouldReturnAnAbsoluteUrlWhenGettingSrcAttributeOfAValidImgTag(self):
self._loadSimplePage()
img = self.driver.find_element_by_id("validImgTag")
img_attr = img.get_attribute("src")
self.assertTrue("icon.gif" in img_attr)
def testShouldReturnAnAbsoluteUrlWhenGettingHrefAttributeOfAValidAnchorTag(self):
self._loadSimplePage()
img = self.driver.find_element_by_id("validAnchorTag")
img_attr = img.get_attribute("href")
self.assertTrue("icon.gif" in img_attr)
def testShouldReturnEmptyAttributeValuesWhenPresentAndTheValueIsActuallyEmpty(self):
self._loadSimplePage()
body = self.driver.find_element_by_xpath("//body")
self.assertEqual("", body.get_attribute("style"))
def testShouldReturnTheValueOfTheDisabledAttributeAsFalseIfNotSet(self):
self._loadPage("formPage")
inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
self.assertEqual(None, inputElement.get_attribute("disabled"))
self.assertTrue(inputElement.is_enabled())
pElement = self.driver.find_element_by_id("peas")
self.assertEqual(None, pElement.get_attribute("disabled"))
self.assertTrue(pElement.is_enabled())
def testShouldReturnTheValueOfTheIndexAttrbuteEvenIfItIsMissing(self):
self._loadPage("formPage")
multiSelect = self.driver.find_element_by_id("multi")
options = multiSelect.find_elements_by_tag_name("option")
self.assertEqual("1", options[1].get_attribute("index"))
def testShouldIndicateTheElementsThatAreDisabledAreNotis_enabled(self):
self._loadPage("formPage")
inputElement = self.driver.find_element_by_xpath("//input[@id='notWorking']")
self.assertFalse(inputElement.is_enabled())
inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
self.assertTrue(inputElement.is_enabled())
def testElementsShouldBeDisabledIfTheyAreDisabledUsingRandomDisabledStrings(self):
self._loadPage("formPage")
disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
self.assertFalse(disabledTextElement1.is_enabled())
disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
self.assertFalse(disabledTextElement2.is_enabled())
disabledSubmitElement = self.driver.find_element_by_id("disabledSubmitElement")
self.assertFalse(disabledSubmitElement.is_enabled())
def testShouldIndicateWhenATextAreaIsDisabled(self):
self._loadPage("formPage")
textArea = self.driver.find_element_by_xpath("//textarea[@id='notWorkingArea']")
self.assertFalse(textArea.is_enabled())
def testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings(self):
self._loadPage("formPage")
disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
try:
disabledTextElement1.send_keys("foo")
self.fail("Should have thrown exception")
except:
pass
self.assertEqual("", disabledTextElement1.text)
disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
try:
disabledTextElement2.send_keys("bar")
self.fail("Should have thrown exception")
except:
pass
self.assertEqual("", disabledTextElement2.text)
def testShouldIndicateWhenASelectIsDisabled(self):
self._loadPage("formPage")
enabled = self.driver.find_element_by_name("selectomatic")
disabled = self.driver.find_element_by_name("no-select")
self.assertTrue(enabled.is_enabled())
self.assertFalse(disabled.is_enabled())
def testShouldReturnTheValueOfCheckedForACheckboxEvenIfItLacksThatAttribute(self):
self._loadPage("formPage")
checkbox = self.driver.find_element_by_xpath("//input[@id='checky']")
self.assertTrue(checkbox.get_attribute("checked") is None)
checkbox.click()
self.assertEqual("true", checkbox.get_attribute("checked"))
def testShouldReturnTheValueOfSelectedForRadioButtonsEvenIfTheyLackThatAttribute(self):
self._loadPage("formPage")
neverSelected = self.driver.find_element_by_id("cheese")
initiallyNotSelected = self.driver.find_element_by_id("peas")
initiallySelected = self.driver.find_element_by_id("cheese_and_peas")
        self.assertTrue(neverSelected.get_attribute("selected") is None, "false")
        self.assertTrue(initiallyNotSelected.get_attribute("selected") is None, "false")
self.assertEqual("true", initiallySelected.get_attribute("selected"), "true")
initiallyNotSelected.click()
self.assertTrue(neverSelected.get_attribute("selected") is None)
self.assertEqual("true", initiallyNotSelected.get_attribute("selected"))
self.assertTrue(initiallySelected.get_attribute("selected") is None)
def testShouldReturnTheValueOfSelectedForOptionsInSelectsEvenIfTheyLackThatAttribute(self):
self._loadPage("formPage")
selectBox = self.driver.find_element_by_xpath("//select[@name='selectomatic']")
options = selectBox.find_elements_by_tag_name("option")
one = options[0]
two = options[1]
self.assertTrue(one.is_selected())
self.assertFalse(two.is_selected())
self.assertEqual("true", one.get_attribute("selected"))
self.assertTrue(two.get_attribute("selected") is None)
def testShouldReturnValueOfClassAttributeOfAnElement(self):
self._loadPage("xhtmlTest")
heading = self.driver.find_element_by_xpath("//h1")
classname = heading.get_attribute("class")
self.assertEqual("header", classname)
# Disabled due to issues with Frames
#def testShouldReturnValueOfClassAttributeOfAnElementAfterSwitchingIFrame(self):
# self._loadPage("iframes")
# self.driver.switch_to.frame("iframe1")
#
# wallace = self.driver.find_element_by_xpath("//div[@id='wallace']")
# classname = wallace.get_attribute("class")
# self.assertEqual("gromit", classname)
def testShouldReturnTheContentsOfATextAreaAsItsValue(self):
self._loadPage("formPage")
value = self.driver.find_element_by_id("withText").get_attribute("value")
self.assertEqual("Example text", value)
def testShouldReturnTheContentsOfATextAreaAsItsValueWhenSetToNonNorminalTrue(self):
self._loadPage("formPage")
e = self.driver.find_element_by_id("withText")
self.driver.execute_script("arguments[0].value = 'tRuE'", e)
value = e.get_attribute("value")
self.assertEqual("tRuE",
|
MissionCriticalCloud/marvin
|
marvin/cloudstackAPI/listVPCOfferings.py
|
Python
|
apache-2.0
| 5,265
| 0.002279
|
"""Lists VPC offerings"""
from baseCmd import *
from baseResponse import *
class listVPCOfferingsCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "false"
"""list VPC offerings by display text"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""list VPC offerings by id"""
self.id = None
self.typeInfo['id'] = 'uuid'
"""true if need to list only default VPC offerings. Default value is false"""
self.isdefault = None
self.typeInfo['isdefault'] = 'boolean'
"""List by keyword"""
self.keyword = None
self.typeInfo['keyword'] = 'string'
"""list VPC offerings by name"""
self.name = None
self.typeInfo['name'] = 'string'
""""""
self.page = None
self.typeInfo['page'] = 'integer'
""""""
self.pagesize = None
self.typeInfo['pagesize'] = 'integer'
"""list VPC offerings by state"""
self.state = None
self.typeInfo['state'] = 'string'
"""list VPC offerings supporting certain services"""
self.supportedservices = []
self.typeInfo['supportedservices'] = 'list'
self.required = []
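# Usage sketch (assumes a marvin cloudstackAPIClient instance named `apiclient`;
# the attribute names follow the command class above):
#
#   cmd = listVPCOfferingsCmd()
#   cmd.state = 'Enabled'
#   offerings = apiclient.listVPCOfferings(cmd)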
class listVPCOfferingsResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the id of the vpc offering"""
self.id = None
self.typeInfo['id'] = 'string'
"""the date this vpc offering was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""an alternate display text of the vpc offering."""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""indicates if the vpc offering supports distributed router for one-hop forwarding"""
self.distributedvpcrouter = None
self.typeInfo['distributedvpcrouter'] = 'boolean'
"""true if vpc offering is default, false otherwise"""
self.isdefault = None
self.typeInfo['isdefault'] = 'boolean'
"""the name of the vpc offering"""
self.name = None
self.typeInfo['name'] = 'string'
"""The secondary system compute offering id used for the virtual router"""
self.secondaryserviceofferingid = None
self.typeInfo['secondaryserviceofferingid'] = 'string'
"""The secondary system compute offering name used for the virtual router"""
self.secondaryserviceofferingname = None
self.typeInfo['secondaryserviceofferingname'] = 'string'
"""The primary system compute offering id used for the virtual router"""
self.serviceofferingid = None
self.typeInfo['serviceofferingid'] = 'string'
"""The primary system compute offering name used for the virtual router"""
self.serviceofferingname = None
self.typeInfo['serviceofferingname'] = 'string'
"""state of the vpc offering. Can be Disabled/Enabled"""
self.state = None
self.typeInfo['state'] = 'string'
"""indicated if the offering can support region level vpc"""
self.supportsregionLevelvpc = None
self.typeInfo['supportsregionLevelvpc'] = 'boolean'
"""the list of supported services"""
self.service = []
class capability:
def __init__(self):
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
class provider:
def __init__(self):
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
|
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
class service:
def __init__(self):
""""the service name""
|
"
self.name = None
""""the list of capabilities"""
self.capability = []
""""can this service capability value can be choosable while creatine network offerings"""
self.canchooseservicecapability = None
""""the capability name"""
self.name = None
""""the capability value"""
self.value = None
""""the service provider name"""
self.provider = []
""""uuid of the network provider"""
self.id = None
""""true if individual services can be enabled/disabled"""
self.canenableindividualservice = None
""""the destination physical network"""
self.destinationphysicalnetworkid = None
""""the provider name"""
self.name = None
""""the physical network this belongs to"""
self.physicalnetworkid = None
""""services for this provider"""
self.servicelist = None
""""state of the network provider"""
self.state = None
|
pscedu/slash2-stable
|
slash2/utils/tsuite2/managers/ion.py
|
Python
|
isc
| 1,513
| 0.017184
|
from managers import sl2gen
from utils.ssh import SSH
from paramiko import SSHException
import sys
import logging
log = logging.getLogger("sl2.ion")
def launch_ion(tsuite):
"""Launch ION daemons.
Args:
tsuite: tsuite runtime."""
gdbcmd_path = tsuite.conf["slash2"]["ion_gdb"]
sl2gen.launch_gdb_sl(tsuite, "ion", tsuite.sl2objects["ion"], "sliod", gdbcmd_path)
def create_ion(tsuite):
"""Create ION file systems.
Args:
tsuite: tsuite runtime."""
for ion in tsuite.sl2objects["ion"]:
#Create monolithic reference/replace dict
        repl_dict = dict(tsuite.src_dirs, **tsuite.build_dirs)
repl_dict = dict(repl_dict, **ion)
#Create remote connection to server
try:
user, host = tsuite.user, ion["host"]
log.debug("Connecting to {0}@{1}".format(user, host))
ssh = SSH(user, host, '')
cmd = """
mkdir -p {datadir}
            mkdir -p {fsroot}
{slmkfs} -Wi -u {fsuuid} -I {site_id} {fsroot}"""\
.format(**repl_dict)
sock_name = "ts.ion."+ion["id"]
sl2gen.sl_screen_and_wait(tsuite, ssh, cmd, sock_name)
log.info("Finished creating {0}!".format(ion["name"]))
ssh.close()
except SSHException, e:
log.fatal("Error with remote connection to {0} with res {1}!"\
.format(ion["host"], ion["name"]))
tsuite.shutdown()
def kill_ion(tsuite):
"""Kill ION daemons.
Args:
tsuite: runtime tsuite."""
sl2gen.stop_slash2_socks(tsuite, "ion", tsuite.sl2objects["ion"], "slictl", "sliod")
|
remond-andre/discord-wow-armory-bot-modified
|
tests.py
|
Python
|
mit
| 12,125
| 0.00833
|
import unittest
from constants import *
from wow import *
from util import *
class BaseTest(unittest.TestCase):
def test_for_normal_query_split(self):
# Tests to ensure that the query gets split properly when the bot gets a message.
# Example query: '!armory pve/pvp <name> <realm> <region>'
sample_query = '!armory pve jimo burning-legion us'
self.assertEqual(split_query(sample_query, 'pve'), ['jimo', 'burning-legion', 'pve', 'us'])
def test_for_url_query_split(self):
        # Tests to ensure that the query string gets split properly when the bot gets a url based message.
# Example query: '!armory pve/pvp <armory-link> <region>' (Accepts either a world of warcraft or battle net link)
sample_wow_url = '!armory pve https://worldofwarcraft.com/en-us/character/burning-legion/jimo us'
        sample_battlenet_url = '!armory pve http://us.battle.net/wow/en/character/burning-legion/jimo/advanced us'
self.assertEqual(split_query(sample_wow_url, 'pve'), ['jimo', 'burning-legion', 'pve', 'us'])
self.assertEqual(split_query(sample_battlenet_url, 'pvp'), ['jimo', 'burning-legion', 'pvp', 'us'])
def test_for_warrior_class(self):
# Makes sure that when the id for the Warrior class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_WARRIOR),
{'colour': 0xC79C6E, 'name': 'Warrior'})
def test_for_paladin_class(self):
# Makes sure that when the id for the Paladin class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_PALADIN),
{'colour': 0xF58CBA, 'name': 'Paladin'})
def test_for_hunter_class(self):
# Makes sure that when the id for the Hunter class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_HUNTER),
{'colour': 0xABD473, 'name': 'Hunter'})
def test_for_rogue_class(self):
# Makes sure that when the id for the Rogue class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_ROGUE),
{'colour': 0xFFF569, 'name': 'Rogue'})
def test_for_priest_class(self):
# Makes sure that when the id for the Priest class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_PRIEST),
{'colour': 0xFFFFFF, 'name': 'Priest'})
def test_for_death_knight_class(self):
# Makes sure that when the id for the Death Knight class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DEATH_KNIGHT),
{'colour': 0xC41F3B, 'name': 'Death Knight'})
def test_for_shaman_class(self):
# Makes sure that when the id for the Shaman class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_SHAMAN),
{'colour': 0x0070DE, 'name': 'Shaman'})
def test_for_mage_class(self):
# Makes sure that when the id for the Mage class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_MAGE),
{'colour': 0x69CCF0, 'name': 'Mage'})
def test_for_warlock_class(self):
# Makes sure that when the id for the Warlock class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_WARLOCK),
{'colour': 0x9482C9, 'name': 'Warlock'})
def test_for_monk_class(self):
# Makes sure that when the id for the Monk class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_MONK),
{'colour': 0x00FF96, 'name': 'Monk'})
def test_for_druid_class(self):
# Makes sure that when the id for the Druid class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DRUID),
{'colour': 0xFF7D0A, 'name': 'Druid'})
def test_for_demon_hunter_class(self):
# Makes sure that when the id for the Demon Hunter class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DEMON_HUNTER),
{'colour': 0xA330C9, 'name': 'Demon Hunter'})
def test_for_faction_name(self):
# Makes sure that when the id for either the Horde or Alliance faction is
        # passed we get the correct name in return.
self.assertEqual(faction_details(FACTION_ALLIANCE), 'Alliance')
self.assertEqual(faction_details(FACTION_HORDE), 'Horde')
def test_for_achievement_progress(self):
# Passes in some mock API data and expects it to return as completed.
# Tests for accuracy on each id check, not API data.
self.maxDiff = None
input_data_horde_sample = {
"achievements": {
"achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093,
2092, 2091, 11194, 11581, 11195, 11874, 5356, 5353, 5349, 11191, 11192, 11874]
}
}
input_data_alliance_sample = {
"achievements": {
"achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093,
2092, 2091, 11194, 11581, 11195, 11874, 5343, 5339, 5334, 11192, 11874, 11875]
}
}
expected_horde_data = {
'challenging_look': 'Completed',
'keystone_master': 'Completed',
'keystone_conqueror': 'Completed',
'keystone_challenger': 'Completed',
'arena_challenger': 'Completed',
'arena_rival': 'Completed',
'arena_duelist': 'Completed',
'arena_gladiator': 'Completed',
'rbg_2400_name': AC_HIGH_WARLORD_NAME,
'rbg_2000_name': AC_CHAMPION_NAME,
'rbg_1500_name': AC_FIRST_SERGEANT_NAME,
'rbg_2400': 'Completed',
'rbg_2000': 'Completed',
'rbg_1500': 'Completed',
'en_feat': 'Cutting Edge',
'tov_feat': 'Ahead of the Curve',
'nh_feat': 'Cutting Edge',
'tos_feat': 'Ahead of the Curve'
}
expected_alliance_data = {
'challenging_look': 'Completed',
'keystone_master': 'Completed',
'keystone_conqueror': 'Completed',
'keystone_challenger': 'Completed',
'arena_challenger': 'Completed',
'arena_rival': 'Completed',
'arena_duelist': 'Completed',
'arena_gladiator': 'Completed',
'rbg_2400_name': AC_GRAND_MARSHALL_NAME,
'rbg_2000_name': AC_LIEAUTENANT_COMMANDER_NAME,
'rbg_1500_name': AC_SERGEANT_MAJOR_NAME,
'rbg_2400': 'Completed',
'rbg_2000': 'Completed',
'rbg_1500': 'Completed',
'en_feat': 'Ahead of the Curve',
'tov_feat': 'Ahead of the Curve',
'nh_feat': 'Cutting Edge',
'tos_feat': 'Cutting Edge'
}
self.assertEqual(character_achievements(input_data_horde_sample, 'Horde'), expected_horde_data)
self.assertEqual(character_achievements(input_data_alliance_sample, 'Alliance'), expected_alliance_data)
def test_pvp_progression(self):
# Passes in some mock API data and expects it to return an object with the correct data.
# Tests for accuracy on each data check, not API data.
self.maxDiff = None
sample_data = {
"pvp": {
"brackets": {
"ARENA_BRACKET_2v2": {
"rating": 5928,
},
"ARENA_BRACKET_3v3": {
"rating": 1858,
},
"ARENA_BRACKET_RBG": {
"rating": 5999,
},
"ARENA_BRACKET_2v2_SKIRMISH": {
"rating": 2985,
}
}
},
"totalHonorableKills": 888399
}
expected_data = {
'2v2': 5928,
'2v2s': 2985,
'3v3': 1858,
|
superstack/nova
|
nova/db/sqlalchemy/migration.py
|
Python
|
apache-2.0
| 3,168
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from nova import flags
import sqlalchemy
from migrate.versioning import api as versioning_api
try:
from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
try:
# python-migration changed location of exceptions after 1.6.3
# See LP Bug #717467
        from migrate import exceptions as versioning_exceptions
except ImportError:
sys.exit(_("python-migrate is not installed. Exiting."))
FLAGS = flags.FLAGS
def db_sync(version=None):
db_version()
repo_path = _find_migrate_repo()
    return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version)
def db_version():
repo_path = _find_migrate_repo()
try:
return versioning_api.db_version(FLAGS.sql_connection, repo_path)
except versioning_exceptions.DatabaseNotControlledError:
# If we aren't version controlled we may already have the database
# in the state from before we started version control, check for that
# and set up version_control appropriately
meta = sqlalchemy.MetaData()
engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
meta.reflect(bind=engine)
try:
for table in ('auth_tokens', 'zones', 'export_devices',
'fixed_ips', 'floating_ips', 'instances',
'key_pairs', 'networks', 'projects', 'quotas',
'security_group_instance_association',
'security_group_rules', 'security_groups',
'services', 'migrations',
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
'volumes'):
assert table in meta.tables
return db_version_control(1)
except AssertionError:
return db_version_control(0)
def db_version_control(version=None):
repo_path = _find_migrate_repo()
versioning_api.version_control(FLAGS.sql_connection, repo_path, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
return path
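# Usage sketch (assumes FLAGS.sql_connection points at a reachable database):
#
#   current = db_version()   # stamps pre-existing schemas at version 0 or 1 first
#   db_sync()                # then upgrades to the latest migration in migrate_repo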
|
robmcmullen/peppy
|
peppy/third_party/aui/framemanager.py
|
Python
|
gpl-2.0
| 360,518
| 0.00593
|
# --------------------------------------------------------------------------- #
# AUI Library wxPython IMPLEMENTATION
#
# Original C++ Code From Kirix (wxAUI). You Can Find It At:
#
# License: wxWidgets license
#
# http:#www.kirix.com/en/community/opensource/wxaui/about_wxaui.html
#
# Current wxAUI Version Tracked: wxWidgets 2.9.0 SVN HEAD
#
#
# Python Code By:
#
# Andrea Gavana, @ 23 Dec 2005
# Latest Revision: 19 Aug 2010, 22.00 GMT
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# andrea.gavana@gmail.com
# gavana@kpo.kz
#
# Or, Obviously, To The wxPython Mailing List!!!
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
Description
===========
framemanager is the central module of the AUI class framework.
L{AuiManager} manages the panes associated with it for a particular `wx.Frame`, using
a pane's L{AuiPaneInfo} information to determine each pane's docking and floating
behavior. AuiManager uses wxPython's sizer mechanism to plan the layout of each frame.
It uses a replaceable dock art class to do all drawing, so all drawing is localized
in one area, and may be customized depending on an application's specific needs.
AuiManager works as follows: the programmer adds panes to the class, or makes
changes to existing pane properties (dock position, floating state, show state, etc...).
To apply these changes, AuiManager's L{AuiManager.Update} function is called. This batch
processing can be used to avoid flicker, by modifying more than one pane at a time,
and then "committing" all of the changes at once by calling `Update()`.
Panes can be added quite easily::
text1 = wx.TextCtrl(self, -1)
text2 = wx.TextCtrl(self, -1)
self._mgr.AddPane(text1, AuiPaneInfo().Left().Caption("Pane Number One"))
self._mgr.AddPane(text2, AuiPaneInfo().Bottom().Caption("Pane Number Two"))
self._mgr.Update()
Later on, the positions can be modified easily. The following will float an
existing pane in a tool window::
self._mgr.GetPane(text1).Float()
Layers, Rows and Directions, Positions
======================================
Inside AUI, the docking layout is figured out by checking several pane parameters.
Four of these are important for determining where a pane will end up.
**Direction** - Each docked pane has a direction, `Top`, `Bottom`, `Left`, `Right`, or `Center`.
This is fairly self-explanatory. The pane will be placed in the location specified
by this variable.
**Position** - More than one pane can be placed inside of a "dock". Imagine two panes
being docked on the left side of a window. One pane can be placed over another.
In proportionally managed docks, the pane position indicates its sequential position,
starting with zero. So, in our scenario with two panes docked on the left side, the
top pane in the dock would have position 0, and the second one would occupy position 1.
**Row** - A row can allow for two docks to be placed next to each other. One of the most
common places for this to happen is in the toolbar. Multiple toolbar rows are allowed,
the first row being in row 0, and the second in row 1. Rows can also be used on
vertically docked panes.
**Layer** - A layer is akin to an onion. Layer 0 is the very center of the managed pane.
Thus, if a pane is in layer 0, it will be closest to the center window (also sometimes
known as the "content window"). Increasing layers "swallow up" all layers of a lower
value. This can look very similar to multiple rows, but is different because all panes
in a lower level yield to panes in higher levels. The best way to understand layers
is by running the AUI sample (`AUI.py`).
"""
__author__ = "Andrea Gavana <andrea.gavana@gmail.com>"
__date__ = "31 March 2009"
import wx
import time
import types
import warnings
import auibar
import auibook
import tabmdi
import dockart
import tabart
from aui_utilities import Clip, PaneCreateStippleBitmap, GetDockingImage, GetSlidingPoints
from aui_constants import *
# Define this as a translation function
_ = wx.GetTranslation
_winxptheme = False
if wx.Platform == "__WXMSW__":
try:
import winxptheme
_winxptheme = True
except ImportError:
pass
# AUI Events
wxEVT_AUI_PANE_BUTTON = wx.NewEventType()
wxEVT_AUI_PANE_CLOSE = wx.NewEventType()
wxEVT_AUI_PANE_MAXIMIZE = wx.NewEventType()
wxEVT_AUI_PANE_RESTORE = wx.NewEventType()
wxEVT_AUI_RENDER = wx.NewEventType()
wxEVT_AUI_FIND_MANAGER = wx.NewEventType()
wxEVT_AUI_PANE_MINIMIZE = wx.NewEventType()
wxEVT_AUI_PANE_MIN_RESTORE = wx.NewEventType()
wxEVT_AUI_PANE_FLOATING = wx.NewEventType()
wxEVT_AUI_PANE_FLOATED = wx.NewEventType()
wxEVT_AUI_PANE_DOCKING = wx.NewEventType()
wxEVT_AUI_PANE_DOCKED = wx.NewEventType()
wxEVT_AUI_PERSPECTIVE_CHANGED = wx.NewEventType()
EVT_AUI_PANE_BUTTON = wx.PyEventBinder(wxEVT_AUI_PANE_BUTTON, 0)
""" Fires an event when the user left-clicks on a pane button. """
EVT_AUI_PANE_CLOSE = wx.PyEventBinder(wxEVT_AUI_PANE_CLOSE, 0)
""" A pane in `AuiManager` has been closed. """
EVT_AUI_PANE_MAXIMIZE = wx.PyEventBinder(wxEVT_AUI_PANE_MAXIMIZE, 0)
""" A pane in `AuiManager` has been maximized. """
EVT_AUI_PANE_RESTORE = wx.PyEventBinder(wxEVT_AUI_PANE_RESTORE, 0)
""" A pane in `AuiManager` has been restored from a maximized state. """
EVT_AUI_RENDER = wx.PyEventBinder(wxEVT_AUI_RENDER, 0)
""" Fires an event every time the AUI frame is being repainted. """
EVT_AUI_FIND_MANAGER = wx.PyEventBinder(wxEVT_AUI_FIND_MANAGER, 0)
""" Used to find which AUI manager is controlling a certain pane. """
EVT_AUI_PANE_MINIMIZE = wx.PyEventBinder(wxEVT_AUI_PANE_MINIMIZE, 0)
""" A pane in `AuiManager` has been minimized. """
EVT_AUI_PANE_MIN_RESTORE = wx.PyEventBinder(wxEVT_AUI_PANE_MIN_RESTORE, 0)
""" A pane in `AuiManager` has been restored from a minimized state. """
EVT_AUI_PANE_FLOATING = wx.PyEventBinder(wxEVT_AUI_PANE_FLOATING, 0)
""" A pane in `AuiManager` is about to be floated. """
EVT_AUI_PANE_FLOATED = wx.PyEventBinder(wxEVT_AUI_PANE_FLOATED, 0)
""" A pane in `AuiManager` has been floated. """
EVT_AUI_PANE_DOCKING = wx.PyEventBinder(wxEVT_AUI_PANE_DOCKING, 0)
""" A pane in `AuiManager` is about to be docked. """
EVT_AUI_PANE_DOCKED = wx.PyEventBinder(wxEVT_AUI_PANE_DOCKED, 0)
""" A pane in `AuiManager` has been docked. """
EVT_AUI_PERSPECTIVE_CHANGED = wx.PyEventBinder(wxEVT_AUI_PERSPECTIVE_CHANGED, 0)
""" The layout in `AuiManager` has been changed. """
# ---------------------------------------------------------------------------- #
class AuiDockInfo(object):
""" A class to store all properties of a dock. """
def __init__(self):
"""
Default class constructor.
Used internally, do not call it in your code!
"""
object.__init__(self)
self.dock_direction = 0
self.dock_layer = 0
self.dock_row = 0
self.size = 0
self.min_size = 0
self.resizable = True
self.fixed = False
self.toolbar = False
self.rect = wx.Rect()
self.panes = []
def IsOk(self):
"""
Returns whether a dock is valid or not.
In order to be valid, a dock needs to have a non-zero `dock_direction`.
"""
return self.dock_direction != 0
def IsHorizontal(self):
""" Returns whether the dock is horizontal or not. """
return self.dock_direction in [AUI_DOCK_TOP, AUI_DOCK_BOTTOM]
def IsVertical(self):
""" Returns whether the dock is vertical or not. """
return self.dock_direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT, AUI_DOCK_CENTER]
# ---------------------------------------------------------------------------- #
class AuiDockingGuideInfo(object):
""" A class which holds information about VS2005 docking guide windows. """
def __init__(self, other=None):
"""
Default class constructor.
Used internally, do not call it in your code!
:param `other`: another instance of L{AuiDockingGuideInfo}.
"""
if other:
self.Assign(other)
else:
# window representing the d
|
raffo85h/projecteuler
|
87. Prime power triples.py
|
Python
|
gpl-2.0
| 692
| 0.027457
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 29 15:52:33 2014
@author: raffaelerainone
"""
from time import clock
from math import sqrt
def is_prime(n):
check=True
i=2
while check and i<=sqrt(n):
if n%i==0:
check=False
i+=1
return check
start = clock()
lim=50*(10**6)
A=[]
prime_2 = [i for i in range(2,int(lim**(0.5))) if is_prime(i)]
prime_3 = [i for i in prime_2 if i<(int(lim**(0.34)))]
prime_4 = [i for i in prime_3 if i<(int(lim**(0.25)))]
for i in prime_2:
for j in prime_3:
for k in prime_4:
x=(i**2)+(j**3)+(k**4)
if x<lim:
A.append(x)
print len(set(A))
print clock() - start
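# Equivalent sketch that skips the intermediate list by filling a set directly
# (same limit and prime lists as above):
#
#   A = set()
#   for i in prime_2:
#       for j in prime_3:
#           for k in prime_4:
#               x = (i**2) + (j**3) + (k**4)
#               if x < lim:
#                   A.add(x)
#   print len(A)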
|
StellarCN/py-stellar-base
|
stellar_sdk/xdr/transaction_result.py
|
Python
|
apache-2.0
| 2,928
| 0
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .int64 import Int64
from .transaction_result_ext import TransactionResultExt
from .transaction_result_result import TransactionResultResult
__all__ = ["TransactionResult"]
@type_checked
class TransactionResult:
"""
XDR Source Code::
struct TransactionResult
{
int64 feeCharged; // actual fee charged for the transaction
union switch (TransactionResultCode code)
{
case txFEE_BUMP_INNER_SUCCESS:
case txFEE_BUMP_INNER_FAILED:
InnerTransactionResultPair innerResultPair;
case txSUCCESS:
case txFAILED:
OperationResult results<>;
default:
void;
}
result;
// reserved for future use
union switch (int v)
{
case 0:
void;
}
ext;
};
"""
def __init__(
self,
fee_charged: Int64,
result: TransactionResultResult,
ext: TransactionResultExt,
) -> None:
self.fee_charged = fee_charged
        self.result = result
self.ext = ext
def pack(self, packer: Packer) -> None:
self.fee_charged.pack(packer)
self.result.pack(packer)
self.ext.pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "TransactionResult":
fee_charged = Int64.unpack(unpacker)
result = TransactionResultResult.unpack(unpacker)
ext = TransactionResultExt.unpack(unpacker)
return cls(
fee_charged=fee_charged,
result=result,
ext=ext,
)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "TransactionResult":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "TransactionResult":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.fee_charged == other.fee_charged
and self.result == other.result
and self.ext == other.ext
)
def __str__(self):
out = [
f"fee_charged={self.fee_charged}",
f"result={self.result}",
f"ext={self.ext}",
]
return f"<TransactionResult {[', '.join(out)]}>"
|
buhe/judge
|
executors/RUBY19.py
|
Python
|
agpl-3.0
| 407
| 0.002457
|
import os
from .ruby import RubyExecutor
class Executor(RubyExecutor):
name = 'RUBY19'
def get_nproc(self):
return [-1, 1][os.name == 'nt']
def get_security(self):
from cptbox.syscalls import sys_write
sec = super(Executor, self).get_security()
sec[sys_write] = lambda debugger: debugger.arg0 in (1, 2, 4)
        return sec
initialize = Executor.initialize
|
gonicus/gosa
|
plugins/goto/conftest.py
|
Python
|
lgpl-2.1
| 496
| 0.002016
|
import pytest
from gosa.common import Environment
from gosa.common.components import PluginRegistry, ObjectRegistry
import os
def pytest_unconfigure(config):
PluginRegistry.getInstance('HTTPService').srv.stop()
PluginRegistry.shutdown()
@pytest.fixture(scope="session", autouse=True)
def use_test_config():
oreg = ObjectRegistry.getInstance() # @UnusedVariable
pr = PluginRegistry() # @UnusedVariable
    cr = PluginRegistry.getInstance("CommandRegistry") # @UnusedVariable
|
conejoninja/pelisalacarta
|
python/version-xbmc-09-plugin/core/config.py
|
Python
|
gpl-3.0
| 5,454
| 0.018532
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Configuration parameter management - xbmc
#------------------------------------------------------------
# tvalacarta
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
# Created by: Jesús (tvalacarta@gmail.com)
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
import sys
import os
import xbmcplugin
import xbmc
PLATFORM_NAME = "xbmc-plugin"
PLUGIN_NAME = "pelisalacarta"
def get_platform():
return PLATFORM_NAME
def is_xbmc():
return True
def get_library_support():
return True
def get_system_platform():
""" fonction: pour recuperer la platform que xbmc tourne """
import xbmc
platform = "unknown"
if xbmc.getCondVisibility( "system.platform.linux" ):
platform = "linux"
elif xbmc.getCondVisibility( "system.platform.xbox" ):
platform = "xbox"
    elif xbmc.getCondVisibility( "system.platform.windows" ):
platform = "windows"
elif xbmc.getCondVisibility( "system.platform.osx" ):
platform = "osx"
return platform
def open_settings():
xbmcplugin.openSettings( sys.argv[ 0 ] )
def get_setting(name):
return xbmcplugin.getSetting(name)
def set_setting(name,value):
try:
xbmcplugin.setSetting(name,value)
    except:
pass
def get_localized_string(code):
dev = xbmc.getLocalizedString( code )
try:
        dev = dev.encode ("utf-8") #This only applies to unicode strings. The rest stay as they are.
except:
pass
return dev
def get_library_path():
#return os.path.join( get_data_path(), 'library' )
default = os.path.join( get_data_path(), 'library' )
value = get_setting("librarypath")
if value=="":
value=default
return value
def get_temp_file(filename):
return xbmc.translatePath( os.path.join( "special://temp/", filename ))
def get_runtime_path():
return os.getcwd()
def get_data_path():
devuelve = xbmc.translatePath( os.path.join("special://home/","userdata","plugin_data","video",PLUGIN_NAME) )
    # XBMC in portable mode
if devuelve.startswith("special:"):
devuelve = xbmc.translatePath( os.path.join("special://xbmc/","userdata","plugin_data","video",PLUGIN_NAME) )
# Plex 8
if devuelve.startswith("special:"):
devuelve = os.getcwd()
return devuelve
def get_cookie_data():
import os
ficherocookies = os.path.join( get_data_path(), 'cookies.dat' )
cookiedatafile = open(ficherocookies,'r')
cookiedata = cookiedatafile.read()
cookiedatafile.close();
return cookiedata
# Test if all the required directories are created
def verify_directories_created():
import logger
import os
logger.info("pelisalacarta.core.config.verify_directories_created")
# Force download path if empty
download_path = get_setting("downloadpath")
if download_path=="":
download_path = os.path.join( get_data_path() , "downloads")
set_setting("downloadpath" , download_path)
# Force download list path if empty
download_list_path = get_setting("downloadlistpath")
if download_list_path=="":
download_list_path = os.path.join( get_data_path() , "downloads" , "list")
set_setting("downloadlistpath" , download_list_path)
# Force bookmark path if empty
bookmark_path = get_setting("bookmarkpath")
if bookmark_path=="":
bookmark_path = os.path.join( get_data_path() , "bookmarks")
set_setting("bookmarkpath" , bookmark_path)
# Create data_path if not exists
if not os.path.exists(get_data_path()):
logger.debug("Creating data_path "+get_data_path())
try:
os.mkdir(get_data_path())
except:
pass
# Create download_path if not exists
if not download_path.lower().startswith("smb") and not os.path.exists(download_path):
logger.debug("Creating download_path "+download_path)
try:
os.mkdir(download_path)
except:
pass
# Create download_list_path if not exists
if not download_list_path.lower().startswith("smb") and not os.path.exists(download_list_path):
logger.debug("Creating download_list_path "+download_list_path)
try:
os.mkdir(download_list_path)
except:
pass
# Create bookmark_path if not exists
if not bookmark_path.lower().startswith("smb") and not os.path.exists(bookmark_path):
logger.debug("Creating bookmark_path "+bookmark_path)
try:
os.mkdir(bookmark_path)
except:
pass
# Create library_path if not exists
if not get_library_path().lower().startswith("smb") and not os.path.exists(get_library_path()):
logger.debug("Creating library_path "+get_library_path())
try:
os.mkdir(get_library_path())
except:
pass
# Checks that a directory "xbmc" is not present on platformcode
old_xbmc_directory = os.path.join( get_runtime_path() , "platformcode" , "xbmc" )
if os.path.exists( old_xbmc_directory ):
logger.debug("Removing old platformcode.xbmc directory")
try:
import shutil
shutil.rmtree(old_xbmc_directory)
except:
pass
|
KonradMagnusson/PyPLOG
|
__init__.py
|
Python
|
mit
| 285
| 0.010526
|
import plog.plog as plg
PLOG = plg.PLOG
plog_color = plg.plog_color
plog = plg.plog
def perr(*msg, delim=" "):
plog(*msg, type=PLOG.err, delim=delim)
def pwrn(*msg, delim=" "):
plog(*msg, type=PLOG.warn, delim=delim)
__all__ = ["PLOG", "plog_color", "plog", "perr", "pwrn"]
|
j5shi/Thruster
|
pylibs/sqlite3/test/regression.py
|
Python
|
gpl-2.0
| 12,067
| 0.001243
|
#-*- coding: iso-8859-1 -*-
# pysqlite2/test/regression.py: pysqlite regression tests
#
# Copyright (C) 2006-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import unittest
import sqlite3 as sqlite
class RegressionTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
def tearDown(self):
self.con.close()
def CheckPragmaUserVersion(self):
# This used to crash pysqlite because this pragma command returns NULL for the column name
cur = self.con.cursor()
cur.execute("pragma user_version")
def CheckPragmaSchemaVersion(self):
# This still crashed pysqlite <= 2.2.1
con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES)
try:
cur = self.con.cursor()
cur.execute("pragma schema_version")
finally:
cur.close()
con.close()
def CheckStatementReset(self):
# pysqlite 2.1.0 to 2.2.0 have the problem that not all statements are
# reset before a rollback, but only those that are still in the
# statement cache. The others are not accessible from the connection object.
con = sqlite.connect(":memory:", cached_statements=5)
cursors = [con.cursor() for x in xrange(5)]
cursors[0].execute("create table test(x)")
for i in range(10):
cursors[0].executemany("insert into test(x) values (?)", [(x,) for x in xrange(10)])
for i in range(5):
cursors[i].execute(" " * i + "select x from test")
con.rollback()
def CheckColumnNameWithSpaces(self):
cur = self.con.cursor()
cur.execute('select 1 as "foo bar [datetime]"')
self.assertEqual(cur.description[0][0], "foo bar")
cur.execute('select 1 as "foo baz"')
self.assertEqual(cur.description[0][0], "foo baz")
def CheckStatementFinalizationOnCloseDb(self):
# pysqlite versions <= 2.3.3 only finalized statements in the statement
        # cache when closing the database. Statements that were still
        # referenced in cursors weren't closed and could provoke
        # "OperationalError: Unable to close due to unfinalised statements".
con = sqlite.connect(":memory:")
cursors = []
# default statement cache size is 100
for i in range(105):
cur = con.cursor()
cursors.append(cur)
cur.execute("select 1 x union select " + str(i))
con.close()
def CheckOnConflictRollback(self):
if sqlite.sqlite_version_info < (3, 2, 2):
return
con = sqlite.connect(":memory:")
con.execute("create table foo(x, unique(x) on conflict rollback)")
con.execute("insert into foo(x) values (1)")
try:
con.execute("insert into foo(x) values (1)")
except sqlite.DatabaseError:
pass
con.execute("insert into foo(x) values (2)")
try:
con.commit()
except sqlite.OperationalError:
self.fail("pysqlite knew nothing about the implicit ROLLBACK")
def CheckWorkaroundForBuggySqliteTransferBindings(self):
"""
pysqlite would crash with older SQLite versions unless
a workaround is implemented.
"""
self.con.execute("create table foo(bar)")
self.con.execute("drop table foo")
self.con.execute("create table foo(bar)")
def CheckEmptyStatement(self):
"""
pysqlite used to segfault with SQLite versions 3.5.x. These return NULL
for "no-operation" statements
"""
self.con.execute("")
def CheckUnicodeConnect(self):
"""
With pysqlite 2.4.0 you needed to use a string or a APSW connection
object for opening database connections.
Formerly, both bytestrings and unicode strings used to work.
Let's make sure unicode strings work in the future.
"""
con = sqlite.connect(u":memory:")
con.close()
def CheckTypeMapUsage(self):
"""
pysqlite until 2.4.1 did not rebuild the row_cast_map when recompiling
a statement. This test exhibits the problem.
"""
SELECT = "select * from foo"
        con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
con.execute("create table foo(bar timestamp)")
con.execute("insert into foo(bar) values (?)", (datetime.datetime.now(),))
con.execute(SELECT)
con.execute("drop table foo")
con.execute("create table foo(bar integer)")
con.execute("insert into foo(bar) values (5)")
con.execute(SELECT)
|
def CheckRegisterAdapter(self):
"""
See issue 3312.
"""
self.assertRaises(TypeError, sqlite.register_adapter, {}, None)
def CheckSetIsolationLevel(self):
"""
See issue 3312.
"""
con = sqlite.connect(":memory:")
self.assertRaises(UnicodeEncodeError, setattr, con,
"isolation_level", u"\xe9")
def CheckCursorConstructorCallCheck(self):
"""
Verifies that cursor methods check whether base class __init__ was
called.
"""
class Cursor(sqlite.Cursor):
def __init__(self, con):
pass
con = sqlite.connect(":memory:")
cur = Cursor(con)
try:
cur.execute("select 4+5").fetchall()
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("should have raised ProgrammingError")
def CheckConnectionConstructorCallCheck(self):
"""
Verifies that connection methods check whether base class __init__ was
called.
"""
class Connection(sqlite.Connection):
def __init__(self, name):
pass
con = Connection(":memory:")
try:
cur = con.cursor()
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("should have raised ProgrammingError")
def CheckCursorRegistration(self):
"""
Verifies that subclassed cursor classes are correctly registered with
the connection object, too. (fetch-across-rollback problem)
"""
class Connection(sqlite.Connection):
def cursor(self):
return Cursor(self)
class Cursor(sqlite.Cursor):
def __init__(self, con):
sqlite.Cursor.__init__(self, con)
con = Connection(":memory:")
cur = con.cursor()
cur.execute("create table foo(x)")
cur.executemany("insert into foo(x) values (?)", [(3,), (4,), (5,)])
cur.execute("select x from foo")
con.rollback()
try:
cur.fetchall()
self.fail("should have raised InterfaceError")
except sqlite.InterfaceError:
pass
except:
|
UnknownStudio/Codeic
|
ScratchPlus/kurt/doc/conf.py
|
Python
|
mpl-2.0
| 10,130
| 0.00849
|
# -*- coding: utf-8 -*-
#
# Kurt documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 29 16:09:55 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from sphinx.ext import autodoc
sys.path.append(os.path.abspath('../'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.intersphinx', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kurt'
copyright = u'2013, Tim Radvan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_themes']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default' #'armstrong'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Kurtdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Kurt.tex', u'Kurt Documentation',
u'Tim Radvan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kurt', u'Kurt Documentation',
[u'Tim Radvan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Kurt', u'Kurt Documentation',
   u'Tim Radvan', 'Kurt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Kurt'
epub_author = u'Tim Radvan'
epub_publisher = u'Tim Radvan'
epub_copyright = u'2013, Tim Radvan'
# The language of the text. It defaults to the
|
anmekin/django-httplog
|
test_app/urls.py
|
Python
|
bsd-3-clause
| 405
| 0.004938
|
# coding: utf-8
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import patterns, url, include
from django.http import HttpResponse
def dummy(request):
return HttpResponse()
urlpatterns = patterns('',
url('^api/.+/$', dummy, name='dummy'),
    url('', include('django.contrib.auth.urls', app_name='auth', namespace='auth'))
)
|
WatsonDNA/nlp100
|
chap05/k44.py
|
Python
|
unlicense
| 343
| 0
|
#
# usage: python k44.py {file name} {number}
#
import sys
import pydot
from k41 import *
from k42 import get_relation_pairs
if __name__ == '__main__':
fn, nos = sys.argv[1], int(sys.argv[2])
sl = load_cabocha(fn)
pl = get_relation_pairs([sl[nos-1]])
    g = pydot.graph_from_edges(pl)
g.write_png('result.png', prog='dot')
|
pantsbuild/pants
|
src/python/pants/backend/scala/subsystems/scala.py
|
Python
|
apache-2.0
| 1,433
| 0.004187
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from pants.option.option_types import DictOption
from pants.option.subsystem import Subsystem
DEFAULT_SCALA_VERSION = "2.13.6"
_logger = logging.getLogger(__name__)
class ScalaSubsystem(Subsystem):
options_scope = "scala"
help = "Scala programming language"
_version_for_resolve = DictOption[str](
"--version-for-resolve",
help=(
"A dictionary mapping the name of a resolve to the Scala version to use for all Scala "
"targets consuming that resolve.\n\n"
'All Scala-compiled jars on a resolve\'s classpath must be "compatible" with one another and '
"with all Scala-compiled first-party sources from `scala_sources` (and other Scala target types) "
"using that resolve. The option sets the Scala version that will be used to compile all "
"first-party sources using the resolve. This ensures that the compatibility property is "
"maintained for a resolve. To support multiple Scala versions, use multiple resolves."
),
)
def version_for_resolve(self, resolve: str) -> str:
version = self._version_for_resolve.get(resolve)
if version:
return version
return DEFAULT_SCALA_VERSION
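    # Illustrative only (not part of the Pants source): with a hypothetical option
    # value such as {"jvm-default": "2.13.8"}, version_for_resolve("jvm-default")
    # returns "2.13.8", while any resolve that is not mapped falls back to
    # DEFAULT_SCALA_VERSION ("2.13.6").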
|
larsks/pydonet
|
lib/pydonet/construct/formats/filesystem/fat12.py
|
Python
|
gpl-2.0
| 86
| 0
|
"""
File Allocation Table (FAT) / 12 bit version
|
Used primarily for diskettes
"""
|
DreamSourceLab/DSView
|
libsigrokdecode4DSL/decoders/a7105/pd.py
|
Python
|
gpl-3.0
| 12,297
| 0.002114
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2020 Richard Li <richard.li@ces.hk>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class ChannelError(Exception):
pass
regs = {
# addr: ('name', size)
0x00: ('MODE', 1),
0x01: ('MODE_CTRL', 1),
0x02: ('CALC', 1),
0x03: ('FIFO_I', 1),
0x04: ('FIFO_II', 1),
0x05: ('FIFO_DATA', 1),
0x06: ('ID_DATA', 1),
0x07: ('RC_OSC_I', 1),
0x08: ('RC_OSC_II', 1),
0x09: ('RC_OSC_III', 1),
0x0a: ('CKO_PIN', 1),
0x0b: ('GPIO1_PIN_I', 1),
0x0c: ('GPIO2_PIN_II', 1),
0x0d: ('CLOCK', 1),
0x0e: ('DATA_RATE', 1),
0x0f: ('PLL_I', 1),
0x10: ('PLL_II', 1),
0x11: ('PLL_III', 1),
0x12: ('PLL_IV', 1),
0x13: ('PLL_V', 1),
0x14: ('TX_I', 1),
0x15: ('TX_II', 1),
0x16: ('DELAY_I', 1),
0x17: ('DELAY_II', 1),
0x18: ('RX', 1),
0x19: ('RX_GAIN_I', 1),
0x1a: ('RX_GAIN_II', 1),
0x1b: ('RX_GAIN_III', 1),
0x1c: ('RX_GAIN_IV', 1),
0x1d: ('RSSI_THRES', 1),
0x1e: ('ADC', 1),
0x1f: ('CODE_I', 1),
0x20: ('CODE_II', 1),
0x21: ('CODE_III', 1),
0x22: ('IF_CAL_I', 1),
0x23: ('IF_CAL_II', 1),
0x24: ('VCO_CURR_CAL', 1),
0x25: ('VCO_SB_CALC_I', 1),
0x26: ('VCO_SB_CALC_II', 1),
0x27: ('BATT_DETECT', 1),
0x28: ('TX_TEST', 1),
0x29: ('RX_DEM_TEST_I', 1),
0x2a: ('RX_DEM_TEST_II', 1),
0x2b: ('CPC', 1),
0x2c: ('CRYSTAL_TEST', 1),
0x2d: ('PLL_TEST', 1),
0x2e: ('VCO_TEST_I', 1),
0x2f: ('VCO_TEST_II', 1),
0x30: ('IFAT', 1),
0x31: ('RSCALE', 1),
0x32: ('FILTER_TEST', 1),
0x33: ('UNKNOWN', 1),
}
class Decoder(srd.Decoder):
api_version = 3
id = 'a7105'
name = 'A7105'
longname = 'AMICCOM A7105'
desc = '2.4GHz FSK/GFSK Transceiver with 2K ~ 500Kbps data rate.'
license = 'gplv2+'
inputs = ['spi']
outputs = []
tags = ['IC', 'Wireless/RF']
options = (
{'id': 'hex_display', 'desc': 'Display payload in Hex', 'default': 'yes',
'values': ('yes', 'no')},
)
annotations = (
# Sent from the host to the chip.
('cmd', 'Commands sent to the device'),
('tx-data', 'Payload sent to the device'),
# Returned by the chip.
('rx-data', 'Payload read from the device'),
('warning', 'Warnings'),
)
ann_cmd = 0
ann_tx = 1
ann_rx = 2
ann_warn = 3
annotation_rows = (
('commands', 'Commands', (ann_cmd, ann_tx, ann_rx)),
('warnings', 'Warnings', (ann_warn,)),
)
def __init__(self):
self.reset()
def reset(self):
self.next()
self.requirements_met = True
self.cs_was_released = False
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def warn(self, pos, msg):
'''Put a warning message 'msg' at 'pos'.'''
self.put(pos[0], pos[1], self.out_ann, [self.ann_warn, [msg]])
def putp(self, pos, ann, msg):
'''Put an annotation message 'msg' at 'pos'.'''
self.put(pos[0], pos[1], self.out_ann, [ann, [msg]])
def next(self):
'''Resets the decoder after a complete command was decoded.'''
# 'True' for the first byte after CS went low.
self.first = True
# The current command, and the minimum and maximum number
# of data bytes to follow.
self.cmd = None
self.min = 0
self.max = 0
# Used to collect the bytes after the command byte
# (and the start/end sample number).
self.mb = []
self.mb_s = -1
self.mb_e = -1
def mosi_bytes(self):
'''Returns the collected MOSI bytes of a multi byte command.'''
return [b[0] for b in self.mb]
def miso_bytes(self):
'''Returns the collected MISO bytes of a multi byte command.'''
return [b[1] for b in self.mb]
def decode_command(self, pos, b):
'''Decodes the command byte 'b' at position 'pos' and prepares
the decoding of the following data bytes.'''
c = self.parse_command(b)
if c is None:
self.warn(pos, 'unknown command')
return
self.cmd, self.dat, self.min, self.max = c
if self.cmd in ('W_REGISTER', 'R_REGISTER'):
# Don't output anything now, the command is merged with
# the data bytes following it.
self.mb_s = pos[0]
else:
self.putp(pos, self.ann_cmd, self.format_command())
def format_command(self):
'''Returns the label for the current command.'''
        return 'Cmd {}'.format(self.cmd)
def parse_command(self, b):
'''Parses the command byte.
Returns a tuple consisting of:
- the name of the command
- additional data needed to dissect the following bytes
- minimum number of following bytes
- maximum number of following bytes
'''
if b == 0x05:
return ('W_TX_FIFO', None, 1, 32)
elif b == 0x45:
return ('R_RX_FIFO', None, 1, 32)
|
if b == 0x06:
return ('W_ID', None, 1, 4)
elif b == 0x46:
return ('R_ID', None, 1, 4)
elif (b & 0b10000000) == 0:
if (b & 0b01000000) == 0:
c = 'W_REGISTER'
else:
c = 'R_REGISTER'
d = b & 0b00111111
return (c, d, 1, 1)
else:
cmd = b & 0b11110000
if cmd == 0b10000000:
return ('SLEEP_MODE', None, 0, 0)
if cmd == 0b10010000:
return ('IDLE_MODE', None, 0, 0)
if cmd == 0b10100000:
return ('STANDBY_MODE', None, 0, 0)
if cmd == 0b10110000:
return ('PLL_MODE', None, 0, 0)
if cmd == 0b11000000:
return ('RX_MODE', None, 0, 0)
if cmd == 0b11010000:
return ('TX_MODE', None, 0, 0)
if cmd == 0b11100000:
return ('FIFO_WRITE_PTR_RESET', None, 0, 0)
if cmd == 0b11110000:
return ('FIFO_READ_PTR_RESET', None, 0, 0)
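    # Illustrative outputs derived from the branches above (these examples are
    # not part of the original decoder source):
    #   parse_command(0x05) -> ('W_TX_FIFO', None, 1, 32)
    #   parse_command(0x46) -> ('R_ID', None, 1, 4)
    #   parse_command(0x21) -> ('W_REGISTER', 0x21, 1, 1)   # register CODE_III
    #   parse_command(0xB0) -> ('PLL_MODE', None, 0, 0)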
def decode_register(self, pos, ann, regid, data):
'''Decodes a register.
pos -- start and end sample numbers of the register
ann -- is the annotation number that is used to output the register.
regid -- may be either an integer used as a key for the 'regs'
dictionary, or a string directly containing a register name.'
data -- is the register content.
'''
if type(regid) == int:
# Get the name of the register.
if regid not in regs:
self.warn(pos, 'unknown register')
return
name = regs[regid][0]
else:
name = regid
# Multi byte register come LSByte first.
data = reversed(data)
label = '{}: {}'.format(self.format_command(), name)
self.decode_mb_data(pos, ann, data, label, True)
def decode_mb_data(self, pos, ann, data, label, always_hex):
'''Decodes the data bytes 'data' of a multibyte command at position
'pos'. The d
|
gizas/CSS_Extractor
|
replace.py
|
Python
|
mit
| 580
| 0.017241
|
#This script is for producing a new list of sites extracted from alexa top site list
import re
prefix = 'http://'
#suffix = '</td><td></td></tr><tr><td>waitForPageToLoad</td><td></td><td>3000</td></tr>'
with open('top100_alexa.txt','r') as f:
newlines = []
for line in f.readlines():
found=re.sub(r'\d+', '', line)
line=found
newlines.append(line.replace(',', ''))
with open('urls.txt', 'w') as f:
for line in newlines:
#f.write('%s%s%s\n' % (prefix, line.rstrip('\n'), suffix))
f.write('%s%s\n' % (prefix, line.rstrip('\n')))
|
nvoron23/tarantool
|
test/lib/admin_connection.py
|
Python
|
bsd-2-clause
| 2,482
| 0.000403
|
__author__ = "Konstantin Osipov <kostja.osipov@gmail.com>"
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import socket
import yaml
import sys
import re
from tarantool_connection import TarantoolConnection
ADMIN_SEPARATOR = '\n'
class AdminConnection(TarantoolConnection):
def execute_no_reconnect(self, command, silent):
if not command:
return
if not silent:
sys.stdout.write(command + ADMIN_SEPARATOR)
cmd = command.replace('\n', ' ') + ADMIN_SEPARATOR
self.socket.sendall(cmd)
bufsiz = 4096
res = ""
while True:
buf = self.socket.recv(bufsiz)
if not buf:
break
res = res + buf
if (res.rfind("\n...\n") >= 0 or res.rfind("\r\n...\r\n") >= 0):
break
# validate yaml by parsing it
try:
yaml.load(res)
finally:
if not silent:
sys.stdout.write(res.replace("\r\n", "\n"))
return res
def connect(self):
super(AdminConnection, self).connect()
handshake = self.socket.recv(128)
if not re.search(r'^Tarantool.*console.*', str(handshake)):
raise RuntimeError('Broken tarantool console handshake')
|
tbenthompson/LMS_public
|
lms_code/analysis/just_detach_bem.py
|
Python
|
mit
| 846
| 0.002364
|
import lms_code.lib.rep2 as rep2
from lms_code.analysis.run_bem import bemify, boundary_conditions,\
assemble, constrain, solve, evaluate_surface_disp
from lms_code.analysis.simplified_bem import create_surface_mesh, \
set_params
from codim1.core import simple_line_mesh, combine_meshes, ray_mesh
def create_fault_mesh(d):
top_fault_vert = [0, -1e9]
top = d['intersection_pt']
joint = [4.20012e5 + 1.6, -2.006e4 - 5]
bottom = [3.09134e5 + 1.1, -2.3376e4 - 3]
detach = simple_line_mesh(d['fault_elements'], bottom, joint)
d['fault_mesh'] = detach
if __name__ == "__main__":
d = dict()
set_params(d)
create_fault_mesh(d)
|
create_surface_mesh(d)
bemify(d)
boundary_conditions(d)
assemble(d)
# constrain(d)
solve(d)
evaluate_surface_disp(d)
rep2.save("bem_just_detach", d)
|
pandeydivesh15/AVSR-Deep-Speech
|
DeepSpeech_RHL.py
|
Python
|
gpl-2.0
| 72,056
| 0.005551
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import sys
log_level_index = sys.argv.index('--log_level') + 1 if '--log_level' in sys.argv else 0
os.environ['TF_CPP_MIN_LOG_LEVEL'] = sys.argv[log_level_index] if log_level_index > 0 and log_level_index < len(sys.argv) else '3'
import datetime
import pickle
import shutil
import subprocess
import tensorflow as tf
import time
import inspect
from six.moves import zip, range, filter, urllib, BaseHTTPServer
from tensorflow.contrib.session_bundle import exporter
from tensorflow.python.tools import freeze_graph
from threading import Thread, Lock
from util.data_set_helpers_RHL import SwitchableDataSet, read_data_sets
from util.gpu import get_available_gpus
from util.shared_lib import check_cupti
from util.spell import correction
from util.text_RHL import sparse_tensor_value_to_texts, wer
from xdg import BaseDirectory as xdg
# Importer
# ========
tf.app.flags.DEFINE_string ('train_files', '', 'comma separated list of files specifying the dataset used for training. multiple files will get merged')
tf.app.flags.DEFINE_string ('dev_files', '', 'comma separated list of files specifying the dataset used for validation. multiple files will get merged')
tf.app.flags.DEFINE_string ('test_files', '', 'comma separated list of files specifying the dataset used for testing. multiple files will get merged')
tf.app.flags.DEFINE_boolean ('fulltrace', False, 'if full trace debug info should be generated during training')
# Cluster configuration
# =====================
tf.app.flags.DEFINE_string ('ps_hosts', '', 'parameter servers - comma separated list of hostname:port pairs')
tf.app.flags.DEFINE_string ('worker_hosts', '', 'workers - comma separated list of hostname:port pairs')
tf.app.flags.DEFINE_string ('job_name', 'localhost', 'job name - one of localhost (default), worker, ps')
tf.app.flags.DEFINE_integer ('task_index', 0, 'index of task within the job - worker with index 0 will be the chief')
tf.app.flags.DEFINE_integer ('replicas', -1, 'total number of replicas - if negative, its absolute value is multiplied by the number of workers')
tf.app.flags.DEFINE_integer ('replicas_to_agg', -1, 'number of replicas to aggregate - if negative, its absolute value is multiplied by the number of workers')
tf.app.flags.DEFINE_string ('coord_retries', 100, 'number of tries of workers connecting to training coordinator before failing')
tf.app.flags.DEFINE_string ('coord_host', 'localhost', 'coordination server host')
tf.app.flags.DEFINE_integer ('coord_port', 2500, 'coordination server port')
tf.app.flags.DEFINE_integer ('iters_per_worker', 1, 'number of train or inference iterations per worker before results are sent back to coordinator')
# Global Constants
# ================
tf.app.flags.DEFINE_boolean ('train', True, 'wether to train the network')
tf.app.flags.DEFINE_boolean ('test', True, 'wether to test the network')
tf.app.flags.DEFINE_integer ('epoch', 75, 'target epoch to train - if negative, the absolute number of additional epochs will be trained')
tf.app.flags.DEFINE_boolean ('use_warpctc', False, 'wether to use GPU bound Warp-CTC')
tf.app.flags.DEFINE_float ('dropout_rate', 0.05, 'dropout rate for feedforward layers')
tf.app.flags.DEFINE_float ('dropout_rate2', -1.0, 'dropout rate for layer 2 - defaults to dropout_rate')
tf.app.flags.DEFINE_float ('dropout_rate3', -1.0, 'dropout rate for layer 3 - defaults to dropout_rate')
tf.app.flags.DEFINE_float ('dropout_rate4', 0.0, 'dropout rate for layer 4 - defaults to 0.0')
tf.app.flags.DEFINE_float ('dropout_rate5', 0.0, 'dropout rate for layer 5 - defaults to 0.0')
tf.app.flags.DEFINE_float ('dropout_rate6', -1.0, 'dropout rate for layer 6 - defaults to dropout_rate')
tf.app.flags.DEFINE_float ('relu_clip', 20.0, 'ReLU clipping value for non-recurrant layers')
# Adam optimizer (http://arxiv.org/abs/1412.6980) parameters
tf.app.flags.DEFINE_float ('beta1', 0.9, 'beta 1 parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('beta2', 0.999, 'beta 2 parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('epsilon', 1e-8, 'epsilon parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('learning_rate', 0.001, 'learning rate of Adam optimizer')
# Batch sizes
tf.app.flags.DEFINE_integer ('train_batch_size', 1, 'number of elements in a training batch')
tf.app.flags.DEFINE_integer ('dev_batch_size', 1, 'number of elements in a validation batch')
tf.app.flags.DEFINE_integer ('test_batch_size', 1, 'number of elements in a test batch')
# Sample limits
tf.app.flags.DEFINE_integer ('limit_train', 0, 'maximum number of elements to use from train set - 0 means no limit')
tf.app.flags.DEFINE_integer ('limit_dev', 0, 'maximum number of elements to use from validation set- 0 means no limit')
tf.app.flags.DEFINE_integer ('limit_test', 0, 'maximum number of elements to use from test set- 0 means no limit')
# Step widths
tf.app.flags.DEFINE_integer ('display_step', 0, 'number of epochs we cycle through before displaying detailed progress - 0 means no progress display')
tf.app.flags.DEFINE_integer ('validation_step', 0, 'number of epochs we cycle through before validating the model - a detailed progress report is dependent on "--display_step" - 0 means no validation steps')
# Checkpointing
tf.app.flags.DEFINE_string ('checkpoint_dir', '', 'directory in which checkpoints are stored - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification')
tf.app.flags.DEFINE_integer ('checkpoint_secs', 600, 'checkpoint saving interval in seconds')
# Exporting
tf.app.flags.DEFINE_string ('export_dir', '', 'directory in which exported models are stored - if omitted, the model won\'t get exported')
tf.app.flags.DEFINE_integer ('export_version', 1, 'version number of the exported model')
tf.app.flags.DEFINE_boolean ('remove_export', False, 'wether to remove old exported models')
# Reporting
tf.app.flags.DEFINE_integer ('log_level', 1, 'log level for console logs - 0: INFO, 1: WARN, 2: ERROR, 3: FATAL')
tf.app.flags.DEFINE_boolean ('log_traffic', False, 'log cluster transaction and traffic information during debug logging')
tf.app.flags.DEFINE_string ('wer_log_pattern', '', 'pattern for machine readable global logging of WER progress; has to contain %%s, %%s and %%f for the set name, the date and the float respectively; example: "GLOBAL LOG: logwer(\'12ade231\', %%s, %%s, %%f)" would result in some entry like "GLOBAL LOG: logwer(\'12ade231\', \'train\', \'2017-05-18T03:09:48-0700\', 0.05)"; if omitted (default), there will be no logging')
tf.app.flags.DEFINE_boolean ('log_placement',    False,       'wether to log device placement of the operators to the console')
tf.app.flags.DEFINE_integer ('report_count', 10, 'number of phrases with lowest WER (best matching) to print out during a WER report')
tf.app.flags.DEFINE_string  ('summary_dir',      '',          'target directory for TensorBoard summaries - defaults to directory "deepspeech/summaries" within user\'s data home specified by the XDG Base Directory Specification')
tf.app.flags.DEFINE_integer ('summary_secs', 0, 'interval in seconds for saving TensorBoard summaries - if 0, no summaries will be written')
# Geometry
tf.app.flags.DEFINE_integer ('n_hidden', 2048, 'layer width to use when initialising layers')
# Initialization
tf.app.flags.DEFINE_integer ('random_seed', 4567, 'default random seed that is used
|
sam-m888/gramps
|
gramps/gen/filters/rules/place/_hasnolatorlon.py
|
Python
|
gpl-2.0
| 1,902
| 0.005783
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/Place/_HasNoLatOrLon.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# HasNoLatOrLon
#
#-------------------------------------------------------------------------
class HasNoLatOrLon(Rule):
"""Rule that checks if Latitude or Longitude are not given"""
labels = []
name = _('Places with no latitude or longitude given')
description = _("Matches places with empty latitude or longitude")
category = _('Position filters')
def apply(self,db,place):
        if place.get_latitude().strip() and place.get_longitude().strip():
return False
return True
|
nighres/nighres
|
nighres/surface/levelset_curvature.py
|
Python
|
apache-2.0
| 4,723
| 0.002117
|
import os
import sys
import numpy as np
import nibabel as nb
import nighresjava
from ..io import load_volume, save_volume
# _check_available_memory is assumed to live in ..utils alongside the other
# helpers; it and sys are both used further down in this module.
from ..utils import _output_dir_4saving, _fname_4saving, \
    _check_available_memory
def levelset_curvature(levelset_image, distance=1.0,
save_data=False, overwrite=False, output_dir=None,
file_name=None):
"""Levelset curvature
Estimates surface curvature of a levelset using a quadric approximation scheme.
Parameters
----------
levelset_image: niimg
Levelset image to be turned into probabilities
distance: float, optional
Distance from the boundary in voxels where to estimate the curvature
save_data: bool, optional
Save output data to file (default is False)
overwrite: bool, optional
Overwrite existing results (default is False)
output_dir: str, optional
Path to desired output directory, will be created if it doesn't exist
file_name: str, optional
Desired base name for output files with file extension
(suffixes will be added)
Returns
----------
dict
Dictionary collecting outputs under the following keys
(suffix of output files in brackets)
* mcurv (niimg): Mean curvature (output file suffix _curv-mean)
* gcurv (niimg): Gaussian curvature (output file suffix _curv-gauss)
Notes
----------
Ported from original Java module by Pierre-Louis Bazin
"""
print("\nLevelset Curvature")
# make sure that saving related parameters are correct
if save_data:
output_dir = _output_dir_4saving(output_dir, levelset_image)
mcurv_file = os.path.join(output_dir,
_fname_4saving(module=__name__,file_name=file_name,
rootfile=levelset_image,
suffix='curv-mean'))
gcurv_file = os.path.join(output_dir,
_fname_4saving(module=__name__,file_name=file_name,
rootfile=levelset_image,
suffix='curv-gauss'))
if overwrite is False \
and os.path.isfile(mcurv_file) \
and os.path.isfile(gcurv_file) :
print("skip computation (use existing results)")
output = {'mcurv': mcurv_file, 'gcurv': gcurv_file}
|
return output
# load the data
lvl_img = load_volume(levelset_image)
lvl_data = lvl_img.get_data()
hdr = lvl_img.header
aff = lvl_img.affine
resolution = [x.item() for x in hdr.get_zooms()]
dimensions = lvl_data.shape
# algorithm
# start virtual machine, if not already running
try:
mem = _check_available_memory()
nighresjava.initVM(initialheap=mem['init'], maxheap=mem['max'])
except ValueError:
|
pass
# create algorithm instance
algorithm = nighresjava.LevelsetCurvature()
# set parameters
algorithm.setMaxDistance(distance)
# load images and set dimensions and resolution
input_image = load_volume(levelset_image)
data = input_image.get_data()
affine = input_image.get_affine()
header = input_image.get_header()
resolution = [x.item() for x in header.get_zooms()]
dimensions = input_image.shape
algorithm.setDimensions(dimensions[0], dimensions[1], dimensions[2])
algorithm.setResolutions(resolution[0], resolution[1], resolution[2])
algorithm.setLevelsetImage(nighresjava.JArray('float')(
(data.flatten('F')).astype(float)))
# execute
try:
algorithm.execute()
except:
# if the Java module fails, reraise the error it throws
print("\n The underlying Java code did not execute cleanly: ")
print(sys.exc_info()[0])
raise
return
# Collect output
mcurv_data = np.reshape(np.array(
algorithm.getMeanCurvatureImage(),
dtype=np.float32), dimensions, 'F')
gcurv_data = np.reshape(np.array(
algorithm.getGaussCurvatureImage(),
dtype=np.float32), dimensions, 'F')
hdr['cal_min'] = np.nanmin(mcurv_data)
hdr['cal_max'] = np.nanmax(mcurv_data)
mcurv = nb.Nifti1Image(mcurv_data, aff, hdr)
hdr['cal_min'] = np.nanmin(gcurv_data)
hdr['cal_max'] = np.nanmax(gcurv_data)
gcurv = nb.Nifti1Image(gcurv_data, aff, hdr)
if save_data:
save_volume(mcurv_file, mcurv)
save_volume(gcurv_file, gcurv)
return {'mcurv': mcurv_file, 'gcurv': gcurv_file}
else:
return {'mcurv': mcurv, 'gcurv': gcurv}
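# Illustrative usage sketch (not part of the nighres module above); the file name,
# distance and output directory are hypothetical values.
#   results = levelset_curvature("sub-01_levelset.nii.gz", distance=2.0,
#                                save_data=True, output_dir="curvature_out")
#   mean_curv, gauss_curv = results["mcurv"], results["gcurv"]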
|
zmcartor/Algorithms
|
Python/better_inversion_count.py
|
Python
|
mit
| 485
| 0.012371
|
# based on killer algo found here:
# http://codereview.stackexchange.com/questions/12922/inversion-count-using-merge-sort
import sys, bisect
input_list = map(int,open(sys.argv[1]))
sorted_list = sorted(input_list)
inversions = 0
# we compare the unsorted list to the sorted list
# to compute inversion count, neat!
for d in input_list:
#locate insertion point in sorted_list for d
p = bisect.bisect_left(sorted_list,d)
inversions += p
    sorted_list.pop(p)  # remove d so later lookups only count elements still pending
print inversions
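# Worked example (illustrative, not in the original script): for input [3, 1, 2]
# sorted_list starts as [1, 2, 3].
#   d=3: bisect_left -> 2, inversions = 2, sorted_list becomes [1, 2]
#   d=1: bisect_left -> 0, inversions = 2, sorted_list becomes [2]
#   d=2: bisect_left -> 0, inversions = 2, sorted_list becomes []
# giving 2, matching the inversions (3, 1) and (3, 2).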
|
butterscotchstallion/SpiffyRPG
|
SpiffyRPG/SpiffyWorld/models/unit_dialogue.py
|
Python
|
mit
| 1,421
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class UnitDialogue:
"""
Unit dialogue model
"""
def __init__(self, **kwargs):
self.db = kwargs["db"]
self.dialogue = {}
    def _get_unit_dialogue_map(self, dialogue):
unit_dialogue_map = {}
for unit_dialogue in dialogue:
unit_id = unit_dialogue["unit_id"]
if unit_id not in unit_dialogue_map:
unit_dialogue_map[unit_id] = []
            unit_dialogue_map[unit_id].append(unit_dialogue["dialogue_id"])
return unit_dialogue_map
def get_unit_dialogue(self):
"""
Get unit dialogue IDs. Those will be queried
against the dialogue collection to get the rest
of the dialogue information
"""
cursor = self.db.cursor()
cursor.execute("""SELECT
ud.id,
ud.id AS dialogue_id,
ud.unit_id,
ud.dialogue,
ud.context
FROM spiffyrpg_unit_dialogue ud
LEFT JOIN spiffyrpg_units u ON u.id = ud.unit_id""")
tmp_dialogue = cursor.fetchall()
cursor.close()
dialogue = []
if tmp_dialogue:
for e in tmp_dialogue:
dia = dict(e)
dialogue.append(dia)
return dialogue
|
marceloomens/appointments
|
appointments/apps/common/views.py
|
Python
|
mit
| 7,355
| 0.007886
|
from django.conf import settings
from django.contrib import messages
from django.forms import Form
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import ugettext as _
import dateutil.parser, json
from itsdangerous import BadSignature
from appointments.apps.timeslots.models import Action, Constraint
from appointments.apps.timeslots.utils import strfdate, strftime, strptime, is_available
from .forms import ReminderForm
from .models import Appointment, User
from .utils import get_logger, get_serializer, send_confirmation, send_receipt, send_reminder
# Create your views here.
def book(request):
logger = get_logger(__name__, request)
if 'POST' == request.method and request.is_ajax():
fields = json.loads(request.body)
try:
user = User.objects.get(email__iexact=fields['email'])
except KeyError:
# This is an error; time to log, then fail
logger.warning("Bad form submission: KeyError (email)")
return HttpResponseBadRequest()
except User.DoesNotExist:
user = User(email=fields['email'], is_active=False)
user.save()
logger.info("New user %s" % (str(user)))
try:
action = Action.objects.get(slug=fields['action'])
except (KeyError, Action.DoesNotExist):
logger.warning("Bad form submission: KeyError (action) or Action.DoesNotExist")
# This is an error; time to log, then fail
return HttpResponseBadRequest()
try:
constraint = Constraint.objects.get(slug=fields['constraint'])
except (KeyError, Constraint.DoesNotExist):
# This is an error; time to log, then fail
logger.warning("Bad form submission: KeyError (constraint) or Constraint.DoesNotExist")
return HttpResponseBadRequest()
if action not in constraint.actions.all():
# This is an error; time to log, then fail
logger.warning("Bad form submission: bad constraint/action combination")
return HttpResponseBadRequest()
# Ignore timezone to prevent one-off problems
try:
date = dateutil.parser.parse(fields['date'], ignoretz=True).date()
time = strptime(fields['time'])
except KeyError:
# This is an error; time to log, then fail
logger.warning("Bad form submission: KeyError (date and/or time)")
return HttpResponseBadRequest()
# Check if timeslot is available
if not is_available(constraint, date, time):
# Return some meaningful JSON to say that time is not available
logger.warning("Bad form submission: timeslot not available")
return HttpResponseBadRequest()
# Preprocess sex to ensure it's a valid value
sex = fields['sex'][0].upper() if fields.get('sex', None) else None
if sex not in ['M', 'F']:
sex = ''
appointment = Appointment(
user=user,
action=action,
constraint=constraint,
date=date,
time=time,
# Optional fields...
first_name=fields.get('first_name',''),
last_name=fields.get('last_name',''),
nationality = fields.get('nationality',''),
sex=sex,
# See if this works without any changes...
identity_number=fields.get('identity_number',''),
document_number=fields.get('document_number',''),
phone_number=fields.get('phone_number',''),
mobile_number=fields.get('mobile_number',''),
comment=fields.get('comment',''),
)
# Save the appointment; then log it
appointment.save()
logger.info("New appointment by %s in %s/%s on %s at %s" % (
str(appointment.user),
appointment.constraint.key.slug,
appointment.constraint.slug,
strfdate(appointment.date),
strftime(appointment.time),
)
)
send_receipt(appointment)
messages.success(request, _("We've send you an e-mail receipt. Please confirm your appointment by following the instructions."))
# Return some JSON...
return HttpResponse("Ok")
elif 'POST' == request.method:
logger.warning("XMLHttpRequest header not set on POST request")
return HttpResponseBadRequest("XMLHttpRequest (AJAX) form submissions only please!")
return render(request, 'book.html')
def cancel(request, payload):
from itsdangerous import BadSignature
s = get_serializer()
try:
appointment_id = s.loads(payload)
except BadSignature:
        raise Http404
appointment = get_object_or_404(Appointment, pk=appointment_id)
if appointment.is_cancelled():
messages.warning(request, _("You've already cancelled this appointment."))
return redirect('finish')
if 'POST' == request.method:
form = Form(request.POST)
if form.is_valid():
appointment.cancel()
messages.info(request, _("You successfully cancelled your appointment."))
return redirect('finish')
        # Returning the exception class is not a valid response; raise it instead
        raise Http404
form = Form()
return render(request, 'cancel.html', {'form': form})
def confirm(request, payload):
s = get_serializer()
try:
appointment_id = s.loads(payload)
except BadSignature:
        raise Http404
appointment = get_object_or_404(Appointment, pk=appointment_id)
if appointment.is_cancelled():
messages.error(request, _("You cannot reconfirm a cancelled appointment. Please book again."))
elif appointment.is_confirmed():
messages.warning(request, _("Thank you, no need to reconfirm."))
else:
appointment.confirm()
appointment.user.verify()
send_confirmation(appointment)
messages.success(request, _("Thank you for confirming your appointment."))
return redirect('finish')
def reminder(request):
if 'POST' == request.method:
form = ReminderForm(request.POST)
        if form.is_valid():
email = form.cleaned_data['email']
try:
user = User.objects.get(email=email)
                date = timezone.now().date()
appointments = user.appointments.filter(date__gte=date)
send_reminder(user, appointments)
except User.DoesNotExist:
pass
messages.success(request, _("We'll send you an e-mail with all your appointments."))
return redirect('finish')
else:
form = ReminderForm()
return render(request, 'reminder.html', {'form': form})
# Custom error views
def handler404(request):
return render(request, '404.html')
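# Illustrative JSON body for the book() view above; the field names come from the
# view code, all values are made up:
# {
#     "email": "jane@example.org",
#     "action": "some-action-slug",
#     "constraint": "some-constraint-slug",
#     "date": "2014-06-02",
#     "time": "10:30",
#     "first_name": "Jane",
#     "sex": "F"
# }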
|
habanero-rice/hclib
|
test/performance-regression/full-apps/qmcpack/nexus/library/superstring.py
|
Python
|
bsd-3-clause
| 11,640
| 0.029639
|
'''
superstring
a collection of functions to manipulate strings
general purpose
next_visible_character
remove_whitespace
shrink_whitespace
var2string
string2array
is_string
stringmap
stringbreak
find_matching_brace
remove_comment_lines
contains_any
contains_all
C/C++
find_preprocessor_end
find_commend_block_end
find_matching_cbrace
'''
from numpy import array
import sys
import string
#/////////////////////////////////////////////////
#/////// general purpose ///////
#/////////////////////////////////////////////////
def next_visible_character(string,start,end):
i = start
character_visible = False
while not character_visible and i<end:
c = string[i]
character_visible = c!=' ' and c!='\t' and c!='\n'
i+=1
#end while
if character_visible:
vis_char = c
vis_loc = i-1
else:
vis_char = ''
vis_loc = -1
    #end if
    return (vis_char,vis_loc)
#end def next_visible_character
def remove_whitespace(s):
sr = s.replace('\n','').replace('\t','').replace(' ','')
    return sr
#end def remove_whitespace
def shrink_whitespace(si):
sw = si.strip().replace('\n','')
lst = sw.split(' ')
s = ''
for t in lst:
if(t!=''):
s += t+' '
#end if
#end for
return s
#end def shrink_whitespace
def var2string(v):
vt = type(v)
nt = type(None)
st = type(str(1))
it = type(1)
rt = type(1.0)
at = type(array([[1]]))
simple_set = set([nt,st,it,rt])
s = ''
if(vt == at):
(n,m) = v.shape
for i in range(n):
for j in range(m):
s += str(v[i,j]) + ' '
#end for
s += '\n'
#end for
elif(vt in simple_set):
s = str(v)
else:
print 'ERROR: in var2string'
print ' type '+str(vt)+' not implemented'
sys.exit()
#end if
return s
#end def var2string
#string2val = lambda x: x.isalpha() and x \
# or x.isdigit() and int(x) \
# or x.isalnum() and x \
# or len(set(string.punctuation).intersection(x)) == 1 and x.count('.') == 1 and float(x) \
# or x
def sbool(var):
if var=='True':
return True
elif var=='False':
return False
else:
return var
#end if
#end def sbool
def is_bool(var):
return var==True or var==False or var in ['True','False']
#end def is_bool
def is_int(var):
try:
int(var)
return True
except ValueError:
return False
#end def is_int
def is_float(var):
try:
float(var)
return True
except ValueError:
return False
#end def is_float
def is_array(var,type,delim=None):
try:
if isinstance(var,str):
array(var.split(delim),type)
else:
array(var,type)
#end if
return True
except ValueError:
return False
#end def is_array
def string2val(s,delim=None):
if is_bool(s):
val = sbool(s)
elif is_int(s):
val = int(s)
elif is_float(s):
val = float(s)
elif is_array(s,int,delim):
val = array(s.split(delim),int)
elif is_array(s,float,delim):
val = array(s.split(delim),float)
else:
val = s
#end if
return val
#end def string2val
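# Illustrative conversions, derived from the helpers above (not in the original module):
#   string2val('True')   -> True
#   string2val('42')     -> 42
#   string2val('3.5')    -> 3.5
#   string2val('1 2 3')  -> array([1, 2, 3])
#   string2val('hello')  -> 'hello'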
def string2array(string):
ilst = string.strip().split(' ')
lst = []
for l in ilst:
if(l.strip()!=''):
lst.append(float(l))
#end if
#end for
return array(lst)
#end def string2array
def is_string(var):
return type(var)==type("s")
#end def is_string
def stringmap(s):
smap=[]
quotes=set(['"',"'"])
altquote={'"':"'","'":'"'}
instr=False
depth=0
for i in range(len(s)):
c=s[i]
if not instr and c in quotes:
instr=True
lastquote=c
depth=1
direction=1
elif instr and c in quotes:
if c!=altquote[lastquote]:
direction=-1
#end if
lastquote=c
depth+=direction
#end if
smap+=[instr]
if depth==0:
instr=False
#end if
#end for
return smap
#end def stringmap
def stringbreak(s,delimiter):
strings=[]
blocks=''
strstart=s.startswith('"') or s.startswith("'")
nblocks=0
smap=[]
quotes=set(['"',"'"])
altquote={'"':"'","'":'"'}
instr=False
bstart=0
depth=0
for i in range(len(s)):
c=s[i]
if not instr and c in quotes:
instr=True
lastquote=c
depth=1
direction=1
sstart=i
bend=i
if bend>0:
blocks+=s[bstart:bend]+delimiter
#end if
elif instr and c in quotes:
if c!=altquote[lastquote]:
direction=-1
#end if
lastquote=c
depth+=direction
#end if
#smap+=[instr]
if depth==0 and instr:
send=i+1
strings+=[s[sstart:send]]
instr=False
bstart=send
#end if
#end for
if not instr:
bend=len(s)
blocks+=s[bstart:bend]+delimiter
#end if
return strings,blocks,strstart
#end def stringbreak
def find_matching_brace(string,start,end):
brace_dict = dict( [ ('(',')'), ('[',']'), ('{','}'), ('<','>') ] )
left_brace = string[start]
right_brace = brace_dict[left_brace]
found_match = False
i = start + 1
left_scope = 0
right_scope = 0
while not found_match and i<end:
if string[i]==left_brace:
right_scope+=1
elif string[i]==right_brace:
found_match = right_scope==left_scope
right_scope-=1
#end if
i+=1
#end while
if found_match:
brace_loc = i-1
else:
brace_loc = -1
#end if
return brace_loc
#end def find_matching_brace
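# Illustrative example (not in the original module):
#   find_matching_brace('f(a, (b+c), d)', 1, 14)  ->  13
# i.e. the index of the ')' matching the '(' at position 1.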
def find_matching_pair(s,pair,start=0,end=-1):
if end==-1:
end=len(s)
#end if
left = pair[0]
right = pair[1]
llen=len(left)
rlen=len(right)
ileft = s.find(left,start,end)
iright = -1
if ileft==-1:
return ileft,iright
else:
i=ileft+llen
left_scope = 0
right_scope = 0
found_match = False
failed = False
while not found_match and i<end:
nleft = s.find(left,i,end)
nright = s.find(right,i,end)
if nleft!=-1 and nleft<nright:
right_scope+=1
i=nleft+llen
elif nright!=-1:
found_match = right_scope==left_scope
right_scope-=1
i=nright+rlen
elif nright==-1:
failed=True
break
#end if
#end while
if found_match:
iright = i
#end if
if failed:
ileft,iright=-1,-1
#end if
#end if
return ileft,iright
#end def find_matching_pair
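# Illustrative example (not in the original module):
#   find_matching_pair('a {b {c} d} e', ('{', '}'))  ->  (2, 11)
# i.e. the start index of '{' and the index just past its matching '}'.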
def remove_pair_sections(s,pair):
sc=s
ir=0
n=0
while ir!=-1 and n<10:
il,ir = find_matching_pair(sc,pair)
sc=sc.replace(sc[il:ir],'')
#end while
return sc
#end def
def remove_comment_lines(comment_char,s_in):
lines = s_in.splitlines()
s_out=''
for l in lines:
if not l.strip().startswith(comment_char):
s_out=s_out+l+'\n'
#end if
    #end for
    return s_out
#end def remove_comment_lines
def remove_empty_lines(s):
sr=''
lines = s.splitlines()
for l in lines:
if l.strip()!='':
sr+=l + '\n'
#end if
#end for
return sr
#end def remove_empty_lines
def contains_any(str, set):
for c in set:
if c in str: return 1;
return 0;
#end def contains_any
def contains_all(str, set):
for c in set:
if c not in str: return 0;
return 1;
#end def contains_all
invalid_variable_name_chars=set('!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}-\n\t ')
def valid_variable_
|
Solid-Mechanics/matplotlib-4-abaqus
|
matplotlib/sphinxext/plot_directive.py
|
Python
|
mit
| 27,667
| 0.002205
|
"""
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
   Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
    directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behaviour
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
    Provide a customized template for preparing restructured text.
"""
from __future__ import print_function
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap
import traceback
from docutils.parsers.rst import directives
from docutils import nodes
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
# os.path.relpath is new in Python 2.6
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
jonparrott/botocore | botocore/hooks.py | Python | mit | 10,052 | 0
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import inspect
import six
from collections import defaultdict, deque
import logging
logger = logging.getLogger(__name__)
def first_non_none_response(responses, default=None):
"""Find first non None response in a list of tuples.
This function can be used to find the first non None response from
handlers connected to an event. This is useful if you are interested
in the returned responses from event handlers. Example usage::
print(first_non_none_response([(func1, None), (func2, 'foo'),
(func3, 'bar')]))
# This will print 'foo'
:type responses: list of tuples
:param responses: The responses from the ``EventHooks.emit`` method.
This is a list of tuples, and each tuple is
(handler, handler_response).
:param default: If no non-None responses are found, then this default
value will be returned.
:return: The first non-None response in the list of tuples.
"""
for response in responses:
        if response[1] is not None:
return response[1]
return default
class BaseEventHooks(object):
def emit(self, event_name, **kwargs):
return []
def register(self, event_name, handler):
self._verify_is_callable(handler)
self._verify_accept_kwargs(handler)
self._register(event_name, handler)
def unregister(self, event_name, handler):
pass
def _verify_is_callable(self, func):
if not six.callable(func):
raise ValueError("Event handler %s must be callable." % func)
def _verify_accept_kwargs(self, func):
"""Verifies a callable accepts kwargs
:type func: callable
:param func: A callable object.
:returns: True, if ``func`` accepts kwargs, otherwise False.
"""
try:
argspec = inspect.getargspec(func)
except TypeError:
return False
else:
if argspec[2] is None:
raise ValueError("Event handler %s must accept keyword "
"arguments (**kwargs)" % func)
class EventHooks(BaseEventHooks):
def __init__(self):
# event_name -> [handler, ...]
self._handlers = defaultdict(list)
def emit(self, event_name, **kwargs):
"""Call all handlers subscribed to an event.
:type event_name: str
:param event_name: The name of the event to emit.
:type **kwargs: dict
:param **kwargs: Arbitrary kwargs to pass through to the
subscribed handlers. The ``event_name`` will be injected
            into the kwargs so it's not necessary to add this to **kwargs.
:rtype: list of tuples
:return: A list of ``(handler_func, handler_func_return_value)``
"""
kwargs['event_name'] = event_name
responses = []
for handler in self._handlers[event_name]:
response = handler(**kwargs)
responses.append((handler, response))
return responses
def _register(self, event_name, handler):
self._handlers[event_name].append(handler)
def unregister(self, event_name, handler):
try:
self._handlers[event_name].remove(handler)
except ValueError:
pass
class HierarchicalEmitter(BaseEventHooks):
def __init__(self):
# We keep a reference to the handlers for quick
# read only access (we never modify self._handlers).
# A cache of event name to handler list.
self._lookup_cache = {}
self._handlers = _PrefixTrie()
def emit(self, event_name, **kwargs):
responses = []
# Invoke the event handlers from most specific
# to least specific, each time stripping off a dot.
logger.debug('emit: %s' % event_name)
if event_name in self._lookup_cache:
handlers_to_call = self._lookup_cache[event_name]
else:
handlers_to_call = self._handlers_for_event(event_name)
self._lookup_cache[event_name] = handlers_to_call
kwargs['event_name'] = event_name
responses = []
for handler in handlers_to_call:
logger.debug('emit: calling %s' % handler)
response = handler(**kwargs)
responses.append((handler, response))
return responses
def _handlers_for_event(self, event):
return self._handlers.prefix_search(event)
def _register(self, event_name, handler):
# Super simple caching strategy for now, if we change the registrations
# clear the cache. This has the opportunity for smarter invalidations.
self._handlers.append_item(event_name, handler)
self._lookup_cache = {}
def unregister(self, event_name, handler):
try:
self._handlers.remove_item(event_name, handler)
self._lookup_cache = {}
except ValueError:
pass
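# Hedged usage sketch of the hierarchical dispatch above (the event and handler
# names are made up for illustration; handlers must accept **kwargs):
#
#     emitter = HierarchicalEmitter()
#     emitter.register('service.operation', log_call)       # less specific
#     emitter.register('service.operation.complete', done)  # more specific
#     emitter.emit('service.operation.complete', result=42)
#     # 'done' runs before 'log_call': handlers fire from most specific
#     # to least specific, as documented in emit() above.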
class _PrefixTrie(object):
"""Specialized prefix trie that handles wildcards.
The prefixes in this case are based on dot separated
names so 'foo.bar.baz' is::
foo -> bar -> baz
Wildcard support just means that having a key such as 'foo.bar.*.baz' will
be matched with a call to ``get_items(key='foo.bar.ANYTHING.baz')``.
    You can think of this prefix trie as the equivalent of defaultdict(list),
except that it can do prefix searches:
foo.bar.baz -> A
foo.bar -> B
foo -> C
Calling ``get_items('foo.bar.baz')`` will return [A + B + C], from
most specific to least specific.
"""
def __init__(self):
self._root = _Node(None, None)
def append_item(self, key, value):
"""Add an item to a key.
If a value is already associated with that key, the new
value is appended to the list for the key.
"""
key_parts = key.split('.')
current = self._root
for part in key_parts:
if part not in current.children:
new_child = _Node(part)
current.children[part] = new_child
current = new_child
else:
current = current.children[part]
if current.values is None:
current.values = [value]
else:
current.values.append(value)
def prefix_search(self, key):
"""Collect all items that are prefixes of key.
Prefix in this case are delineated by '.' characters so
'foo.bar.baz' is a 3 chunk sequence of 3 "prefixes" (
"foo", "bar", and "baz").
"""
collected = deque()
key_parts = key.split('.')
current = self._root
self._get_items(current, key_parts, collected, index=0)
return collected
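    # Hedged illustration of prefix_search, following the class docstring
    # (the keys and values here are invented for the example):
    #
    #     trie = _PrefixTrie()
    #     trie.append_item('foo', 'C')
    #     trie.append_item('foo.bar', 'B')
    #     trie.append_item('foo.bar.baz', 'A')
    #     trie.prefix_search('foo.bar.baz')  # -> deque(['A', 'B', 'C']),
    #                                        #    most specific first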
def remove_item(self, key, value):
"""Remove an item associated with a key.
If the value is not associated with the key a ``ValueError``
will be raised. If the key does not exist in the trie, a
``ValueError`` will be raised.
"""
kbuschme/irony-detection | setup.py | Python | gpl-3.0 | 301 | 0.009967
# -*- coding: utf-8 -*-
from __future__ import print_function
from nltk import download
TOKENIZER_MODEL = "punkt"
POS_TAGGER = "maxent_treebank_pos_tagger"
def downloadDependencies():
download(TOKENIZER_MODEL)
download(POS_TAGGER)
if __name__ == '__main__':
downloadDependencies()
coreycb/charms.openstack | charms_openstack/plugins/__init__.py | Python | apache-2.0 | 1,179 | 0
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Pull in helpers that 'charms_openstack.plugins' will export
from charms_openstack.plugins.adapters import (
CephRelationAdapter,
)
from charms_openstack.plugins.classes import (
BaseOpenStackCephCharm,
CephCharm,
PolicydOverridePlugin,
)
from charms_openstack.plugins.trilio import (
TrilioVaultCharm,
TrilioVaultSubordinateCharm,
TrilioVaultCharmGhostAction,
)
__all__ = (
"BaseOpenStackCephCharm",
"CephCharm",
"CephRelationAdapter",
"PolicydOverridePlugin",
"TrilioVaultCharm",
"TrilioVaultSubordinateCharm",
"TrilioVaultCharmGhostAction",
)
aeklant/scipy | scipy/stats/_rvs_sampling.py | Python | bsd-3-clause | 7,080 | 0.000141
import numpy as np
import warnings
from scipy._lib._util import check_random_state
def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
"""
Generate random samples from a probability density function using the
ratio-of-uniforms method.
Parameters
----------
pdf : callable
A function with signature `pdf(x)` that is the probability
density function of the distribution.
umax : float
The upper bound of the bounding rectangle in the u-direction.
vmin : float
The lower bound of the bounding rectangle in the v-direction.
vmax : float
The upper bound of the bounding rectangle in the v-direction.
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
c : float, optional.
Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
If `random_state` is `None` the `~np.random.RandomState` singleton is
used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with random_state.
If `random_state` is already a ``RandomState`` or ``Generator``
instance, then that object is used.
Default is None.
Returns
-------
rvs : ndarray
The random variates distributed according to the probability
distribution defined by the pdf.
Notes
-----
Given a univariate probability density function `pdf` and a constant `c`,
define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
If `(U, V)` is a random vector uniformly distributed over `A`,
then `V/U + c` follows a distribution according to `pdf`.
The above result (see [1]_, [2]_) can be used to sample random variables
using only the pdf, i.e. no inversion of the cdf is required. Typical
choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of
the rectangle ``R = [0, umax] x [vmin, vmax]`` where
- ``umax = sup sqrt(pdf(x))``
- ``vmin = inf (x - c) sqrt(pdf(x))``
- ``vmax = sup (x - c) sqrt(pdf(x))``
In particular, these values are finite if `pdf` is bounded and
``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
One can generate `(U, V)` uniformly on `R` and return
`V/U + c` if `(U, V)` are also in `A` which can be directly
verified.
Intuitively, the method works well if `A` fills up most of the
enclosing rectangle such that the probability is high that `(U, V)`
lies in `A` whenever it lies in `R` as the number of required
iterations becomes too large otherwise. To be more precise, note that
the expected number of iterations to draw `(U, V)` uniformly
distributed on `R` such that `(U, V)` is also in `A` is given by
the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin)``, using the fact
that the area of `A` is equal to 1/2 (Theorem 7.1 in [1]_). A warning
is displayed if this ratio is larger than 20. Moreover, if the sampling
fails to generate a single random variate after 50000 iterations (i.e.
not a single draw is in `A`), an exception is raised.
If the bounding rectangle is not correctly specified (i.e. if it does not
contain `A`), the algorithm samples from a distribution different from
the one given by `pdf`. It is therefore recommended to perform a
test such as `~scipy.stats.kstest` as a check.
References
----------
.. [1] L. Devroye, "Non-Uniform Random Variate Generation",
Springer-Verlag, 1986.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
.. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
Variables Using the Ratio of Uniform Deviates",
ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.
Examples
--------
>>> from scipy import stats
Simulate normally distributed random variables. It is easy to compute the
bounding rectangle explicitly in that case.
>>> f = stats.norm.pdf
>>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
    >>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
>>> np.random.seed(12345)
>>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500)
The K-S test confirms that the random variates are indeed normally
distributed (normality is not rejected at 5% significance level):
>>> stats.kstest(rvs, 'norm')[1]
0.3420173467307603
The exponential distribution provides another example where the bounding
rectangle can be determined explicitly.
    >>> np.random.seed(12345)
>>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
... vmin=0, vmax=2*np.exp(-1), size=1000)
>>> stats.kstest(rvs, 'expon')[1]
0.928454552559516
Sometimes it can be helpful to use a non-zero shift parameter `c`, see e.g.
[2]_ above in the case of the generalized inverse Gaussian distribution.
"""
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
exp_iter = 2 * (vmax - vmin) * umax # rejection constant (see [1])
if exp_iter > 20:
msg = ("The expected number of iterations to generate a single random "
"number from the desired distribution is larger than {}, "
"potentially causing bad performance.".format(int(exp_iter)))
warnings.warn(msg, RuntimeWarning)
size1d = tuple(np.atleast_1d(size))
N = np.prod(size1d) # number of rvs needed, reshape upon return
# start sampling using ratio of uniforms method
rng = check_random_state(random_state)
x = np.zeros(N)
simulated, i = 0, 1
# loop until N rvs have been generated: expected runtime is finite
# to avoid infinite loop, raise exception if not a single rv has been
# generated after 50000 tries. even if exp_iter = 1000, probability of
# this event is (1-1/1000)**50000 which is of order 10e-22
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u1 = umax * rng.uniform(size=k)
v1 = rng.uniform(vmin, vmax, size=k)
# apply rejection method
rvs = v1 / u1 + c
accept = (u1**2 <= pdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated in {} "
"attempts. The ratio of uniforms method does not appear "
"to work for the provided parameters. Please check the "
"pdf and the bounds.".format(i*N))
raise RuntimeError(msg)
i += 1
return np.reshape(x, size1d)
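# A minimal sketch (not part of SciPy) of how the bounding rectangle from the
# Notes section above could be estimated numerically, assuming a bounded,
# vectorized pdf; the grid limits and resolution below are arbitrary choices.
def _estimate_ruv_bounds(pdf, c=0.0, lo=-50.0, hi=50.0, num=100001):
    """Approximate (umax, vmin, vmax) for the ratio-of-uniforms rectangle."""
    grid = np.linspace(lo, hi, num)
    root_pdf = np.sqrt(pdf(grid))
    v = (grid - c) * root_pdf
    # umax = sup sqrt(pdf(x)); vmin/vmax = inf/sup of (x - c) * sqrt(pdf(x))
    return root_pdf.max(), v.min(), v.max()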
karan259/GrovePi | Software/Python/grove_thumb_joystick.py | Python | mit | 3,278 | 0.003661
#!/usr/bin/env python
#
# GrovePi Example for using the Grove Thumb Joystick (http://www.seeedstudio.com/wiki/Grove_-_Thumb_Joystick)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Grove Thumb Joystick to analog port A0
# GrovePi Port A0 uses Arduino pins 0 and 1
# GrovePi Port A1 uses Arduino pins 1 and 2
# Don't plug anything into port A1 that uses pin 1
# Most Grove sensors only use 3 of their 4 pins, which is why the GrovePi shares Arduino pins between adjacent ports
# If the sensor has a pin definition SIG,NC,VCC,GND, the second (white) pin is not connected to anything
# If you wish to connect two joysticks, use ports A0 and A2 (skip A1)
# Uses two pins - one for the X axis and one for the Y axis
# This configuration means you are using port A0
xPin = 0
yPin = 1
grovepi.pinMode(xPin,"INPUT")
grovepi.pinMode(yPin,"INPUT")
# The Grove Thumb Joystick is an analog device that outputs analog signal ranging from 0 to 1023
# The X and Y axes are two ~10k potentiometers and a momentary push button which shorts the x axis
# My joystick produces slightly different results from the specifications found at the URL above
# I've listed both here:
# Specifications
# Min Typ Max Click
# X 206 516 798 1023
# Y 203 507 797
# My Joystick
# Min Typ Max Click
# X 253 513 766 1020-1023
# Y 250 505 769
while True:
try:
# Get X/Y coordinates
x = grovepi.analogRead(xPin)
y = grovepi.analogRead(yPin)
# Calculate X/Y resistance
Rx = (float)(1023 - x) * 10 / x
Ry = (float)(1023 - y) * 10 / y
# Was a click detected on the X axis?
click = 1 if x >= 1020 else 0
print("x =", x, " y =", y, " Rx =", Rx, " Ry =", Ry, " click =", click)
time.sleep(.5)
except IOError:
print ("Error")
google/upvote_py2 | common/ng_template.py | Python | apache-2.0 | 1,657 | 0.01026
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invokes html2js and appends goog.provide.
https://www.npmjs.com/package/html2js
"""
import subprocess
import sys
def main(argv):
# path to html2js
html2js = argv[1]
# A string that will be stripped out of every filename in the template id.
strip_prefix = argv[2]
# A string to prepend to template paths.
prepend_prefix = argv[3]
# Name of AngularJS module that needs to be created.
module_name = argv[4]
# goog module name.
goog_provide = argv[5]
# remaining args interpreted as html location.
html_paths = argv[6:]
result = ["goog.provide('{}');".format(goog_provide)]
for src in html_paths:
assert src.startswith(strip_prefix)
js = subprocess.check_output([html2js, src, '--module', module_name],
env={})
template_name = prepend_prefix + src[len(strip_prefix):]
js = js.replace(src, template_name)
result.append(js)
result.append("{} = angular.module('{}');".format(goog_provide, module_name))
print '\n'.join(result)
if __name__ == '__main__':
main(sys.argv)
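# Hedged example invocation, following the argv layout parsed in main() above
# (the html2js path, prefixes, module and provide names are all illustrative):
#
#     python ng_template.py node_modules/.bin/html2js src/ app/ \
#         my.templates my.module.templates src/foo.html src/bar.html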
FrodeSolheim/fs-uae-launcher | amitools/vamos/libcore/create.py | Python | gpl-2.0 | 3,188 | 0.008783
from amitools.vamos.astructs import LibraryStruct
from amitools.vamos.atypes import Library, NodeType
from amitools.fd import read_lib_fd, generate_fd
from .vlib import VLib
from .stub import LibStubGen
from .patch import LibPatcherMultiTrap
from .impl import LibImplScanner
class LibCreator(object):
"""create a vamos internal libs"""
def __init__(self, alloc, traps,
fd_dir=None,
log_missing=None, log_valid=None,
lib_profiler=None):
self.alloc = alloc
self.traps = traps
# options
self.fd_dir = fd_dir
self.profiler = lib_profiler
self.stub_gen = LibStubGen(log_missing=log_missing, log_valid=log_valid)
def _create_library(self, info, is_dev, fd):
if is_dev:
ltype = NodeType.NT_DEVICE
else:
ltype = NodeType.NT_LIBRARY
name = info.get_name()
id_str = info.get_id_string()
neg_size = info.get_neg_size()
pos_size = info.get_pos_size()
library = Library.alloc(self.alloc, name, id_str, neg_size, pos_size, fd)
version = info.get_version()
revision = info.get_revision()
library.setup(version=version, revision=revision, type=ltype)
return library
def _generate_fake_fd(self, name, lib_cfg):
if lib_cfg:
num_calls = lib_cfg.num_fake_funcs
else:
num_calls = 0
return generate_fd(name, num_calls)
def get_profiler(self):
return self.profiler
    def create_lib(self, info, ctx, impl=None, lib_cfg=None, check=False):
name = info.get_name()
if name.endswith('.device'):
is_dev = True
elif name.endswith('.library'):
is_dev = False
else:
raise ValueError("create_lib: %s is neither lib nor dev!" % name)
# get fd: either read from fd or fake one
fd = read_lib_fd(name, self.fd_dir)
if fd is None:
            fd = self._generate_fake_fd(name, lib_cfg)
# if impl is available scan it
scan = None
if impl:
scanner = LibImplScanner()
if check:
scan = scanner.scan_checked(name, impl, fd, True)
else:
scan = scanner.scan(name, impl, fd, True)
# add profile?
if self.profiler:
# get some impl information
if scan:
func_tags = scan.get_func_tags()
else:
func_tags = None
profile = self.profiler.create_profile(name, fd, func_tags)
else:
profile = None
# create stub
if scan is None:
stub = self.stub_gen.gen_fake_stub(name, fd, ctx, profile)
struct = LibraryStruct
else:
stub = self.stub_gen.gen_stub(scan, ctx, profile)
struct = impl.get_struct_def()
# adjust info pos/neg size
if info.pos_size == 0:
info.pos_size = struct.get_size()
if info.neg_size == 0:
info.neg_size = fd.get_neg_size()
# allocate and init lib
library = self._create_library(info, is_dev, fd)
addr = library.get_addr()
# patcher
patcher = LibPatcherMultiTrap(self.alloc, self.traps, stub)
patcher.patch_jump_table(addr)
# fix lib sum
library.update_sum()
# create vamos lib and combine all pieces
vlib = VLib(library, info, struct, fd, impl,
stub, ctx, patcher, profile, is_dev)
return vlib
akhilaananthram/nupic.research | sound_encoder/live_sound_encoding_demo.py | Python | gpl-3.0 | 2,476 | 0.011712
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import pyaudio
import wave
from sound_encoder import SoundEncoder
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 10
def getAudioStream():
p = pyaudio.PyAudio()
return p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
def transformData(data, window):
    return np.array(wave.struct.unpack("%dh" % (len(data) / CHANNELS), data)) * window
def visualizeSDRs(sdrs):
sdrsToVisualize = []
for sdr in sdrs:
sdrsToVisualize.append([255 if x else 0 for x in sdr])
imageArray = np.rot90(np.array(sdrsToVisualize))
plt.imshow(imageArray, cmap='Greys', interpolation='nearest')
plt.show()
def recordAndEncode(stream, soundEncoder):
window = np.blackman(CHANNELS*CHUNK)
sdrs = []
print "---recording---"
for _ in range(0, (RATE/CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
transformedData = transformData(data, window)
sdr = soundEncoder.encode(transformedData)
sdrs.append(sdr)
stream.stop_stream()
stream.close()
print "---done---"
return sdrs
if __name__ == "__main__":
n = 300
w = 31
minval = 20
maxval = 10000
soundEncoder = SoundEncoder(n, w, RATE, CHUNK, minval, maxval)
stream = getAudioStream()
sdrs = recordAndEncode(stream, soundEncoder)
visualizeSDRs(sdrs)
sio2project/filetracker | filetracker/tests/interaction_test.py | Python | gpl-3.0 | 7,181 | 0.000418
"""Integration tests for client-server interaction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Process
import os
import shutil
import tempfile
import time
import unittest
from filetracker.client import Client, FiletrackerError
from filetracker.servers.run import main as server_main
_TEST_PORT_NUMBER = 45735
class InteractionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cache_dir = tempfile.mkdtemp()
cls.server_dir = tempfile.mkdtemp()
cls.temp_dir = tempfile.mkdtemp()
cls.server_process = Process(target=_start_server, args=(cls.server_dir,))
cls.server_process.start()
time.sleep(2) # give server some time to start
cls.client = Client(
cache_dir=cls.cache_dir,
remote_url='http://127.0.0.1:{}'.format(_TEST_PORT_NUMBER),
)
@classmethod
def tearDownClass(cls):
cls.server_process.terminate()
shutil.rmtree(cls.cache_dir)
shutil.rmtree(cls.server_dir)
shutil.rmtree(cls.temp_dir)
def setUp(self):
# Shortcuts for convenience
self.cache_dir = InteractionTest.cache_dir
self.server_dir = InteractionTest.server_dir
self.temp_dir = InteractionTest.temp_dir
self.client = InteractionTest.client
def test_put_file_should_save_file_both_locally_and_remotely(self):
temp_file = os.path.join(self.temp_dir, 'put.txt')
with open(temp_file, 'w') as tf:
tf.write('hello')
self.client.put_file('/put.txt', temp_file)
cache_path = os.path.join(self.cache_dir, 'files', 'put.txt')
remote_path = os.path.join(self.server_dir, 'links', 'put.txt')
self.assertTrue(os.path.exists(cache_path))
self.assertTrue(os.path.exists(remote_path))
with open(cache_path, 'r') as cf:
self.assertEqual(cf.read(), 'hello')
rf, _ = self.client.get_stream('/put.txt')
self.assertEqual(rf.read(), b'hello')
def test_get_file_should_raise_error_if_file_doesnt_exist(self):
temp_file = os.path.join(self.temp_dir, 'get_doesnt_exist.txt')
with self.assertRaises(FiletrackerError):
self.client.get_file('/doesnt_exist', temp_file)
def test_get_file_should_save_file_contents_to_destination(self):
src_file = os.path.join(self.temp_dir, 'get_src.txt')
dest_file = os.path.join(self.temp_dir, 'get_dest.txt')
with open(src_file, 'w') as sf:
sf.write('hello')
self.client.put_file('/get.txt', src_file)
self.client.get_file('/get.txt', dest_file)
with open(dest_file, 'r') as df:
self.assertEqual(df.read(), 'hello')
def test_get_stream_should_return_readable_stream(self):
src_file = os.path.join(self.temp_dir, 'streams.txt')
with open(src_file, 'wb') as sf:
sf.write(b'hello streams')
self.client.put_file('/streams.txt', src_file)
f, _ = self.client.get_stream('/streams.txt')
        self.assertEqual(f.read(), b'hello streams')
def test_big_files_should_be_handled_correctly(self):
# To be more precise, Content-Length header should be
# set to the actual size of the file.
src_file = os.path.join(self.temp_dir, 'big.txt')
with open(src_file, 'wb') as sf:
sf.write(b'r')
for _ in range(1024 * 1024):
sf.write(b'ee')
self.client.put_file('/big.txt', src_file)
f, _ = self.client.get_stream('/big.txt')
with open(src_file, 'rb') as sf:
self.assertEqual(sf.read(), f.read())
def test_file_version_should_be_set_to_current_time_on_upload(self):
src_file = os.path.join(self.temp_dir, 'version.txt')
with open(src_file, 'wb') as sf:
sf.write(b'hello version')
os.utime(src_file, (1, 1))
pre_upload = int(time.time())
self.client.put_file('/version.txt', src_file)
post_upload = int(time.time())
version = self.client.file_version('/version.txt')
self.assertNotEqual(version, 1)
self.assertTrue(pre_upload <= version <= post_upload)
def test_file_size_should_return_decompressed_size_without_cache(self):
src_file = os.path.join(self.temp_dir, 'size.txt')
with open(src_file, 'wb') as sf:
sf.write(b'hello size') # size = 10
self.client.put_file('/size.txt', src_file, to_local_store=False)
self.assertEqual(self.client.file_size('/size.txt'), len(b'hello size'))
def test_every_link_should_have_independent_version(self):
src_file = os.path.join(self.temp_dir, 'foo.txt')
with open(src_file, 'wb') as sf:
sf.write(b'hello foo')
self.client.put_file('/foo_a.txt', src_file)
time.sleep(1)
self.client.put_file('/foo_b.txt', src_file)
version_a = self.client.file_version('/foo_a.txt')
version_b = self.client.file_version('/foo_b.txt')
self.assertNotEqual(version_a, version_b)
def test_put_older_should_fail(self):
"""This test assumes file version is stored in mtime."""
src_file = os.path.join(self.temp_dir, 'older.txt')
with open(src_file, 'wb') as sf:
sf.write(b'version 1')
self.client.put_file('/older.txt@1', src_file)
with open(src_file, 'wb') as sf:
sf.write(b'version 2')
self.client.put_file('/older.txt@2', src_file)
with open(src_file, 'wb') as sf:
sf.write(b'version 3 (1)')
self.client.put_file('/older.txt@1', src_file)
f, _ = self.client.get_stream('/older.txt')
self.assertEqual(f.read(), b'version 2')
with self.assertRaises(FiletrackerError):
self.client.get_stream('/older.txt@1')
def test_get_nonexistent_should_404(self):
with self.assertRaisesRegexp(FiletrackerError, "404"):
self.client.get_stream('/nonexistent.txt')
def test_delete_nonexistent_should_404(self):
with self.assertRaisesRegexp(FiletrackerError, "404"):
self.client.delete_file('/nonexistent.txt')
def test_delete_should_remove_file_and_dir(self):
src_file = os.path.join(self.temp_dir, 'del.txt')
with open(src_file, 'wb') as sf:
sf.write(b'test')
self.client.put_file('/dir/del.txt', src_file)
self.client.delete_file('/dir/del.txt')
for d in (self.cache_dir, self.server_dir):
for f in ('files', 'locks'):
self.assertFalse(
os.path.exists(os.path.join(d, f, 'dir')),
"{}/{}/dir not deleted ({})".format(
d, f, d == self.cache_dir and "cache" or "server"
),
)
with self.assertRaisesRegexp(FiletrackerError, "404"):
self.client.get_stream('/dir/del.txt')
def _start_server(server_dir):
server_main(
['-p', str(_TEST_PORT_NUMBER), '-d', server_dir, '-D', '--workers', '4']
)
zozo123/buildbot | master/buildbot/test/unit/test_db_migrate_versions_021_fix_postgres_sequences.py | Python | gpl-3.0 | 2,676 | 0.000747
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy as sa
from buildbot.test.util import migration
from twisted.trial import unittest
class Migration(migration.MigrateTestMixin, unittest.TestCase):
def setUp(self):
return self.setUpMigrateTest()
def tearDown(self):
return self.tearDownMigrateTest()
cols = [
'buildrequests.id',
'builds.id',
'buildsets.id',
'changes.changeid',
'patches.id',
'sourcestampsets.id',
'sourcestamps.id',
'objects.id',
'users.uid',
]
# tests
def test_update(self):
def setup_thd(conn):
metadata = sa.MetaData()
metadata.bind = conn
            # insert a row into each table, giving an explicit id column so
# that the sequence is not advanced correctly, but leave no rows in
# one table to test that corner case
for i, col in enumerate(self.cols):
tbl_name, col_name = col.split('.')
                tbl = sa.Table(tbl_name, metadata,
                               sa.Column(col_name, sa.Integer, primary_key=True))
tbl.create()
if i > 1:
conn.execute(tbl.insert(), {col_name: i})
def verify_thd(conn):
metadata = sa.MetaData()
metadata.bind = conn
# try inserting *without* an ID, and verify that the resulting ID
# is as expected
for i, col in enumerate(self.cols):
tbl_name, col_name = col.split('.')
tbl = sa.Table(tbl_name, metadata,
sa.Column(col_name, sa.Integer, primary_key=True))
r = conn.execute(tbl.insert(), {})
if i > 1:
exp = i + 1
else:
exp = 1
self.assertEqual(r.inserted_primary_key[0], exp)
return self.do_test_migration(20, 21, setup_thd, verify_thd)
dmlc/tvm | tests/python/topi/python/test_topi_qnn.py | Python | apache-2.0 | 6,744 | 0.001779
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for QNN operators."""
import numpy as np
import tvm
from tvm import topi, relay, te
from tvm.contrib import graph_executor
import tvm.topi.testing
def verify_simulated_quantize(data_shape, out_dtype, channels, axis):
# Create placeholder variables for all qnn inputs.
A = te.placeholder(data_shape, name="value", dtype="float32")
D = te.placeholder([], name="dtype", dtype="int32")
S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
SIM_Q = topi.nn.simulated_quantize(A, D, output_scale=S, output_zero_point=Z, axis=axis)
# Create random numpy values to assign to inputs.
a_np = np.random.uniform(size=data_shape).astype("float32")
d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[out_dtype])
s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
q_np = np.zeros(shape=data_shape, dtype="float32")
def check_target(target, dev):
# Wrap the numpy arrays in nd arrays.
a = tvm.nd.array(a_np, dev)
d = tvm.nd.array(d_np, dev)
s = tvm.nd.array(s_np, dev)
z = tvm.nd.array(z_np, dev)
q = tvm.nd.array(q_np, dev)
# Construct equivalent relay graph.
per_channel = channels[0] != 1
a_var = relay.var("a", shape=data_shape, dtype="float32")
if per_channel:
s_var = relay.const(s_np)
z_var = relay.const(z_np)
else:
s_var = relay.const(s_np[0])
z_var = relay.const(z_np[0])
real_q_op = relay.qnn.op.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target)
# Get real qnn quantize output.
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("a", a_np)
m.run()
real_q_out = m.get_output(0)
# Compile the simulated quantize function.
with tvm.target.Target(target):
sched = tvm.topi.testing.get_injective_schedule(target)(SIM_Q)
func = tvm.build(sched, [A, D, S, Z, SIM_Q], target, name="sim_quantize")
func(a, d, s, z, q)
# Check correctness against the true qnn output.
mismatch = q.numpy() != real_q_out.numpy().astype("float32")
# Allow some rounding errors due to GPU fp32 arithmetic.
assert np.sum(mismatch) <= 3
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def test_simulated_quantize():
verify_simulated_quantize([1], "int8", [1], -1)
verify_simulated_quantize([2, 5], "int8", [5], 1)
verify_simulated_quantize([1, 32, 32, 32], "int8", [32], -1)
verify_simulated_quantize([1, 32, 32, 32], "uint8", [32], -2)
verify_simulated_quantize([2, 5], "int32", [5], 1)
def verify_simulated_dequantize(data_shape, in_dtype, channels, axis):
# Create placeholder variables for all qnn inputs.
A = te.placeholder(data_shape, name="value", dtype="float32")
D = te.placeholder([], name="dtype", dtype="int32")
S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
SIM_DQ = topi.nn.simulated_dequantize(A, D, input_scale=S, input_zero_point=Z, axis=axis)
# Create random numpy values to assign to inputs.
a_np = np.random.uniform(low=-128, high=127, size=data_shape).astype(in_dtype)
a_np_f = a_np.astype("float32")
d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[in_dtype])
s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
dq_np = np.zeros(shape=data_shape, dtype="float32")
def check_target(target, dev):
# Wrap the numpy arrays in nd arrays.
a = tvm.nd.array(a_np_f, dev)
d = tvm.nd.array(d_np, dev)
s = tvm.nd.array(s_np, dev)
z = tvm.nd.array(z_np, dev)
dq = tvm.nd.array(dq_np, dev)
# Construct equivalent relay graph.
per_channel = channels[0] != 1
a_var = relay.var("a", shape=data_shape, dtype=in_dtype)
if per_channel:
s_var = relay.const(s_np)
z_var = relay.const(z_np)
else:
s_var = relay.const(s_np[0])
z_var = relay.const(z_np[0])
real_dq_op = relay.qnn.op.dequantize(a_var, s_var, z_var, axis=axis)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target)
# Get real qnn quantize output.
m = graph_executor.GraphModule(lib["default"](dev))
m.set_input("a", a_np)
m.run()
real_dq_out = m.get_output(0)
# Compile the simulated quantize function.
with tvm.target.Target(target):
sched = tvm.topi.testing.get_injective_schedule(target)(SIM_DQ)
func = tvm.build(sched, [A, D, S, Z, SIM_DQ], target, name="sim_quantize")
func(a, d, s, z, dq)
        # Check correctness against the true qnn output.
tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype("float32"), rtol=1e-5)
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)
def test_simulated_dequantize():
verify_simulated_dequantize([1], "int8", [1], -1)
verify_simulated_dequantize([2, 5], "int8", [5], 1)
verify_simulated_dequantize([2, 5], "int8", [2], 0)
verify_simulated_dequantize([1, 32, 32, 32], "int8", [32], -1)
    verify_simulated_dequantize([1, 32, 32, 32], "uint8", [32], -2)
verify_simulated_dequantize([2, 5], "int32", [5], 1)
if __name__ == "__main__":
test_simulated_quantize()
test_simulated_dequantize()
sckott/pygbif | test/test-registry-datasets.py | Python | mit | 952 | 0
"""Tests for registry module - datasets method"""
import vcr
from pygbif import registry
@vcr.use_cassette("test/vcr_cassettes/test_datasets.yaml")
def test_datasets():
"registry.datasets - basic test"
res = registry.datasets()
assert dict == res.__class__
@vcr.use_cassette("test/vcr_cassettes/test_datasets_limit.yaml")
def test_datasets_limit():
"registry.datasets - limit param"
res = registry.datasets(limit=1)
assert dict == res.__class__
    assert 1 == len(res["results"])
res = registry.datasets(limit=3)
assert dict == res.__class__
assert 3 == len(res["results"])
@vcr.use_cassette("test/vcr_cassettes/test_datasets_type.yaml")
def test_datasets_type():
"registry.datasets - type param"
res = registry.datasets(type="OCCURRENCE")
vv = [x["type"] for x in res["results"]]
assert dict == res.__class__
assert 100 == len(res["results"])
assert "OCCURRENCE" == list(set(vv))[0]
seankelly/buildbot | master/buildbot/test/integration/test_configs.py | Python | gpl-2.0 | 9,493 | 0.000316
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
from twisted.python import util
from twisted.trial import unittest
from buildbot import config
from buildbot.scripts import runner
from buildbot.test.util import dirs
from buildbot.test.util.warnings import assertNotProducesWarnings
from buildbot.test.util.warnings import assertProducesWarnings
from buildbot.worker_transition import DeprecatedWorkerAPIWarning
from buildbot.worker_transition import DeprecatedWorkerNameWarning
class RealConfigs(dirs.DirsMixin, unittest.TestCase):
def setUp(self):
self.setUpDirs('basedir')
self.basedir = os.path.abspath('basedir')
self.filename = os.path.abspath("test.cfg")
def tearDown(self):
self.tearDownDirs()
def test_sample_config(self):
filename = util.sibpath(runner.__file__, 'sample.cfg')
with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
config.FileLoader(self.basedir, filename).loadConfig()
def test_0_9_0b5_api_renamed_config(self):
with open(self.filename, "w") as f:
f.write(sample_0_9_0b5_api_renamed)
with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
config.FileLoader(self.basedir, self.filename).loadConfig()
def test_0_9_0b5_config(self):
with open(self.filename, "w") as f:
f.write(sample_0_9_0b5)
with assertProducesWarnings(
DeprecatedWorkerNameWarning,
messages_patterns=[
r"'buildbot\.plugins\.buildslave' plugins namespace is deprecated",
r"'slavenames' keyword argument is deprecated",
r"c\['slaves'\] key is deprecated"]):
config.FileLoader(self.basedir, self.filename).loadConfig()
def test_0_7_12_config(self):
with open(self.filename, "w") as f:
f.write(sample_0_7_12)
with assertProducesWarnings(
DeprecatedWorkerNameWarning,
messages_patterns=[
r"BuildSlave was deprecated",
r"c\['slavePortnum'\] key is deprecated",
r"'slavename' keyword argument is deprecated",
r"c\['slaves'\] key is deprecated"]):
config.FileLoader(self.basedir, self.filename).loadConfig()
def test_0_7_6_config(self):
with open(self.filename, "w") as f:
f.write(sample_0_7_6)
with assertProducesWarnings(
DeprecatedWorkerNameWarning,
messages_patterns=[
r"BuildSlave was deprecated",
r"c\['slavePortnum'\] key is deprecated",
r"'slavename' keyword argument is deprecated",
r"c\['slaves'\] key is deprecated"]):
config.FileLoader(self.basedir, self.filename).loadConfig()
# sample.cfg from various versions, with comments stripped. Adjustments made
# for compatibility are marked with comments
sample_0_7_6 = """\
c = BuildmasterConfig = {}
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("bot1name", "bot1passwd")]
c['slavePortnum'] = 9989
from buildbot.changes.pb import PBChangeSource
c['change_source'] = PBChangeSource()
from buildbot.scheduler import Scheduler
c['schedulers'] = []
c['schedulers'].append(Scheduler(name="all", branch=None,
treeStableTimer=2*60,
builderNames=["buildbot-full"]))
cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
cvsmodule = "buildbot"
from buildbot.process import factory
from buildbot.steps.python_twisted import Trial
from buildbot.steps.shell import Compile
from buildbot.steps.source.cvs import CVS
f1 = factory.BuildFactory()
f1.addStep(CVS(cvsroot=cvsroot, cvsmodule=cvsmodule, login="", method="copy"))
f1.addStep(Compile(command=["python", "./setup.py", "build"]))
# original lacked testChanges=True; this failed at the time
f1.addStep(Trial(testChanges=True, testpath="."))
b1 = {'name': "buildbot-full",
'slavename': "bot1name",
'builddir': "full",
'factory': f1,
}
c['builders'] = [b1]
c['projectName'] = "Buildbot"
c['projectURL'] = "http://buildbot.sourceforge.net/"
c['buildbotURL'] = "http://localhost:8010/"
"""
sample_0_7_12 = """\
c = BuildmasterConfig = {}
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("bot1name", "bot1passwd")]
c['slavePortnum'] = 9989
from buildbot.changes.pb import PBChangeSource
c['change_source'] = PBChangeSource()
from buildbot.scheduler import Scheduler
c['schedulers'] = []
c['schedulers'].append(Scheduler(name="all", branch=None,
treeStableTimer=2*60,
builderNames=["buildbot-full"]))
cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
cvsmodule = "buildbot"
from buildbot.process import factory
# old source is deprecated, so we use the new source
from buildbot.steps.python_twisted import Trial
from buildbot.steps.shell import Compile
from buildbot.steps.source.cvs import CVS
f1 = factory.BuildFactory()
f1.addStep(CVS(cvsroot=cvsroot, cvsmodule=cvsmodule, login="", method="copy"))
f1.addStep(Compile(command=["python", "./setup.py", "build"]))
f1.addStep(Trial(testChanges=True, testpath="."))
b1 = {'name': "buildbot-full",
'slavename': "bot1name",
'builddir': "full",
'factory': f1,
}
c['builders'] = [b1]
c['projectName'] = "Buildbot"
c['projectURL'] = "http://buildbot.sourceforge.net/"
c['buildbotURL'] = "http://localhost:8010/"
"""
# Template for master configuration just before worker renaming.
sample_0_9_0b5 = """\
from buildbot.plugins import *
c = BuildmasterConfig = {}
c['slaves'] = [buildslave.BuildSlave("example-slave", "pass")]
c['protocols'] = {'pb': {'port': 9989}}
c['change_source'] = []
c['change_source'].append(changes.GitPoller(
'git://github.com/buildbot/hello-world.git',
workdir='gitpoller-workdir', branch='master',
pollinterval=300))
c['schedulers'] = []
c['schedulers'].append(schedulers.SingleBranchScheduler(
name="all",
change_filter=util.ChangeFilter(branch='master'),
treeStableTimer=None,
builderNames=["runtests"]))
c['schedulers'].append(schedulers.ForceScheduler(
name="force",
builderNames=["runtests"]))
factory = util.BuildFactory()
factory.addStep(steps.Git(repourl='git://github.com/buildbot/hello-world.git', mode='incremental'))
factory.addStep(steps.ShellCommand(command=["trial", "hello"],
env={"PYTHONPATH": "."}))
c['builders'] = []
c['builders'].append(
util.BuilderConfig(name="runtests",
slavenames=["example-slave"],
factory=factory))
c['title'] = "Pyflakes"
c['titleURL'] = "https://launchpad.net/pyflakes"
c['buildbotURL'] = "http://localhost:8020/"
c['www'] = dict(port=8010,
plugins=dict(waterfall_view={}, console_view={}))
c['db'] = {
'db_url' : "sqlite:///state.sqlite",
}
"""
# Template for master configuration just after worker renaming.
sample_0_9_0b5_api_renamed = """\
from buildbot.plugins import *
c = BuildmasterConfig = {}
c['workers'] = [worker.Worker("example-worker", "pass")]
c['protocols'] = {'pb': {'port': 9989}}
c['change_sou