Dataset schema: repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars)

Each row below gives repo_name | path | language | license | size | score on a single line, followed by the sampled file content (prefix, middle, and suffix shown as continuous text).
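The prefix/middle/suffix columns form a fill-in-the-middle (FIM) split: concatenating the three fields reconstructs the sampled file. As a minimal sketch (assuming the rows are published on the Hugging Face Hub; the dataset identifier below is a placeholder, not this corpus's actual name), rows like these could be loaded and reassembled with the `datasets` library:

```python
# Minimal sketch: load FIM-style rows and rebuild the original file text.
# "user/python-fim-corpus" is a placeholder dataset name, not the real
# identifier of this corpus.
from datasets import load_dataset

ds = load_dataset("user/python-fim-corpus", split="train")

row = ds[0]
full_text = row["prefix"] + row["middle"] + row["suffix"]  # FIM reassembly
print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
print(full_text[:120])
```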
brunobord/critica | apps/notes/settings.py | Python | gpl-3.0 | 652 | 0.001534
# -*- coding: utf-8 -*-
"""
Settings of ``critica.apps.notes`` application.
"""
from critica.apps.notes import choices
# Excluded categories
# ------------------------------------------------------------------------------
EXCLUDED_CATEGORIES = [
'epicurien',
'voyages',
'regions',
'coup-de-gueule',
]
# Type order
# ------------------------------------------------------------------------------
TYPE_ORDER = [
'vous-saviez',
'fallait-sen-douter',
'ca-cest-fait',
'linfo-off',
'criticons',
'aucun-interet',
'premiere-nouvelle',
'on-sen-serait-passe',
'on-en-rirait-presque',
'ils-ont-ose',
]
wakiyamap/electrum-mona | electrum_mona/lnaddr.py | Python | mit | 18,219 | 0.002744
#! /usr/bin/env python3
# This was forked from https://github.com/rustyrussell/lightning-payencode/tree/acc16ec13a3fa1dc16c07af6ec67c261bd8aff23
import re
import time
from hashlib import sha256
from binascii import hexlify
from decimal import Decimal
from typing import Optional, TYPE_CHECKING, Type
import random
import bitstring
from .bitcoin import hash160_to_b58_address, b58_address_to_hash160, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC
from .segwit_addr import bech32_encode, bech32_decode, CHARSET
from . import segwit_addr
from . import constants
from .constants import AbstractNet
from . import ecc
from .bitcoin import COIN
if TYPE_CHECKING:
from .lnutil import LnFeatures
class LnInvoiceException(Exception): pass
class LnDecodeException(LnInvoiceException): pass
class LnEncodeException(LnInvoiceException): pass
# BOLT #11:
#
# A writer MUST encode `amount` as a positive decimal integer with no
# leading zeroes, SHOULD use the shortest representation possible.
def shorten_amount(amount):
""" Given an amount in bitcoin, shorten it
"""
# Convert to pico initially
amount = int(amount * 10**12)
units = ['p', 'n', 'u', 'm']
for unit in units:
if amount % 1000 == 0:
amount //= 1000
else:
break
else:
unit = ''
return str(amount) + unit
def unshorten_amount(amount) -> Decimal:
""" Given a shortened amount, convert it into a decimal
"""
# BOLT #11:
# The following `multiplier` letters are defined:
#
#* `m` (milli): multiply by 0.001
#* `u` (micro): multiply by 0.000001
#* `n` (nano): multiply by 0.000000001
#* `p` (pico): multiply by 0.000000000001
units = {
'p': 10**12,
'n': 10**9,
'u': 10**6,
'm': 10**3,
}
unit = str(amount)[-1]
# BOLT #11:
# A reader SHOULD fail if `amount` contains a non-digit, or is followed by
# anything except a `multiplier` in the table above.
if not re.fullmatch("\\d+[pnum]?", str(amount)):
raise LnDecodeException("Invalid amount '{}'".format(amount))
if unit in units.keys():
return Decimal(amount[:-1]) / units[unit]
else:
return Decimal(amount)
_INT_TO_BINSTR = {a: '0' * (5-len(bin(a)[2:])) + bin(a)[2:] for a in range(32)}
# Bech32 spits out array of 5-bit values. Shim here.
def u5_to_bitarray(arr):
b = ''.join(_INT_TO_BINSTR[a] for a in arr)
return bitstring.BitArray(bin=b)
def bitarray_to_u5(barr):
assert barr.len % 5 == 0
ret = []
s = bitstring.ConstBitStream(barr)
while s.pos != s.len:
ret.append(s.read(5).uint)
return ret
def encode_fallback(fallback: str, net: Type[AbstractNet]):
""" Encode all supported fallback addresses.
"""
wver, wprog_ints = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, fallback)
if wver is not None:
wprog = bytes(wprog_ints)
else:
addrtype, addr = b58_address_to_hash160(fallback)
if addrtype == net.ADDRTYPE_P2PKH:
wver = 17
elif addrtype == net.ADDRTYPE_P2SH:
wver = 18
else:
raise LnEncodeException(f"Unknown address type {addrtype} for {net}")
wprog = addr
return tagged('f', bitstring.pack("uint:5", wver) + wprog)
def parse_fallback(fallback, net: Type[AbstractNet]):
wver = fallback[0:5].uint
if wver == 17:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2PKH)
elif wver == 18:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2SH)
elif wver <= 16:
witprog = fallback[5:] # cut witver
witprog = witprog[:len(witprog) // 8 * 8] # can only be full bytes
witprog = witprog.tobytes()
addr = segwit_addr.encode_segwit_address(net.SEGWIT_HRP, wver, witprog)
else:
return None
return addr
BOLT11_HRP_INV_DICT = {net.BOLT11_HRP: net for net in constants.NETS_LIST}
# Tagged field containing BitArray
def tagged(char, l):
# Tagged fields need to be zero-padded to 5 bits.
while l.len % 5 != 0:
l.append('0b0')
return bitstring.pack("uint:5, uint:5, uint:5",
CHARSET.find(char),
(l.len / 5) / 32, (l.len / 5) % 32) + l
# Tagged field containing bytes
def tagged_bytes(char, l):
return tagged(char, bitstring.BitArray(l))
def trim_to_min_length(bits):
"""Ensures 'bits' have min number of leading zeroes.
Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.
"""
bits = bits[:] # copy
# make sure we can be split into 5 bit blocks
while bits.len % 5 != 0:
bits.prepend('0b0')
# Get minimal length by trimming leading 5 bits at a time.
while bits.startswith('0b00000'):
if len(bits) == 5:
break # v == 0
bits = bits[5:]
return bits
# Discard trailing bits, convert to bytes.
def trim_to_bytes(barr):
# Adds a byte if necessary.
b = barr.tobytes()
if barr.len % 8 != 0:
return b[:-1]
return b
# Try to pull out tagged data: returns tag, tagged data and remainder.
def pull_tagged(stream):
tag = stream.read(5).uint
length = stream.read(5).uint * 32 + stream.read(5).uint
return (CHARSET[tag], stream.read(length * 5), stream)
def lnencode(addr: 'LnAddr', privkey) -> str:
if addr.amount:
amount = addr.net.BOLT11_HRP + shorten_amount(addr.amount)
else:
amount = addr.net.BOLT11_HRP if addr.net else ''
hrp = 'ln' + amount
# Start with the timestamp
data = bitstring.pack('uint:35', addr.date)
tags_set = set()
# Payment hash
data += tagged_bytes('p', addr.paymenthash)
tags_set.add('p')
if addr.payment_secret is not None:
data += tagged_bytes('s', addr.payment_secret)
tags_set.add('s')
for k, v in addr.tags:
# BOLT #11:
#
# A writer MUST NOT include more than one `d`, `h`, `n` or `x` fields,
if k in ('d', 'h', 'n', 'x', 'p', 's'):
if k in tags_set:
raise LnEncodeException("Duplicate '{}' tag".format(k))
if k == 'r':
route = bitstring.BitArray()
for step in v:
pubkey, channel, feebase, feerate, cltv = step
route.append(bitstring.BitArray(pubkey) + bitstring.BitArray(channel) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv))
data += tagged('r', route)
elif k == 't':
pubkey, feebase, feerate, cltv = v
route = bitstring.BitArray(pubkey) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv)
data += tagged('t', route)
elif k == 'f':
data += encode_fallback(v, addr.net)
elif k == 'd':
# truncate to max length: 1024*5 bits = 639 bytes
data += tagged_bytes('d', v.encode()[0:639])
elif k == 'x':
expirybits = bitstring.pack('intbe:64', v)
expirybits = trim_to_min_length(expirybits)
data += tagged('x', expirybits)
elif k == 'h':
data += tagged_bytes('h', sha256(v.encode('utf-8')).digest())
elif k == 'n':
data += tagged_bytes('n', v)
elif k == 'c':
finalcltvbits = bitstring.pack('intbe:64', v)
finalcltvbits = trim_to_min_length(finalcltvbits)
data += tagged('c', finalcltvbits)
elif k == '9':
if v == 0:
continue
feature_bits = bitstring.BitArray(uint=v, length=v.bit_length())
feature_bits = trim_to_min_length(feature_bits)
data += tagged('9', feature_bits)
else:
# FIXME: Support unknown tags?
raise LnEncodeException("Unknown tag {}".format(k))
tags_set.add(k)
# BOLT #11:
#
# A writer MUST include either a `d` or `h` field, and MUST NOT include
# both.
if 'd' in tags_set and 'h' in tags_set:
raise ValueError("Cannot include both 'd' and 'h'")
if not 'd'
wuan/klimalogger | klimalogger/sensor/sht1x_sensor.py | Python | apache-2.0 | 1,458 | 0.00206
# -*- coding: utf8 -*-
from injector import singleton, inject
try:
import configparser
except ImportError:
import ConfigParser as configparser  # Python 2 fallback
from sht1x.Sht1x import Sht1x as SHT1x
@singleton
class Sensor:
name = "SHT1x"
@inject
def __init__(self, config_parser: configparser.ConfigParser):
data_pin = int(config_parser.get('sht1x_sensor', 'data_pin'))
sck_pin = int(config_parser.get('sht1x_sensor', 'sck_pin'))
self.sht1x = SHT1x(dataPin=data_pin, sckPin=sck_pin, gpioMode=SHT1x.GPIO_BCM)
def measure(self, data_builder):
(temperature, humidity) = self.sht1x.read_temperature_C_and_humidity()
if temperature > -40.0:
try:
dew_point = self.sht1x.calculate_dew_point(temperature, humidity)
dew_point = round(dew_point, 2)
except ValueError:
dew_point = None
temperature = round(temperature, 2)
humidity = round(humidity, 2)
else:
temperature = None
humidity = None
dew_point = None
if temperature and humidity and dew_point and -30 < temperature < 80 and 5 < humidity <= 100:
data_builder.add(self.name, "temperature", "°C", temperature)
if dew_point:
data_builder.add(self.name, "dew point", "°C", dew_point, True)
data_builder.add(self.name, "relative humidity", "%", humidity)
rjschwei/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute_management_client.py | Python | mit | 7,735 | 0.002069
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.availability_sets_operations import AvailabilitySetsOperations
from .operations.virtual_machine_extension_images_operations import VirtualMachineExtensionImagesOperations
from .operations.virtual_machine_extensions_operations import VirtualMachineExtensionsOperations
from .operations.virtual_machine_images_operations import VirtualMachineImagesOperations
from .operations.usage_operations import UsageOperations
from .operations.virtual_machine_sizes_operations import VirtualMachineSizesOperations
from .operations.images_operations import ImagesOperations
from .operations.virtual_machines_operations import VirtualMachinesOperations
from .operations.virtual_machine_scale_sets_operations import VirtualMachineScaleSetsOperations
from .operations.virtual_machine_scale_set_vms_operations import VirtualMachineScaleSetVMsOperations
from .operations.container_services_operations import ContainerServicesOperations
from .operations.disks_operations import DisksOperations
from .operations.snapshots_operations import SnapshotsOperations
from . import models
class ComputeManagementClientConfiguration(AzureConfiguration):
"""Configuration for ComputeManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Subscription credentials which uniquely identify
Microsoft Azure subscription. The subscription ID forms part of the URI
for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'https://management.azure.com'
super(ComputeManagementClientConfiguration, self).__init__(base_url)
self.add_user_agent('computemanagementclient/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class ComputeManagementClient(object):
"""Composite Swagger for Compute Client
:ivar config: Configuration for client.
:vartype config: ComputeManagementClientConfiguration
:ivar availability_sets: AvailabilitySets operations
:vartype availability_sets: .operations.AvailabilitySetsOperations
:ivar virtual_machine_extension_images: VirtualMachineExtensionImages operations
:vartype virtual_machine_extension_images: .operations.VirtualMachineExtensionImagesOperations
:ivar virtual_machine_extensions: VirtualMachineExtensions operations
:vartype virtual_machine_extensions: .operations.VirtualMachineExtensionsOperations
:ivar virtual_machine_images: VirtualMachineImages operations
:vartype virtual_machine_images: .operations.VirtualMachineImagesOperations
:ivar usage: Usage operations
:vartype usage: .operations.UsageOperations
:ivar virtual_machine_sizes: VirtualMachineSizes operations
:vartype virtual_machine_sizes: .operations.VirtualMachineSizesOperations
:ivar images: Images operations
:vartype images: .operations.ImagesOperations
:ivar virtual_machines: VirtualMachines operations
:vartype virtual_machines: .operations.VirtualMachinesOperations
:ivar virtual_machine_scale_sets: VirtualMachineScaleSets operations
:vartype virtual_machine_scale_sets: .operations.VirtualMachineScaleSetsOperations
:ivar virtual_machine_scale_set_vms: VirtualMachineScaleSetVMs operations
:vartype virtual_machine_scale_set_vms: .operations.VirtualMachineScaleSetVMsOperations
:ivar container_services: ContainerServices operations
:vartype container_services: .operations.ContainerServicesOperations
:ivar disks: Disks operations
:vartype disks: .operations.DisksOperations
:ivar snapshots: Snapshots operations
:vartype snapshots: .operations.SnapshotsOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Subscription credentials which uniquely identify
Microsoft Azure subscription. The subscription ID forms part of the URI
for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = ComputeManagementClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.availability_sets = AvailabilitySetsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_extension_images = VirtualMachineExtensionImagesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_extensions = VirtualMachineExtensionsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_images = VirtualMachineImagesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.usage = UsageOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_sizes = VirtualMachineSizesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.images = ImagesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machines = VirtualMachinesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_scale_sets = VirtualMachineScaleSetsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.virtual_machine_scale_set_vms = VirtualMachineScaleSetVMsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.container_services = ContainerServicesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.disks = DisksOperations(
self._client, self.config, self._serialize, self._deserialize)
self.snapshots = SnapshotsOperations(
self._client, self.config, self._serialize, self._deserialize)
Alwnikrotikz/numexpr | bench/vml_timing.py | Python | mit | 5,758 | 0.002431
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
import sys
import timeit
import numpy
import numexpr
array_size = 1000*1000
iterations = 10
numpy_ttime = []
numpy_sttime = []
numpy_nttime = []
numexpr_ttime = []
numexpr_sttime = []
numexpr_nttime = []
def compare_times(expr, nexpr):
global numpy_ttime
global numpy_sttime
global numpy_nttime
global numexpr_ttime
global numexpr_sttime
global numexpr_nttime
print "******************* Expression:", expr
setup_contiguous = setupNP_contiguous
setup_strided = setupNP_strided
setup_unaligned = setupNP_unaligned
numpy_timer = timeit.Timer(expr, setup_contiguous)
numpy_time = round(numpy_timer.timeit(number=iterations), 4)
numpy_ttime.append(numpy_time)
print '%30s %.4f'%('numpy:', numpy_time / iterations)
numpy_timer = timeit.Timer(expr, setup_strided)
numpy_stime = round(numpy_timer.timeit(number=iterations), 4)
numpy_sttime.append(numpy_stime)
print '%30s %.4f'%('numpy strided:', numpy_stime / iterations)
numpy_timer = timeit.Timer(expr, setup_unaligned)
numpy_ntime = round(numpy_timer.timeit(number=iterations), 4)
numpy_nttime.append(numpy_ntime)
print '%30s %.4f'%('numpy unaligned:', numpy_ntime / iterations)
evalexpr = 'evaluate("%s", optimization="aggressive")' % expr
numexpr_timer = timeit.Timer(evalexpr, setup_contiguous)
numexpr_time = round(numexpr_timer.timeit(number=iterations), 4)
numexpr_ttime.append(numexpr_time)
print '%30s %.4f'%("numexpr:", numexpr_time/iterations,),
print "Speed-up of numexpr over numpy:", round(numpy_time/numexpr_time, 4)
evalexpr = 'evaluate("%s", optimization="aggressive")' % expr
numexpr_timer = timeit.Timer(evalexpr, setup_strided)
numexpr_stime = round(numexpr_timer.timeit(number=iterations), 4)
numexpr_sttime.append(numexpr_stime)
print '%30s %.4f'%("numexpr strided:", numexpr_stime/iterations,),
print "Speed-up of numexpr over numpy:", \
round(numpy_stime/numexpr_stime, 4)
evalexpr = 'evaluate("%s", optimization="aggressive")' % expr
numexpr_timer = timeit.Timer(evalexpr, setup_unaligned)
numexpr_ntime = round(numexpr_timer.timeit(number=iterations), 4)
numexpr_nttime.append(numexpr_ntime)
print '%30s %.4f'%("numexpr unaligned:", numexpr_ntime/iterations,),
print "Speed-up of numexpr over numpy:", \
round(numpy_ntime/numexpr_ntime, 4)
print
setupNP = """\
from numpy import arange, linspace, arctan2, sqrt, sin, cos, exp, log
from numpy import rec as records
#from numexpr import evaluate
from numexpr import %s
# Initialize a recarray of 16 MB in size
r=records.array(None, formats='a%s,i4,f4,f8', shape=%s)
c1 = r.field('f0')%s
i2 = r.field('f1')%s
f3 = r.field('f2')%s
f4 = r.field('f3')%s
c1[:] = "a"
i2[:] = arange(%s)/1000
f3[:] = linspace(0,1,len(i2))
f4[:] = f3*1.23
"""
eval_method = "evaluate"
setupNP_contiguous = setupNP % ((eval_method, 4, array_size,) + \
(".copy()",)*4 + \
(array_size,))
setupNP_strided = setupNP % (eval_method, 4, array_size,
"", "", "", "", array_size)
setupNP_unaligned = setupNP % (eval_method, 1, array_size,
"", "", "", "", array_size)
expressions = []
expressions.append('i2 > 0')
expressions.append('f3+f4')
expressions.append('f3+i2')
expressions.append('exp(f3)')
expressions.append('log(exp(f3)+1)/f4')
expressions.append('0.1*i2 > arctan2(f3, f4)')
expressions.append('sqrt(f3**2 + f4**2) > 1')
expressions.append('sin(f3)>cos(f4)')
expressions.append('f3**f4')
def compare(expression=False):
if expression:
compare_times(expression, 1)
sys.exit(0)
nexpr = 0
for expr in expressions:
nexpr += 1
compare_times(expr, nexpr)
print
if __name__ == '__main__':
import numexpr
numexpr.print_versions()
numpy.seterr(all='ignore')
numexpr.set_vml_accuracy_mode('low')
numexpr.set_vml_num_threads(2)
if len(sys.argv) > 1:
expression = sys.argv[1]
print "expression-->", expression
compare(expression)
else:
compare()
tratios = numpy.array(numpy_ttime) / numpy.array(numexpr_ttime)
stratios = numpy.array(numpy_sttime) / numpy.array(numexpr_sttime)
ntratios = numpy.array(numpy_nttime) / numpy.array(numexpr_nttime)
print "eval method: %s" % eval_method
print "*************** Numexpr vs NumPy speed-ups *******************"
# print "numpy total:", sum(numpy_ttime)/iterations
# print "numpy strided total:", sum(numpy_sttime)/iterations
# print "numpy unaligned total:", sum(numpy_nttime)/iterations
# print "numexpr total:", sum(numexpr_ttime)/iterations
print "Contiguous case:\t %s (mean), %s (min), %s (max)" % \
(round(tratios.mean(), 2),
round(tratios.min(), 2),
round(tratios.max(), 2))
# print "numexpr strided total:", sum(numexpr_sttime)/iterations
print "Strided case:\t\t %s (mean), %s (min), %s (max)" % \
(round(stratios.mean(), 2),
round(stratios.min(), 2),
round(stratios.max(), 2))
# print "numexpr unaligned total:", sum(numexpr_nttime)/iterations
print "Unaligned case:\t\t %s (mean), %s (min), %s (max)" % \
(round(ntratios.mean(), 2),
round(ntratios.min(), 2),
round(ntratios.max(), 2))
rodo/django-perf | foo/tuna/management/commands/tuna_delete_direct.py | Python | gpl-3.0 | 1,873 | 0.000534
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013,2014 Rodolphe Quiédeville <rodolphe@quiedeville.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from optparse import make_option
from random import randrange
import time
import sys
from django.core.management.base import BaseCommand
from foo.tuna.models import Book, Editor, Author, Company, Sinopsis
import utils
class Command(BaseCommand):
help = 'Import datas'
option_list = BaseCommand.option_list + (
make_option("-c",
"--code",
dest="code",
type="int",
help="number of values to input",
default=1),
)
def handle(self, *args, **options):
"""Lookup some objects
"""
code = options['code']
self.doit(code, Book, 'Book')
self.doit(code, Company, 'Company')
def doit(self, code, model, name):
print "{} : {}".format(name, model.objects.all().count())
# remove 10% of tuples, be in first
(count, delta) = utils.direct_delete(code, model)
utils.print_console('direct_delete', count, delta)
print "{} : {}".format(name, model.objects.all().count())
Lilykos/invenio | invenio/modules/jsonalchemy/jsonext/engines/cache.py | Python | gpl-2.0 | 4,277 | 0.00187
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Wrapper for *Flask-Cache* as engine for *JSONAlchemy*."""
import six
from invenio.ext.cache import cache
from invenio.modules.jsonalchemy.storage import Storage
class CacheStorage(Storage):
"""Implement storage engine for Flask-Cache useful for testing."""
def __init__(self, **kwargs):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.__init__`."""
self._prefix = kwargs.get('model', '')
def _set(self, data):
self._keys = self._keys | set([data['_id']])
cache.set(self._prefix + data['_id'], data, timeout=99999)
def _get(self, id):
value = cache.get(self._prefix + id)
if value is None:
raise KeyError()
return value
@property
def _keys(self):
return cache.get(self._prefix + '::keys') or set()
@_keys.setter
def _keys(self, value):
cache.set(self._prefix + '::keys', value)
def save_one(self, data, id=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.save_one`."""
if id is not None:
data['_id'] = id
self._set(data)
return data
def save_many(self, jsons, ids=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.save_many`."""
return map(lambda k: self.save_one(*k), zip(jsons, ids))
def update_one(self, data, id=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.update_one`."""
if id is not None:
data['_id'] = id
id = data['_id']
old_data = self._get(id)
old_data.update(data)
self._set(old_data)
return old_data
def update_many(self, jsons, ids=None):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.update_many`."""
return map(lambda k: self.update_one(*k), zip(jsons, ids))
def get_one(self, id):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_one`."""
return self._get(id)
def get_many(self, ids):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_many`."""
return map(self.get_one, ids)
def get_field_values(self, ids, field, repetitive_values=True, count=False,
include_recid=False, split_by=0):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_field_values`."""
raise NotImplementedError()
def get_fields_values(self, ids, fields, repetitive_values=True,
count=False, include_recid=False, split_by=0):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.get_fields_values`."""
raise NotImplementedError()
def search(self, query):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.search`."""
def _find(item):
for k, v in six.iteritems(query):
if item is None:
return False
test_v = item.get(k)
if test_v is None and v is not None:
return False
elif test_v != v:
return False
return True
return filter(_find, map(self._get, self._keys))
def create(self):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.create`."""
assert len(self._keys) == 0
def drop(self):
"""See :meth:`~invenio.modules.jsonalchemy.storage.Storage.create`."""
while self._keys:
cache.delete(self._prefix + self._keys.pop())
compsoc-ssc/compsocssc | general/models.py | Python | mit | 2,517 | 0.005165
from django.db import models
import warnings
from django.utils import timezone
import requests
from image_cropping import ImageRatioField
class CompMember(models.Model):
"""A member of compsoc"""
class Meta:
verbose_name = 'CompSoc Member'
verbose_name_plural = 'CompSoc Members'
index = models.IntegerField(blank=False, help_text="This field is present just for ordering members based on their posts. President = 2, VPs = 1, Gen. Sec. = 0, Everyone else = -1", default=-1)
name = models.CharField(max_length=50, help_text='Enter your full name')
image = models.ImageField(blank=False, upload_to='member_images/', help_text='Please select a display image for yourself. This is necessary.')
cropping = ImageRatioField('image', '500x500')
alumni = models.BooleanField(default=False, help_text='Are you an alumni?')
role = models.CharField(max_length=100, help_text="Enter your post if you hold one. If not, enter 'Member'")
batch_of = models.CharField(max_length=4, default='2015', help_text='Enter the year you will graduate')
social_link = models.CharField(blank=True, max_length=256, help_text='Enter a link to your Facebook, Twitter, GitHub or any other social network profile. You can leave this blank if you wish!')
def get_social_link(self):
'''
Returns the social_link if present. Otherwise, sends javascript:void(0)
'''
if self.social_link == '':
return 'javascript:void(0)'
else:
return self.social_link
def __str__(self):
return self.name
class Variable(models.Model): ##NOTE: This should not be used anymore
def __str__(self):
warnings.warn('''You are using a "General Variable".
Stop doing that.
This is bad design on Arjoonn's part so don't fall into the same trap.
If you are using this for Orfik, that has already been fixed. If you are using this for logos, same thing.
Over a few cycles this entire table will be removed.
''')
return self.name
name = models.CharField(max_length=100)
time = models.DateTimeField()
# Receive the pre_delete signal and delete the image associated with the model instance.
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
@receiver(pre_delete, sender=CompMember)
def compsoc_member_delete(sender, instance, **kwargs):
# Pass false so ImageField doesn't save the model.
instance.image.delete(False)
Eldinnie/python-telegram-bot | tests/test_video.py | Python | gpl-3.0 | 7,707 | 0.000389
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import os
import pytest
from flaky import flaky
from telegram import Video, TelegramError, Voice, PhotoSize
@pytest.fixture(scope='function')
def video_file():
f = open('tests/data/telegram.mp4', 'rb')
yield f
f.close()
@pytest.fixture(scope='class')
def video(bot, chat_id):
with open('tests/data/telegram.mp4', 'rb') as f:
return bot.send_video(chat_id, video=f, timeout=50).video
class TestVideo(object):
width = 360
height = 640
duration = 5
file_size = 326534
mime_type = 'video/mp4'
supports_streaming = True
caption = u'<b>VideoTest</b> - *Caption*'
video_file_url = 'https://python-telegram-bot.org/static/testfiles/telegram.mp4'
def test_creation(self, video):
# Make sure file has been uploaded.
assert isinstance(video, Video)
assert isinstance(video.file_id, str)
assert video.file_id is not ''
assert isinstance(video.thumb, PhotoSize)
assert isinstance(video.thumb.file_id, str)
assert video.thumb.file_id is not ''
def test_expected_values(self, video):
assert video.width == self.width
assert video.height == self.height
assert video.duration == self.duration
assert video.file_size == self.file_size
assert video.mime_type == self.mime_type
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_all_args(self, bot, chat_id, video_file, video):
message = bot.send_video(chat_id, video_file, duration=self.duration,
caption=self.caption, supports_streaming=self.supports_streaming,
disable_notification=False, width=video.width,
height=video.height, parse_mode='Markdown')
assert isinstance(message.video, Video)
assert isinstance(message.video.file_id, str)
assert message.video.file_id != ''
assert message.video.width == video.width
assert message.video.height == video.height
assert message.video.duration == video.duration
assert message.video.file_size == video.file_size
assert isinstance(message.video.thumb, PhotoSize)
assert isinstance(message.video.thumb.file_id, str)
assert message.video.thumb.file_id != ''
assert message.video.thumb.width == video.thumb.width
assert message.video.thumb.height == video.thumb.height
assert message.video.thumb.file_size == video.thumb.file_size
assert message.caption == self.caption.replace('*', '')
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_get_and_download(self, bot, video):
new_file = bot.get_file(video.file_id)
assert new_file.file_size == self.file_size
assert new_file.file_id == video.file_id
assert new_file.file_path.startswith('https://')
new_file.download('telegram.mp4')
assert os.path.isfile('telegram.mp4')
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_mp4_file_url(self, bot, chat_id, video):
message = bot.send_video(chat_id, self.video_file_url, caption=self.caption)
assert isinstance(message.video, Video)
assert isinstance(message.video.file_id, str)
assert message.video.file_id != ''
assert message.video.width == video.width
assert message.video.height == video.height
assert message.video.duration == video.duration
assert message.video.file_size == video.file_size
assert isinstance(message.video.thumb, PhotoSize)
assert isinstance(message.video.thumb.file_id, str)
assert message.video.thumb.file_id != ''
assert message.video.thumb.width == video.thumb.width
assert message.video.thumb.height == video.thumb.height
assert message.video.thumb.file_size == video.thumb.file_size
assert message.caption == self.caption
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_resend(self, bot, chat_id, video):
message = bot.send_video(chat_id, video.file_id)
assert message.video == video
def test_send_with_video(self, monkeypatch, bot, chat_id, video):
def test(_, url, data, **kwargs):
return data['video'] == video.file_id
monkeypatch.setattr('telegram.utils.request.Request.post', test)
message = bot.send_video(chat_id, video=video)
assert message
def test_de_json(self, bot):
json_dict = {
'file_id': 'not a file id',
'width': self.width,
'height': self.height,
'duration': self.duration,
'mime_type': self.mime_type,
'file_size': self.file_size
}
json_video = Video.de_json(json_dict, bot)
assert json_video.file_id == 'not a file id'
assert json_video.width == self.width
assert json_video.height == self.height
assert json_video.duration == self.duration
assert json_video.mime_type == self.mime_type
assert json_video.file_size == self.file_size
def test_to_dict(self, video):
video_dict = video.to_dict()
assert isinstance(video_dict, dict)
assert video_dict['file_id'] == video.file_id
assert video_dict['width'] == video.width
assert video_dict['height'] == video.height
assert video_dict['duration'] == video.duration
assert video_dict['mime_type'] == video.mime_type
assert video_dict['file_size'] == video.file_size
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_error_send_empty_file(self, bot, chat_id):
with pytest.raises(TelegramError):
bot.send_video(chat_id, open(os.devnull, 'rb'))
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_error_send_empty_file_id(self, bot, chat_id):
with pytest.raises(TelegramError):
bot.send_video(chat_id, '')
def test_error_without_required_args(self, bot, chat_id):
with pytest.raises(TypeError):
bot.send_video(chat_id=chat_id)
def test_get_file_instance_method(self, monkeypatch, video):
def test(*args, **kwargs):
return args[1] == video.file_id
monkeypatch.setattr('telegram.Bot.get_file', test)
assert video.get_file()
def test_equality(self, video):
a = Video(video.file_id, self.width, self.height, self.duration)
b = Video(video.file_id, self.width, self.height, self.duration)
c = Video(video.file_id, 0, 0, 0)
d = Video('', self.width, self.height, self.duration)
e = Voice(video.file_id, self.duration)
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
oddbird/gurtel | tests/test_util.py | Python | bsd-3-clause | 1,076 | 0
from gurtel.util import Url
class TestUrl(object):
def equal(self, one, two):
"""
For this test, want to ensure that compare-equal implies hash-equal.
"""
return (one == two) and (hash(one) == hash(two))
def test_no_qs(self):
assert self.equal(
Url("http://fake.base/path/"),
Url("http://fake.base/path/"))
def test_same_qs(self):
assert self.equal(
Url("http://fake.base/path/?foo=bar"),
Url("http://fake.base/path/?foo=bar"))
def test_different_key_order(self):
assert self.equal(
Url("http://fake.base/path/?foo=bar&arg=yo"),
Url("http://fake.base/path/?arg=yo&foo=bar"))
def test_different_value_order(self):
assert not self.equal(
Url("http://fake.base/path/?foo=bar&foo=yo"),
Url("http://fake.base/path/?foo=yo&foo=bar"))
def test_repr(self):
assert self.equal(
repr(Url("http://fake.base/path/?foo=bar")),
"Url(http://fake.base/path/?foo=bar)")
hrayr-artunyan/shuup | shuup_tests/core/test_shipments.py | Python | agpl-3.0 | 6,394 | 0.001251
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import decimal
import pytest
from django.conf import settings
from shuup.core.models import Shipment, ShippingStatus, StockBehavior
from shuup.testing.factories import (
add_product_to_order, create_empty_order, create_product,
get_default_shop, get_default_supplier
)
from shuup.utils.excs import Problem
@pytest.mark.django_db
def test_shipment_identifier():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
shipment = order.create_shipment({line.product: 1}, supplier=supplier)
expected_key_start = "%s/%s" % (order.pk, i)
assert shipment.identifier.startswith(expected_key_start)
assert order.shipments.count() == int(line.quantity)
assert order.shipping_status == ShippingStatus.FULLY_SHIPPED # Check that order is now fully shipped
assert not order.can_edit()
@pytest.mark.django_db
def test_shipment_creation_from_unsaved_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
unsaved_shipment = Shipment(order=order, supplier=supplier)
shipment = order.create_shipment({line.product: 1}, shipment=unsaved_shipment)
expected_key_start = "%s/%s" % (order.pk, i)
assert shipment.identifier.startswith(expected_key_start)
assert order.shipments.count() == int(line.quantity)
@pytest.mark.django_db
def test_shipment_creation_without_supplier_and_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
with pytest.raises(AssertionError):
order.create_shipment({line.product: 1})
assert order.shipments.count() == 0
@pytest.mark.django_db
def test_shipment_creation_with_invalid_unsaved_shipment():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
second_order = create_empty_order(shop=shop)
second_order.full_clean()
second_order.save()
product_lines = order.lines.exclude(product_id=None)
for line in product_lines:
for i in range(0, int(line.quantity)):
with pytest.raises(AssertionError):
unsaved_shipment = Shipment(supplier=supplier, order=second_order)
order.create_shipment({line.product: 1}, shipment=unsaved_shipment)
assert order.shipments.count() == 0
@pytest.mark.django_db
def test_partially_shipped_order_status():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
assert order.can_edit()
first_product_line = order.lines.exclude(product_id=None).first()
assert first_product_line.quantity > 1
order.create_shipment({first_product_line.product: 1}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert not order.can_edit()
@pytest.mark.django_db
def test_shipment_delete():
shop = get_default_shop()
supplier = get_default_supplier()
order = _get_order(shop, supplier)
assert order.can_edit()
first_product_line = order.lines.exclude(product_id=None).first()
assert first_product_line.quantity > 1
shipment = order.create_shipment({first_product_line.product: 1}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert order.shipments.all().count() == 1
# Test shipment delete
shipment.soft_delete()
assert order.shipments.all().count() == 1
assert order.shipments.all_except_deleted().count() == 0
# Check the shipping status update
assert order.shipping_status == ShippingStatus.NOT_SHIPPED
@pytest.mark.django_db
def test_shipment_with_insufficient_stock():
if "shuup.simple_supplier" not in settings.INSTALLED_APPS:
pytest.skip("Need shuup.simple_supplier in INSTALLED_APPS")
from shuup_tests.simple_supplier.utils import get_simple_supplier
shop = get_default_shop()
supplier = get_simple_supplier()
order = _get_order(shop, supplier, stocked=True)
product_line = order.lines.products().first()
product = product_line.product
assert product_line.quantity == 15
supplier.adjust_stock(product.pk, delta=10)
stock_status = supplier.get_stock_status(product.pk)
assert stock_status.physical_count == 10
order.create_shipment({product: 5}, supplier=supplier)
assert order.shipping_status == ShippingStatus.PARTIALLY_SHIPPED
assert order.shipments.all().count() == 1
with pytest.raises(Problem):
order.create_shipment({product: 10}, supplier=supplier)
# Should be fine after adding more stock
supplier.adjust_stock(product.pk, delta=5)
order.create_shipment({product: 10}, supplier=supplier)
def _get_order(shop, supplier, stocked=False):
order = create_empty_order(shop=shop)
order.full_clean()
order.save()
for product_data in _get_product_data(stocked):
quantity = product_data.pop("quantity")
product = create_product(
sku=product_data.pop("sku"),
shop=shop,
supplier=supplier,
default_price=3.33,
**product_data)
add_product_to_order(order, supplier, product, quantity=quantity, taxless_base_unit_price=1)
order.cache_prices()
order.check_all_verified()
order.save()
return order
def _get_product_data(stocked=False):
return [
{
"sku": "sku1234",
"net_weight": decimal.Decimal("1"),
"gross_weight": decimal.Decimal("43.34257"),
"quantity": decimal.Decimal("15"),
"stock_behavior": StockBehavior.STOCKED if stocked else StockBehavior.UNSTOCKED
}
]
vtemian/university_projects | practic_stage/hmw4/strategies/insertion.py | Python | apache-2.0 | 524 | 0.013359
from copy import deepcopy
from .base import Strategy
class InsertionSort(Strategy):
def sort_by(self, field):
return self._sort(lambda x, y: x.grades[field] < y.grades[field])
def sort(self):
return self._sort(lambda x, y: x < y)
def _sort(self, compare):
for first_item in self.items:
items = deepcopy(self.items)
items.iterator_start = first_item.next
for second_item in items:
if compare(first_item, second_item):
self.items.interchange(first_item, second_item)
Crespo911/pyspace | pySPACE/environments/chains/node_chain.py | Python | gpl-3.0 | 60,058 | 0.003014
# coding=utf-8
""" NodeChains are sequential orders of :mod:`~pySPACE.missions.nodes`
.. image:: ../../graphics/node_chain.png
:width: 500
There are two main use cases:
* the application for :mod:`~pySPACE.run.launch_live` and the
:mod:`~pySPACE.environments.live` using the default
:class:`NodeChain` and
* the benchmarking with :mod:`~pySPACE.run.launch` using
the :class:`BenchmarkNodeChain` with the
:mod:`~pySPACE.missions.operations.node_chain` operation.
.. seealso::
- :mod:`~pySPACE.missions.nodes`
- :ref:`node_list`
- :mod:`~pySPACE.missions.operations.node_chain` operation
.. image:: ../../graphics/launch_live.png
:width: 500
.. todo:: Documentation
This module extends/reimplements the original MDP flow class and
has some additional methods like reset(), save() etc.
Furthermore it supports the construction of NodeChains and
also running them inside nodes in parallel.
MDP is distributed under the following BSD license::
This file is part of Modular toolkit for Data Processing (MDP).
All the code in this package is distributed under the following conditions:
Copyright (c) 2003-2012, MDP Developers <mdp-toolkit-devel@lists.sourceforge.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Modular toolkit for Data Processing (MDP)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
if __name__ == '__main__':
# add root of the code to system path
file_path = os.path.dirname(os.path.abspath(__file__))
pyspace_path = file_path[:file_path.rfind('pySPACE')-1]
if not pyspace_path in sys.path:
sys.path.append(pyspace_path)
import cPickle
import gc
import logging
import multiprocessing
import shutil
import socket
import time
import uuid
import yaml
import pySPACE
from pySPACE.tools.filesystem import create_directory
from pySPACE.tools.socket_utils import talk, inform
from pySPACE.tools.conversion import python2yaml, replace_parameters_and_convert, replace_parameters
import copy
import warnings
import traceback
import numpy
class CrashRecoveryException(Exception):
"""Class to handle crash recovery """
def __init__(self, *args):
"""Allow crash recovery.
Arguments: (error_string, crashing_obj, parent_exception)
The crashing object is kept in self.crashing_obj
The triggering parent exception is kept in ``self.parent_exception``.
"""
errstr = args[0]
self.crashing_obj = args[1]
self.parent_exception = args[2]
# ?? python 2.5: super(CrashRecoveryException, self).__init__(errstr)
super(CrashRecoveryException,self).__init__(self, errstr)
def dump(self, filename = None):
"""
Save a pickle dump of the crashing object on filename.
If filename is None, the crash dump is saved on a file created by
the tempfile module.
Return the filename.
"""
import cPickle
import tempfile
if filename is None:
(fd, filename)=tempfile.mkstemp(suffix=".pic", prefix="NodeChainCrash_")
fl = os.fdopen(fd, 'w+b', -1)
else:
fl = open(filename, 'w+b', -1)
cPickle.dump(self.crashing_obj, fl)
fl.close()
return filename
class NodeChainException(Exception):
"""Base class for exceptions in node chains."""
pass
class NodeChainExceptionCR(CrashRecoveryException, NodeChainException):
"""Class to handle crash recovery """
def __init__(self, *args):
"""Allow crash recovery.
Arguments: (error_string, flow_instance, parent_exception)
The triggering parent exception is kept in self.parent_exception.
If ``flow_instance._crash_recovery`` is set, save a crash dump of
flow_instance on the file self.filename
"""
CrashRecoveryException.__init__(self, *args)
rec = self.crashing_obj._crash_recovery
errstr = args[0]
if rec:
if isinstance(rec, str):
name = rec
else:
name = None
name = CrashRecoveryException.dump(self, name)
dumpinfo = '\nA crash dump is available on: "%s"' % name
self.filename = name
errstr = errstr+dumpinfo
Exception.__init__(self, errstr)
class NodeChain(object):
""" Reimplement/overwrite mdp.Flow methods e.g., for supervised learning """
def __init__(self, node_sequence, crash_recovery=False, verbose=False):
""" Creates the NodeChain based on the node_sequence
.. note:: The NodeChain cannot be executed until all trainable
nodes have been trained, i.e. self.trained() == True.
"""
self._check_nodes_consistency(node_sequence)
self.flow = node_sequence
self.verbose = verbose
self.set_crash_recovery(crash_recovery)
# Register the direct predecessor of a node as its input
# (i.e. we assume linear flows)
for i in range(len(node_sequence) - 1):
node_sequence[i+1].register_input_node(node_sequence[i])
self.use_test_data = False
# set a default run number
self[-1].set_run_number(0)
# give this flow a unique identifier
self.id = str(uuid.uuid4())
self.handler = None
self.store_intermediate_results = True
def train(self, data_iterators=None):
""" Train NodeChain with data from iterator or source node
The method can proceed in two different ways:
* If no data is provided, it is checked that the first node of
the flow is a source node. If that is the case, the data provided
by this node is passed forward through the flow. During this
forward propagation, the flow is trained.
The request of the data is done in the last node.
* If a list of data iterators is provided,
it is checked that the NodeChain contains no source
or split nodes: these nodes already come with their own data handling
and should not be used when training is done in a different way.
Furthermore, split nodes are relevant for benchmarking.
One iterator for each node has to be given.
If only one is given, or no list, it is mapped to a list
with the same iterator for each node.
.. note:: The iterator approach is normally not used in pySPACE,
because pySPACE supplies the data with special
source nodes and is doing the training automatically
without explicit calls o
google/uncertainty-baselines | uncertainty_baselines/datasets/imagenet_test.py | Python | apache-2.0 | 1,536 | 0.002604
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ImageNet."""
import tensorflow as tf
import uncertainty_baselines as ub
# TODO(dusenberrymw): Use TFDS mocking.
class ImageNetDatasetTest(ub.datasets.DatasetTest):
# TODO(dusenberrymw): Rename to `test_dataset_size`.
def testDatasetSize(self):
super()._testDatasetSize(
ub.datasets.ImageNetDataset, (224, 224, 3), validation_percent=0.1)
def test_expected_features(self):
builder = ub.datasets.ImageNetDataset('train')
dataset = builder.load(batch_size=1)
self.assertEqual(list(dataset.element_spec.keys()), ['features', 'labels'])
builder_with_file_name = ub.datasets.ImageNetDataset(
'train', include_file_name=True)
dataset_with_file_name = builder_with_file_name.load(batch_size=1)
self.assertEqual(
list(dataset_with_file_name.element_spec.keys()),
['features', 'labels', 'file_name'])
if __name__ == '__main__':
tf.test.main()
intenthq/code-challenges | python/connected_graph/connected_graph.py | Python | mit | 637 | 0.00314
class Node(object):
"""Find if two nodes in a directed graph are connected.
Based on http://www.codewars.com/kata/53897d3187c26d42ac00040d
For example:
a -+-> b -> c -> e
   |
   +-> d
a.connected_to(a) == true
a.connected_to(b) == true
a.connected_to(c) == true
b.connected_to(d) == false"""
def __init__(self, value, edges=None):
self.value = value
#What is the purpose of this construct?
self.edges = edges or []
def connected_to(self, target):
raise ValueError("Not implemented")
def __eq__(self, other):
return self.value == other.value
karllessard/tensorflow | tensorflow/python/framework/subscribe_test.py | Python | apache-2.0 | 13,361 | 0.005838
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
@test_util.run_deprecated_v1
def testSideEffect(self):
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
with ops.control_dependencies([c]):
d = constant_op.constant(42)
n = math_ops.negative(c)
shared = []
def sub(t):
shared.append(t)
return t
c0 = c
self.assertTrue(c0.op in d.op.control_inputs)
c = subscribe.subscribe(c,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
# Verify that control dependencies are correctly moved to the subscription.
self.assertFalse(c0.op in d.op.control_inputs)
self.assertTrue(c.op in d.op.control_inputs)
with self.cached_session() as sess:
c_out = self.evaluate([c])
n_out = self.evaluate([n])
d_out = self.evaluate([d])
self.assertEqual(n_out, [-2])
self.assertEqual(c_out, [2])
self.assertEqual(d_out, [42])
self.assertEqual(shared, [2, 2, 2])
@test_util.run_deprecated_v1
def testSupportedTypes(self):
"""Confirm that supported types are correctly detected and handled."""
a = constant_op.constant(1)
b = constant_op.constant(1)
c = math_ops.add(a, b)
def sub(t):
return t
# Tuples.
subscribed = subscribe.subscribe(
(a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, tuple)
self._ExpectSubscribedIdentities(subscribed)
# Lists.
subscribed = subscribe.subscribe(
[a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, list)
self._ExpectSubscribedIdentities(subscribed)
# Dictionaries.
subscribed = subscribe.subscribe({
'first': a,
'second': b
}, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, dict)
self._ExpectSubscribedIdentities(subscribed.values())
# Namedtuples.
# pylint: disable=invalid-name
TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
# pylint: enable=invalid-name
pair = TensorPair(a, b)
subscribed = subscribe.subscribe(
pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertIsInstance(subscribed, TensorPair)
self._ExpectSubscribedIdentities(subscribed)
# Expect an exception to be raised for unsupported types.
with self.assertRaisesRegex(TypeError, 'has invalid type'):
subscribe.subscribe(c.name,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
@test_util.run_deprecated_v1
def testCaching(self):
"""Confirm caching of control output is recalculated between calls."""
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.control_dependencies([a]):
c = constant_op.constant(42)
shared = {}
def sub(t):
shared[t] = shared.get(t, 0) + 1
return t
a = subscribe.subscribe(a,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with ops.control_dependencies([b]):
d = constant_op.constant(11)
# If it was using outdated cached control_outputs then
# evaling would not trigger the new subscription.
b = subscribe.subscribe(b,
lambda t: script_ops.py_func(sub, [t], [t.dtype]))
with self.cached_session() as sess:
c_out = self.evaluate([c])
d_out = self.evaluate([d])
self.assertEqual(c_out, [42])
self.assertEqual(d_out, [11])
self.assertEqual(shared, {2: 1, 1: 1})
@test_util.run_deprecated_v1
def testIsSubscribedIdentity(self):
"""Confirm subscribed identity ops are correctly detected."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
idop = array_ops.identity(c)
c_sub = subscribe.subscribe(c, [])
self.assertFalse(subscribe._is_subscribed_identity(a))
self.assertFalse(subscribe._is_subscribed_identity(c))
self.assertFalse(subscribe._is_subscribed_identity(idop))
self.assertTrue(subscribe._is_subscribed_identity(c_sub))
@test_util.run_deprecated_v1
def testSubscribeExtend(self):
"""Confirm side effect are correctly added for different input types."""
a = constant_op.constant(1)
b = constant_op.constant(2)
c = math_ops.add(a, b)
shared = {}
def sub(t, name):
shared[name] = shared.get(name, 0) + 1
return t
    # Subscribe with a first side effect graph, passing an unsubscribed tensor.
sub_graph1 = lambda t: sub(t, 'graph1')
c_sub = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))
# Add a second side effect graph, passing the tensor returned by the
# previous call to subscribe().
sub_graph2 = lambda t: sub(t, 'graph2')
c_sub2 = subscribe.subscribe(
c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))
# Add a third side effect graph, passing the original tensor.
sub_graph3 = lambda t: sub(t, 'graph3')
c_sub3 = subscribe.subscribe(
c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))
# Make sure there's only one identity op matching the source tensor's name.
graph_ops = ops.get_default_graph().get_operations()
name_prefix = c.op.name + '/subscription/Identity'
identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
self.assertEqual(1, len(identity_ops))
# Expect the objects returned by subscribe() to reference the same tensor.
self.assertIs(c_sub, c_sub2)
self.assertIs(c_sub, c_sub3)
# Expect the three side effect graphs to have been evaluated.
with self.cached_session() as sess:
self.evaluate([c_sub])
self.assertIn('graph1', shared)
self.assertIn('graph2', shared)
self.assertIn('graph3', shared)
@test_util.run_v1_only('b/120545219')
def testSubscribeVariable(self):
"""Confirm that variables can be subscribed."""
v1 = variables.VariableV1(0.0)
v2 = variables.VariableV1(4.0)
add = math_ops.add(v1, v2)
assign_v1 = v1.assign(3.0)
shared = []
def sub(t):
shared.append(t)
return t
v1_sub = subscribe.subscribe(
v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
self.assertTrue(subscribe._is_subscribed_identity(v1_sub))
with self.cached_session() as sess:
# Initialize the variables first.
|
anhstudios/swganh
|
data/scripts/templates/object/static/particle/shared_particle_geyser_center.py
|
Python
|
mit
| 451
| 0.046563
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/particle/shared_particle_geyser_center.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
viaict/viaduct
|
app/views/lang.py
|
Python
|
mit
| 1,481
| 0
|
#!/usr/bin/env python
# encoding: utf-8
from flask import Blueprint, redirect, session, url_for, flash
from flask_babel import _
from flask_babel import refresh
from flask_login import current_user
from app import db, constants
from app.views import redirect_back
blueprint = Blueprint('lang', __name__, url_prefix='/lang')
@blueprint.route('/set/<path:lang>', methods=['GET'])
def set_user_lang(lang=None):
if lang not in constants.LANGUAGES.keys():
flash(_('Language unsupported on this site') + ': ' + lang, 'warning')
        return redirect(url_for('home.home'))
if current_user.is_anonymous:
flash(_('You need to be logged in to set a permanent language.'))
return redirect_back()
current_user.locale = lang
db.session.add(current_user)
db.session.commit()
refresh()
return redirect_back()
@blueprint.route('/<path:lang>', methods=['GET'])
def set_lang(lang=None):
if lang not in constants.LANGUAGES.keys():
flash(_('Language unsupported on this site') + ': ' + lang, 'warning')
return redirect(url_for('home.home'))
session['lang'] = lang
if current_user.is_authenticated:
msg = _("{} is now set as language for this session. To make this "
"setting permanent, <a href='{}'>click here</a>")
flash(msg.format(constants.LANGUAGES[lang],
url_for('lang.set_user_lang', lang=lang)),
'safe')
return redirect_back()
|
StackVista/sts-agent-integrations-core
|
yarn/check.py
|
Python
|
bsd-3-clause
| 20,533
| 0.00375
|
'''
YARN Cluster Metrics
--------------------
yarn.metrics.appsSubmitted The number of submitted apps
yarn.metrics.appsCompleted The number of completed apps
yarn.metrics.appsPending The number of pending apps
yarn.metrics.appsRunning The number of running apps
yarn.metrics.appsFailed The number of failed apps
yarn.metrics.appsKilled The number of killed apps
yarn.metrics.reservedMB The size of reserved memory
yarn.metrics.availableMB The amount of available memory
yarn.metrics.allocatedMB The amount of allocated memory
yarn.metrics.totalMB The amount of total memory
yarn.metrics.reservedVirtualCores The number of reserved virtual cores
yarn.metrics.availableVirtualCores The number of available virtual cores
yarn.metrics.allocatedVirtualCores The number of allocated virtual cores
yarn.metrics.totalVirtualCores The total number of virtual cores
yarn.metrics.containersAllocated The number of containers allocated
yarn.metrics.containersReserved          The number of containers reserved
yarn.metrics.containersPending The number of containers pending
yarn.metrics.totalNodes                  The total number of nodes
yarn.metrics.activeNodes The number of active nodes
yarn.metrics.lostNodes The number of lost nodes
yarn.metrics.unhealthyNodes The number of unhealthy nodes
yarn.metrics.decommissionedNodes The number of decommissioned nodes
yarn.metrics.rebootedNodes The number of rebooted nodes
YARN App Metrics
----------------
yarn.app.progress The progress of the application as a percent
yarn.app.startedTime The time in which application started (in ms since epoch)
yarn.app.finishedTime The time in which the application finished (in ms since epoch)
yarn.app.elapsedTime The elapsed time since the application started (in ms)
yarn.app.allocatedMB The sum of memory in MB allocated to the applications running containers
yarn.app.allocatedVCores The sum of virtual cores allocated to the applications running containers
yarn.app.runningContainers The number of containers currently running for the application
yarn.app.memorySeconds The amount of memory the application has allocated (megabyte-seconds)
yarn.app.vcoreSeconds The amount of CPU resources the application has allocated (virtual core-seconds)
YARN Node Metrics
-----------------
yarn.node.lastHealthUpdate The last time the node reported its health (in ms since epoch)
yarn.node.usedMemoryMB The total amount of memory currently used on the node (in MB)
yarn.node.availMemoryMB The total amount of memory currently available on the node (in MB)
yarn.node.usedVirtualCores The total number of vCores currently used on the node
yarn.node.availableVirtualCores The total number of vCores available on the node
yarn.node.numContainers The total number of containers currently running on the node
YARN Capacity Scheduler Metrics
-----------------
yarn.queue.root.maxCapacity The configured maximum queue capacity in percentage for root queue
yarn.queue.root.usedCapacity The used queue capacity in percentage for root queue
yarn.queue.root.capacity The configured queue capacity in percentage for root queue
yarn.queue.numPendingApplications The number of pending applications in this queue
yarn.queue.userAMResourceLimit.memory The maximum memory resources a user can use for Application Masters (in MB)
yarn.queue.userAMResourceLimit.vCores The maximum vCpus a user can use for Application Masters
yarn.queue.absoluteCapacity The absolute capacity percentage this queue can use of entire cluster
yarn.queue.userLimitFactor               The user limit factor set in the configuration
yarn.queue.userLimit                     The minimum user limit percent set in the configuration
yarn.queue.numApplications The number of applications currently in the queue
yarn.queue.usedAMResource.memory The memory resources used for Application Masters (in MB)
yarn.queue.usedAMResource.vCores The vCpus used for Application Masters
yarn.queue.absoluteUsedCapacity The absolute used capacity percentage this queue is using of the entire cluster
yarn.queue.resourcesUsed.memory The total memory resources this queue is using (in MB)
yarn.queue.resourcesUsed.vCores The total vCpus this queue is using
yarn.queue.AMResourceLimit.vCores The maximum vCpus this queue can use for Application Masters
yarn.queue.AMResourceLimit.memory The maximum memory resources this queue can use for Application Masters (in MB)
yarn.queue.capacity The configured queue capacity in percentage relative to its parent queue
yarn.queue.numActiveApplications The number of active applications in this queue
yarn.queue.absoluteMaxCapacity The absolute maximum capacity percentage this queue can use of the entire cluster
yarn.queue.usedCapacity The used queue capacity in percentage
yarn.queue.numContainers The number of containers being used
yarn.queue.maxCapacity The configured maximum queue capacity in percentage relative to its parent queue
yarn.queue.maxApplications The maximum number of applications this queue can have
yarn.queue.maxApplicationsPerUser The maximum number of active applications per user this queue can have
'''
# stdlib
from urlparse import urljoin, urlsplit, urlunsplit
# 3rd party
from requests.exceptions import Timeout, HTTPError, InvalidURL, ConnectionError
import requests
# Project
from checks import AgentCheck
from config import _is_affirmative
# Default settings
DEFAULT_RM_URI = 'http://localhost:8088'
DEFAULT_TIMEOUT = 5
DEFAULT_CUSTER_NAME = 'default_cluster'
DEFAULT_COLLECT_APP_METRICS = True
MAX_DETAILED_QUEUES = 100
# Path to retrieve cluster metrics
YARN_CLUSTER_METRICS_PATH = '/ws/v1/cluster/metrics'
# Path to retrieve YARN APPS
YARN_APPS_PATH = '/ws/v1/cluster/apps'
# Path to retrieve node statistics
YARN_NODES_PATH = '/ws/v1/cluster/nodes'
# Path to retrieve queue statistics
YARN_SCHEDULER_PATH = '/ws/v1/cluster/scheduler'
# Metric types
GAUGE = 'gauge'
INCREMENT = 'increment'
# Name of the service check
SERVICE_CHECK_NAME = 'yarn.can_connect'
# Application states to collect
YARN_APPLICATION_STATES = 'RUNNING'
# Cluster metrics identifier
YARN_CLUSTER_METRICS_ELEMENT = 'clusterMetrics'
# Cluster metrics for YARN
YARN_CLUSTER_METRICS = {
'appsSubmitted': ('yarn.metrics.apps_submitted', GAUGE),
'appsCompleted': ('yarn.metrics.apps_completed', GAUGE),
'appsPending': ('yarn.metrics.apps_pending', GAUGE),
'appsRunning': ('yarn.metrics.apps_running', GAUGE),
'appsFailed': ('yarn.metrics.apps_failed', GAUGE),
'appsKilled': ('yarn.metrics.apps_killed', GAUGE),
'reservedMB': ('yarn.metrics.reserved_mb', GAUGE),
'availableMB': ('yarn.metrics.available_mb', GAUGE),
'allocatedMB': ('yarn.metrics.allocated_mb', GAUGE),
'totalMB': ('yarn.metrics.total_mb', GAUGE),
'reservedVirtualCores': ('yarn.metrics.reserved_virtual_cores', GAUGE),
'availableVirtualCores': ('yarn.metrics.available_virtual_cores', GAUGE),
'allocatedVirtualCores': ('yarn.metrics.allocated_virtual_cores', GAUGE),
'totalVirtualCores': ('yarn.metrics.total_virtual_cores', GAUGE),
'containersAllocated': ('yarn.metrics.containers_allocated', GAUGE),
'containersReserved': ('yarn.metrics.containers_reserved', GAUGE),
'containersPending': ('yarn.metrics.containers_pending', GAUGE),
'totalNodes': ('yarn.metrics.total_nodes', GAUGE),
'activeNodes': ('yarn.metrics.active_nodes', GAUGE),
'lostNodes': ('yarn.metrics.lost_nodes', GAUGE),
'unhealthyNodes': ('yarn.metrics.unhealthy_nodes', GAUGE),
'decommissionedNodes': ('yarn.metrics.decommissioned_nodes', GAUGE),
'rebootedNodes': ('yarn.metrics.rebooted_nodes', GAUGE),
}
# Application metrics for YARN
YARN_APP_METRICS = {
'progress': ('yarn.
|
lampwins/netbox
|
netbox/dcim/migrations/0020_rack_desc_units.py
|
Python
|
apache-2.0
| 493
| 0.002028
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-28 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dcim', '0019_new_iface_form_factors'),
]
operations = [
migrations.AddField(
model_name='rack',
name='desc_units',
            field=models.BooleanField(default=False, help_text=b'Units are numbered top-to-bottom', verbose_name=b'Descending units'),
),
]
|
nicodv/kmodes
|
examples/soybean.py
|
Python
|
mit
| 1,461
| 0.000684
|
#!/usr/bin/env python
import numpy as np
from kmodes.kmodes import KModes
# reproduce results on small soybean data set
x = np.genfromtxt('soybean.csv', dtype=int, delimiter=',')[:, :-1]
y = np.genfromtxt('soybean.csv', dtype=str, delimiter=',', usecols=(35, ))
kmodes_huang = KModes(n_clusters=4, init='Huang', verbose=1)
kmodes_huang.fit(x)
# Print cluster centroids of the trained model.
print('k-modes (Huang) centroids:')
print(kmodes_huang.cluster_centroids_)
# Print training statistics
print('Final training cost: {}'.format(kmodes_huang.cost_))
print('Training iterations: {}'.format(kmodes_huang.n_iter_))
kmodes_cao = KModes(n_clusters=4, init='Cao', verbose=1)
kmodes_cao.fit(x)
# Print cluster centroids of the trained model.
print('k-modes (Cao) centroids:')
print(kmodes_cao.cluster_centroids_)
# Print training statistics
print('Final training cost: {}'.format(kmodes_cao.cost_))
print('Training iterations: {}'.format(kmodes_cao.n_iter_))
print('Results tables:')
for result in (kmodes_huang, kmodes_cao):
classtable = np.zeros((4, 4), dtype=int)
for ii, _ in enumerate(y):
classtable[int(y[ii][-1]) - 1, result.labels_[ii]] += 1
print("\n")
print(" | Cl. 1 | Cl. 2 | Cl. 3 | Cl. 4 |")
print("----|-------|-------|-------|-------|")
for ii in range(4):
prargs = tuple([ii + 1] + list(classtable[ii, :]))
print(" D{0} | {1:>2} | {2:>2} | {3:>2} | {4:>2} |".format(*prargs))
|
CloudNcodeInc/django-phonenumber-field
|
phonenumber_field/phonenumber.py
|
Python
|
mit
| 3,532
| 0.001982
|
#-*- coding: utf-8 -*-
import phonenumbers
from django.conf import settings
from django.core import validators
from django.utils.six import string_types
from phonenumbers.phonenumberutil import NumberParseException
class PhoneNumber(phonenumbers.phonenumber.PhoneNumber):
"""
    An extended version of phonenumbers.phonenumber.PhoneNumber that provides neater, more Pythonic
    accessor methods. This makes using a PhoneNumber instance much easier, especially in templates.
"""
format_map = {
'E164': phonenumbers.PhoneNumberFormat.E164,
'INTERNATIONAL': phonenumbers.PhoneNumberFormat.INTERNATIONAL,
'NATIONAL': phonenumbers.PhoneNumberFormat.NATIONAL,
'RFC3966': phonenumbers.PhoneNumberFormat.RFC3966,
}
@classmethod
def from_string(cls, phone_number, region=None):
phone_number_obj = cls()
if region is None:
region = getattr(settings, 'PHONENUMBER_DEFAULT_REGION', None) or getattr(settings, 'PHONENUMER_DEFAULT_REGION', None)
phonenumbers.parse(number=phone_number, region=region,
keep_raw_input=True, numobj=phone_number_obj)
return phone_number_obj
def __unicode__(self):
if self.is_valid():
if self.extension:
return u"%sx%s" % (self.as_e164, self.e
|
xtension)
return self.as_e164
return self.raw_input
def __str__(self):
return str(self.__unicode__())
def original_unicode(self):
return super(PhoneNumber, self).__unicode__()
def is_valid(self):
"""
checks whether the number supplied is actually valid
"""
return phonenumbers.is_valid_number(self)
def format_as(self, format):
if self.is_valid():
return phonenumbers.format_number(self, format)
else:
return self.raw_input
@property
def as_international(self):
return self.format_as(phonenumbers.PhoneNumberFormat.INTERNATIONAL)
@property
def as_e164(self):
return self.format_as(phonenumbers.PhoneNumberFormat.E164)
@property
def as_national(self):
return self.format_as(phonenumbers.PhoneNumberFormat.NATIONAL)
@property
def as_rfc3966(self):
return self.format_as(phonenumbers.PhoneNumberFormat.RFC3966)
def __len__(self):
return len(self.__unicode__())
def __eq__(self, other):
if type(other) == PhoneNumber:
return self.as_rfc3966 == other.as_rfc3966
else:
return super(PhoneNumber, self).__eq__(other)
def __hash__(self):
return hash(self.as_rfc3966)
def to_python(value):
if value in validators.EMPTY_VALUES: # None or ''
phone_number = None
elif value and isinstance(value, string_types):
try:
phone_number = PhoneNumber.from_string(phone_number=value)
except NumberParseException:
# the string provided is not a valid PhoneNumber.
phone_number = PhoneNumber(raw_input=value)
elif isinstance(value, phonenumbers.phonenumber.PhoneNumber) and not isinstance(value, PhoneNumber):
phone_number = PhoneNumber(value)
elif isinstance(value, PhoneNumber):
phone_number = value
else:
# TODO: this should somehow show that it has invalid data, but not completely die for
# bad data in the database. (Same for the NumberParseException above)
phone_number = None
return phone_number
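
# Usage sketch (not part of the original module; the phone number below is an
# arbitrary illustrative value). Only to_python() and the PhoneNumber helpers
# defined above are relied on here.
#
#     number = to_python('+441134960000')
#     if number is not None and number.is_valid():
#         print(number.as_e164)            # normalized E.164 form
#         print(number.as_international)   # human-readable international form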
|
musicbrainz/picard
|
picard/util/progresscheckpoints.py
|
Python
|
gpl-2.0
| 1,663
| 0.001203
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2020 Gabriel Ferreira
# Copyright (C) 2020 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class ProgressCheckpoints:
def __init__(self, num_jobs, num_checkpoints=10):
"""Create a set of unique an
|
d evenly spaced indexes of jobs, used as checkpoints for progress"""
self.num_jobs = num_jobs
self._checkpoints = {}
if num_checkpoints > 0:
self._offset = num_jobs/num_checkpoints
for i in range(1, num_checkpoints):
self._checkpoints[int(i*self._offset)] = 100*i//num_checkpoints
if num_jobs > 0:
self._checkpoints[num_jobs-1] = 100
def is_checkpoint(self, index):
if index in self._checkpoints:
return True
return False
def progress(self, index):
try:
return self._checkpoints[index]
except KeyError:
return None
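
# Minimal usage sketch (illustrative, not part of the original module): with 50
# jobs and the default 10 checkpoints, roughly every 5th job index reports an
# updated percentage, plus a final 100% at the last job.
if __name__ == '__main__':
    checkpoints = ProgressCheckpoints(num_jobs=50)
    for index in range(50):
        if checkpoints.is_checkpoint(index):
            print(index, checkpoints.progress(index))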
|
yyamano/RESTx
|
src/python/starter.py
|
Python
|
gpl-3.0
| 2,796
| 0.011803
|
"""
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Simple starter for stand-alone RESTx server.
"""
import os
import sys
import time
import getopt
# RESTx imports
import restx.settings as settings
import restx.logger as logger
from restx.core import RequestDispatcher
from restx.platform_specifics import *
from org.mulesoft.restx import Settings
from org.mulesoft.restx.util import Url
from org.mulesoft.restx.component.api import *
def print_help():
print \
"""
RESTx server (c) 2010 MuleSoft
Usage: jython starter.py [options]
Options:
-h, --help
Print this help screen.
-P, --port <num>
Port on which the server listens for requests.
-p, --pidfile <filename>
If specified, the PID of the server is stored in <filename>.
-l, --logfile <filename>
If specified, the filename for the logfile. If not specified,
output will go to the console.
-r, --rootdir <dirname>
Root directory of the RESTx install
"""
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "hl:P:p:r:", ["help", "logfile=", "port=", "pidfile=", "rootdir="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
print_help()
sys.exit(1)
port = settings.LISTEN_PORT
for o, a in opts:
if o in ("-p", "--pidfile"):
# Writing our process ID
pid = os.getpid()
f = open(a, "w")
f.write(str(pid))
f.close()
elif o in ("-h", "--help"):
print_help()
sys.exit(0)
elif o in ("-P", "--port"):
port = int(a)
elif o in ("-r", "--rootdir"):
rootdir = str(a)
settings.set_root_dir(rootdir)
elif o in ("-l", "--logfile"):
logger.set_logfile(a)
my_server = HttpServer(port, RequestDispatcher())
|
Azarn/mytodo
|
todo/migrations/0007_auto_20160530_1233.py
|
Python
|
apache-2.0
| 593
| 0.001686
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-30 12:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
        ('todo', '0006_auto_20160530_1210'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='category',
            field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='todo.Category'),
preserve_default=False,
),
]
|
kubeflow/pipelines
|
manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
|
Python
|
apache-2.0
| 16,824
| 0.00107
|
# Copyright 2020-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
def main():
settings = get_settings_from_env()
server = server_factory(**settings)
server.serve_forever()
def get_settings_from_env(controller_port=None,
visualization_server_image=None, frontend_image=None,
visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None,
minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None):
"""
Returns a dict of settings from environment variables relevant to the controller
Environment settings can be overridden by passing them here as arguments.
Settings are pulled from the all-caps version of the setting name. The
following defaults are used if those environment variables are not set
to enable backwards compatibility with previous versions of this script:
visualization_server_image: gcr.io/ml-pipeline/visualization-server
visualization_server_tag: value of KFP_VERSION environment variable
frontend_image: gcr.io/ml-pipeline/frontend
frontend_tag: value of KFP_VERSION environment variable
disable_istio_sidecar: Required (no default)
minio_access_key: Required (no default)
minio_secret_key: Required (no default)
"""
settings = dict()
settings["controller_port"] = \
controller_port or \
os.environ.get("CONTROLLER_PORT", "8080")
settings["visualization_server_image"] = \
visualization_server_image or \
os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server")
settings["frontend_image"] = \
frontend_image or \
os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend")
# Look for specific tags for each image first, falling back to
# previously used KFP_VERSION environment variable for backwards
# compatibility
settings["visualization_server_tag"] = \
visualization_server_tag or \
os.environ.get("VISUALIZATION_SERVER_TAG") or \
os.environ["KFP_VERSION"]
settings["frontend_tag"] = \
frontend_tag or \
os.environ.get("FRONTEND_TAG") or \
os.environ["KFP_VERSION"]
settings["disable_istio_sidecar"] = \
disable_istio_sidecar if disable_istio_sidecar is not None \
else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
settings["minio_access_key"] = \
minio_access_key or \
base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
settings["minio_secret_key"] = \
minio_secret_key or \
base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
# KFP_DEFAULT_PIPELINE_ROOT is optional
settings["kfp_default_pipeline_root"] = \
kfp_default_pipeline_root or \
os.environ.get("KFP_DEFAULT_PIPELINE_ROOT")
return settings
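
# Illustrative environment for a local run (sketch; all values are placeholders,
# only the variable names come from the lookups above):
#
#     export KFP_VERSION=1.7.0
#     export DISABLE_ISTIO_SIDECAR=false
#     export MINIO_ACCESS_KEY=minio
#     export MINIO_SECRET_KEY=minio123
#     export KFP_DEFAULT_PIPELINE_ROOT=minio://mlpipeline/v2/artifacts
#
#     settings = get_settings_from_env()  # everything else falls back to defaults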
def server_factory(visualization_server_image,
visualization_server_tag, frontend_image, frontend_tag,
disable_istio_sidecar, minio_access_key,
minio_secret_key, kfp_default_pipeline_root=None,
url="", controller_port=8080):
"""
Returns an HTTPServer populated with Handler with customized settings
"""
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
# parent is a namespace
namespace = parent.get("metadata", {}).get("name")
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("pipelines.kubeflow.org/enabled")
if pipeline_enabled != "true":
return {"status": {}, "children": []}
desired_configmap_count = 1
desired_resources = []
if kfp_default_pipeline_root:
desired_configmap_count = 2
desired_resources += [{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "kfp-launcher",
"namespace": namespace,
},
"data": {
"defaultPipelineRoot": kfp_default_pipeline_root,
},
}]
# Compute status based on observed state.
desired_status = {
"k
|
ubeflow-pipelines-ready":
len(children["Secret.v1"]) == 1 and
len(children["ConfigMap.v1"]) == desired_configmap_count and
len(children["Deployment.apps/v1"]) == 2 and
len(children["Service.v1"]) == 2 and
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and
len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and
"True" or "False"
}
# Generate the desired child object(s).
desired_resources += [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "metadata-grpc-configmap",
"namespace": namespace,
},
"data": {
"METADATA_GRPC_SERVICE_HOST":
"metadata-grpc-service.kubeflow",
"METADATA_GRPC_SERVICE_PORT": "8080",
},
},
# Visualization server related manifests below
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
},
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"image": f"{visualization_server_image}:{visualization_server_tag}",
"imagePullPolicy":
"IfNotPresent",
"name":
"ml-pipeline-visualizationserver",
"ports": [{
"containerPort": 8888
}],
"resources": {
"requests": {
"cpu": "50m",
"memory": "20
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/sympy/utilities/tests/test_source.py
|
Python
|
agpl-3.0
| 278
| 0.010791
|
from sympy.utilities.source import get_mod_func, get_class
def test_get_mod_func():
assert get_mod_func('sympy.core.basic.Basic') == ('sympy.core.basic', 'Basic')
def test_get_class():
_basic = get_class('sympy.core.basic.Basic')
    assert _basic.__name__ == 'Basic'
|
CoherentLabs/depot_tools
|
recipes/recipe_modules/gclient/config.py
|
Python
|
bsd-3-clause
| 18,342
| 0.019082
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
try:
_STRING_TYPE = basestring
except NameError: # pragma: no cover
_STRING_TYPE = str
from recipe_engine.config import config_item_context, ConfigGroup, BadConf
from recipe_engine.config import ConfigList, Dict, Single, Static, Set, List
from . import api as gclient_api
def BaseConfig(USE_MIRROR=True, CACHE_DIR=None,
BUILDSPEC_VERSION=None, deps_file='.DEPS.git', **_kwargs):
cache_dir = str(CACHE_DIR) if CACHE_DIR else None
return ConfigGroup(
solutions = ConfigList(
lambda: ConfigGroup(
name = Single(_STRING_TYPE),
url = Single((_STRING_TYPE, type(None)), empty_val=''),
deps_file = Single(_STRING_TYPE, empty_val=deps_file, required=False,
hidden=False),
managed = Single(bool, empty_val=True, required=False, hidden=False),
custom_deps = Dict(value_type=(_STRING_TYPE, type(None))),
custom_vars = Dict(value_type=(_STRING_TYPE, bool)),
safesync_url = Single(_STRING_TYPE, required=False),
revision = Single(
(_STRING_TYPE, gclient_api.RevisionResolver),
required=False, hidden=True),
)
),
deps_os = Dict(value_type=_STRING_TYPE),
hooks = List(_STRING_TYPE),
target_os = Set(_STRING_TYPE),
target_os_only = Single(bool, empty_val=False, required=False),
target_cpu = Set(_STRING_TYPE),
target_cpu_only = Single(bool, empty_val=False, required=False),
cache_dir = Static(cache_dir, hidden=False),
# If supplied, use this as the source root (instead of the first solution's
# checkout).
src_root = Single(_STRING_TYPE, required=False, hidden=True),
# Maps 'solution' -> build_property
# TODO(machenbach): Deprecate this in favor of the one below.
# http://crbug.com/713356
got_revision_mapping = Dict(hidden=True),
# Maps build_property -> 'solution'
got_revision_reverse_mapping = Dict(hidden=True),
# Addition revisions we want to pass in. For now there's a duplication
# of code here of setting custom vars AND passing in --revision. We hope
# to remove custom vars later.
revisions = Dict(
value_type=(_STRING_TYPE, gclient_api.RevisionResolver),
hidden=True),
# TODO(iannucci): HACK! The use of None here to indicate that we apply this
# to the solution.revision field is really terrible. I mostly blame
# gclient.
# Maps 'parent_build_property' -> 'custom_var_name'
# Maps 'parent_build_property' -> None
# If value is None, the property value will be applied to
# solutions[0].revision. Otherwise, it will be applied to
# solutions[0].custom_vars['custom_var_name']
parent_got_revision_mapping = Dict(hidden=True),
delete_unversioned_trees = Single(bool, empty_val=True, required=False),
# Maps canonical repo URL to (local_path, revision).
# - canonical gitiles repo URL is "https://<host>/<project>"
# where project does not have "/a/" prefix or ".git" suffix.
# - solution/path is then used to apply patches as patch root in
# bot_update.
# - if revision is given, it's passed verbatim to bot_update for
# corresponding dependency. Otherwise (i.e. None), the patch will be
# applied on top of version pinned in DEPS.
# This is essentially a allowlist of which repos inside a solution
# can be patched automatically by bot_update based on
# api.buildbucket.build.input.gerrit_changes[0].project
# For example, if bare chromium solution has this entry in repo_path_map
# 'https://chromium.googlesource.com/angle/angle': (
# 'src/third_party/angle', 'HEAD')
# then a patch to Angle project can be applied to a chromium src's
# checkout after first updating Angle's repo to its main's HEAD.
repo_path_map = Dict(value_type=tuple, hidden=True),
# Check out refs/branch-heads.
# TODO (machenbach): Only implemented for bot_update atm.
with_branch_heads = Single(
bool,
empty_val=False,
required=False,
hidden=True),
# Check out refs/tags.
with_tags = Single(
bool,
empty_val=False,
required=False,
hidden=True),
USE_MIRROR = Static(bool(USE_MIRROR)),
BUILDSPEC_VERSION= Static(BUILDSPEC_VERSION, hidden=True),
)
config_ctx = config_item_context(BaseConfig)
def ChromiumGitURL(_c, *pieces):
return '/'.join(('https://chromium.googlesource.com',) + pieces)
# TODO(phajdan.jr): Move to proper repo and add coverage.
def ChromeInternalGitURL(_c, *pieces): # pragma: no cover
return '/'.join(('https://chrome-internal.googlesource.com',) + pieces)
@config_ctx()
def android(c):
c.target_os.add('android')
@config_ctx()
def nacl(c):
s = c.solutions.add()
s.name = 'native_client'
s.url = ChromiumGitURL(c, 'native_client', 'src', 'native_client.git')
m = c.got_revision_mapping
m['native_client'] = 'got_revision'
@config_ctx()
def webports(c):
s = c.solutions.add()
s.name = 'src'
s.url = ChromiumGitURL(c, 'webports.git')
m = c.got_revision_mapping
m['src'] = 'got_revision'
@config_ctx()
def emscripten_releases(c):
s = c.solutions.add()
s.name = 'emscripten-releases'
s.url = ChromiumGitURL(c, 'emscripten-releases.git')
m = c.got_revision_mapping
m['emscripten-releases'] = 'got_revision'
@config_ctx()
def gyp(c):
s = c.solutions.add()
s.name = 'gyp'
s.url = ChromiumGitURL(c, 'external', 'gyp.git')
m = c.got_revision_mapping
m['gyp'] = 'got_revision'
@config_ctx()
def build(c):
s = c.solutions.add()
s.name = 'build'
s.url = ChromiumGitURL(c, 'chromium', 'tools', 'build.git')
m = c.got_revision_mapping
m['build'] = 'got_revision'
@config_ctx()
def depot_tools(c): # pragma: no cover
s = c.solutions.add()
s.name = 'depot_tools'
s.url = ChromiumGitURL(c, 'chromium', 'tools', 'depot_tools.git')
m = c.got_revision_mapping
m['depot_tools'] = 'got_revision'
@config_ctx()
def skia(c): # pragma: no cover
s = c.solutions.add()
s.name = 'skia'
s.url = 'https://skia.googlesource.com/skia.git'
m = c.got_revision_mapping
m['skia'] = 'got_revision'
@config_ctx()
def skia_buildbot(c): # pragma: no cover
s = c.solutions.add()
s.name = 'skia_buildbot'
s.url = 'https://skia.googlesource.com/buildbot.git'
m = c.got_revision_mapping
m['skia_buildbot'] = 'got_revision'
@config_ctx()
def chrome_golo(c): # pragma: no cover
s = c.solutions.add()
s.name = 'chrome_golo'
s.url = 'https://chrome-internal.googlesource.com/chrome-golo/chrome-golo.git'
c.got_revision_mapping['chrome_golo'] = 'got_revision'
@config_ctx()
def infra_puppet(c): # pragma: no cover
s = c.solutions.add()
s.name = 'infra_puppet'
s.url = 'https://chrome-internal.googlesource.com/infra/puppet.git'
c.got_revision_mapping['infra_puppet'] = 'got_revision'
@config_ctx()
def build_internal(c):
s = c.solutions.add()
s.name = 'build_internal'
s.url = 'https://chrome-internal.googlesource.com/chrome/tools/build.git'
c.got_revision_mapping['build_internal'] = 'got_revision'
# We do not use 'includes' here, because we want build_internal to be the
# first solution in the list as run_presubmit computes upstream revision
# from the first solution.
build(c)
c.got_revision_mapping['build'] = 'got_build_revision'
@config_ctx()
def build_internal_scripts_slave(c):
s = c.solutions.add()
s.name = 'build_internal/scripts/slave'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build_limited/scripts/slave.git')
c.got_revision_mapping['build_internal/scripts/slave'] = 'got_revision'
# We do not use 'includes' here, because we want build_internal to be the
# first solution in the list as run_presubmit computes upstream revision
  # from the first solution.
build(c)
  c.got_revision_mapping['build'] = 'got_build_revision'
@config_ctx()
def master_deps(c):
s = c.solutions.add()
s.name = 'master.DEPS'
s.url = ('https://chrome-internal.googlesource.c
|
tisnik/fabric8-analytics-common
|
a2t/src/auth.py
|
Python
|
apache-2.0
| 3,986
| 0.001254
|
"""Retrieve temporary access token by using refresh/offline token.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from fastlog import log
from urllib.parse import urljoin
import requests
# The following endpoint is used to retrieve the access token from OSIO AUTH service
_AUTH_ENDPOINT = "/api/token/refresh"
def check_access_token_attribute(token_structure):
"""Additional check for the access_token attribute."""
assert "access_token" in token_structure
item = token_structure["access_token"]
assert isinstance(item, str)
# 200 chars is quite conservative
assert len(token_structure["access_token"]) > 200
# TODO: better check for token internal structure
# 1) regexp-based
# 2) decode it + check if it has all required fields (possibly)
def check_token_type_attribute(token_structure):
"""Additional check for the token_type attribute."""
assert "token_type" in token_structure
item = token_structure["token_type"]
assert isinstance(item, str)
# we don't know about any other token type
assert item == "Bearer"
def check_expires_in_attribute(token_structure):
"""Additional check for the expires_in attribute."""
assert "token_type" in token_structure
item = token_structure["expires_in"]
assert isinstance(item, int)
assert item > 0
def check_refresh_expires_in_attribute(token_structure):
"""Additional check for the refresh_expires_in attribute."""
assert "token_type" in token_structure
item = token_structure["refresh_expires_in"]
assert isinstance(item, int)
    assert item > 0
def check_not_before_policy_attribute(token_structure):
"""A
|
dditional check for the not-before-policy attribute."""
assert "token_type" in token_structure
item = token_structure["not-before-policy"]
assert isinstance(item, int)
assert item >= 0
def get_and_check_token_structure(data):
"""Get the token structure from returned data and check the basic format."""
assert "token" in data
token_structure = data["token"]
assert "expires_in" in token_structure
check_access_token_attribute(token_structure)
check_token_type_attribute(token_structure)
check_expires_in_attribute(token_structure)
check_refresh_expires_in_attribute(token_structure)
check_not_before_policy_attribute(token_structure)
return token_structure
def retrieve_access_token(refresh_token, auth_service_url):
"""Retrieve temporary access token by using refresh/offline token."""
log.info("Trying to retrieve access token")
if refresh_token is None:
log.error("aborting: RECOMMENDER_REFRESH_TOKEN environment variable is not set")
return None
if auth_service_url is None:
log.error("aborting: OSIO_AUTH_SERVICE environment variable is not set")
return None
payload = {'refresh_token': refresh_token}
url = urljoin(auth_service_url, _AUTH_ENDPOINT)
response = requests.post(url, json=payload)
assert response is not None and response.ok, "Error communicating with the OSIO AUTH service"
data = response.json()
# check the basic structure of the response
token_structure = get_and_check_token_structure(data)
log.info("Token seems to be correct")
# seems like everything's ok, let's read the temporary access token
return token_structure["access_token"]
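
# Illustrative usage (sketch; the environment variable names are the ones
# referenced in the error messages above):
#
#     import os
#     token = retrieve_access_token(os.environ.get("RECOMMENDER_REFRESH_TOKEN"),
#                                   os.environ.get("OSIO_AUTH_SERVICE"))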
|
Eddy0402/Environment
|
vim/ycmd/third_party/bottle/test/tools.py
|
Python
|
gpl-3.0
| 5,129
| 0.005069
|
# -*- coding: utf-8 -*-
import bottle
import sys
import unittest
import wsgiref
import wsgiref.util
import wsgiref.validate
import mimetypes
import uuid
from bottle import tob, tonat, BytesIO, py3k, unicode
def warn(msg):
sys.stderr.write('WARNING: %s\n' % msg.strip())
def tobs(data):
''' Transforms bytes or unicode into a byte stream. '''
return BytesIO(tob(data))
def api(introduced, deprecated=None, removed=None):
current = tuple(map(int, bottle.__version__.split('-')[0].split('.')))
introduced = tuple(map(int, introduced.split('.')))
deprecated = tuple(map(int, deprecated.split('.'))) if deprecated else (99,99)
removed = tuple(map(int, removed.split('.'))) if removed else (99,100)
assert introduced < deprecated < removed
def decorator(func):
if current < introduced:
return None
elif current < deprecated:
return func
elif current < removed:
func.__doc__ = '(deprecated) ' + (func.__doc__ or '')
return func
else:
return None
return decorator
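
# Usage sketch (illustrative only): gate a test on the installed bottle version.
# The test exists from 0.12, gets a "(deprecated)" docstring prefix from 0.13,
# and disappears entirely once 0.14 is reached.
#
#     @api('0.12', deprecated='0.13', removed='0.14')
#     def test_some_legacy_behaviour(self):
#         ...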
def wsgistr(s):
if py3k:
return s.encode('utf8').decode('latin1')
else:
return s
class ServerTestBase(unittest.TestCase):
def setUp(self):
        ''' Create a new Bottle app and set it as the default_app '''
self.port = 8080
self.host = 'localhost'
self.app = bottle.app.push()
self.wsgiapp = wsgiref.validate.validator(self.app)
def urlopen(self, path, method='GET', post='', env=None):
result = {'code':0, 'status':'error', 'header':{}, 'body':tob('')}
def start_response(status, header):
result['code'] = int(status.split()[0])
result['status'] = status.split(None, 1)[-1]
for name, value in header:
name = name.title()
if name in result['header']:
result['header'][name] += ', ' + value
else:
result['header'][name] = value
env = env if env else {}
wsgiref.util.setup_testing_defaults(env)
env['REQUEST_METHOD'] = wsgistr(method.upper().strip())
env['PATH_INFO'] = wsgistr(path)
env['QUERY_STRING'] = wsgistr('')
if post:
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_LENGTH'] = str(len(tob(post)))
env['wsgi.input'].write(tob(post))
env['wsgi.input'].seek(0)
response = self.wsgiapp(env, start_response)
for part in response:
try:
result['body'] += part
except TypeError:
raise TypeError('WSGI app yielded non-byte object %s', type(part))
if hasattr(response, 'close'):
response.close()
del response
return result
def postmultipart(self, path, fields, files):
env = multipart_environ(fields, files)
return self.urlopen(path, method='POST', env=env)
def tearDown(self):
bottle.app.pop()
def assertStatus(self, code, route='/', **kargs):
self.assertEqual(code, self.urlopen(route, **kargs)['code'])
def assertBody(self, body, route='/', **kargs):
self.assertEqual(tob(body), self.urlopen(route, **kargs)['body'])
def assertInBody(self, body, route='/', **kargs):
result = self.urlopen(route, **kargs)['body']
if tob(body) not in result:
self.fail('The search pattern "%s" is not included in body:\n%s' % (body, result))
def assertHeader(self, name, value, route='/', **kargs):
self.assertEqual(value, self.urlopen(route, **kargs)['header'].get(name))
def assertHeaderAny(self, name, route='/', **kargs):
        self.assertTrue(self.urlopen(route, **kargs)['header'].get(name, None))
def assertInError(self, search, route='/', **kargs):
bottle.request.environ['wsgi.errors'].errors.seek(0)
err = bottle.request.environ['wsgi.errors'].errors.read()
if search not in err:
self.fail('The search pattern "%s" is not included in wsgi.error: %s' % (search, err))
def multipart_environ(fields, files):
boundary = str(uuid.uuid1())
env = {'REQUEST_METHOD':'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary='+boundary}
wsgiref.util.setup_testing_defaults(env)
boundary = '--' + boundary
body = ''
for name, value in fields:
body += boundary + '\n'
body += 'Content-Disposition: form-data; name="%s"\n\n' % name
body += value + '\n'
for name, filename, content in files:
mimetype = str(mimetypes.guess_type(filename)[0]) or 'application/octet-stream'
body += boundary + '\n'
body += 'Content-Disposition: file; name="%s"; filename="%s"\n' % \
(name, filename)
body += 'Content-Type: %s\n\n' % mimetype
body += content + '\n'
body += boundary + '--\n'
if isinstance(body, unicode):
body = body.encode('utf8')
env['CONTENT_LENGTH'] = str(len(body))
env['wsgi.input'].write(body)
env['wsgi.input'].seek(0)
return env
|
qsnake/gpaw
|
doc/tutorials/lattice_constants/iron.agts.py
|
Python
|
gpl-3.0
| 1,569
| 0.003187
|
def agts(queue):
iron = queue.add('iron.py', ncpus=8, walltime=8 * 60)
queue.add('iron.agts.py', deps=[iron],
creates=['Fe_conv_k.png', 'Fe_conv_h.png'])
if __name__ == '__main__':
import numpy as np
import pylab as plt
from ase.utils.eos import EquationOfState
from ase.io import read
def f(width, k, g):
filename = 'Fe-FD-%.2f-%02d-%2d.traj' % (width, k, g)
configs = read(filename + '@::2')
# Extract volumes and energies:
volumes = [a.get_volume() for a in configs]
        energies = [a.get_potential_energy() for a in configs]
        eos = EquationOfState(volumes, energies)
v0, e0, B = eos.fit()
return v0, e0, B
kk = [2, 4, 6, 8, 10, 12]
plt.figure(figsize=(6, 4))
for width in [0.05, 0.1, 0.15, 0.2]:
a = []
for k in kk:
v0, e0, B = f(width, k, 12)
a.append((2 * v0)**(1.0 / 3.0))
print ('%7.3f ' * 7) % ((width,) + tuple(a))
plt.plot(kk, a, label='width = %.2f eV' % width)
plt.legend(loc='upper right')
#plt.axis(ymin=2.83, ymax=2.85)
plt.xlabel('number of k-points')
plt.ylabel('lattice constant [Ang]')
plt.savefig('Fe_conv_k.png')
plt.figure(figsize=(6, 4))
gg = np.arange(8, 32, 4)
a = []
for g in gg:
v0, e0, B = f(0.1, 8, g)
a.append((2 * v0)**(1.0 / 3.0))
plt.plot(2.84 / gg, a, 'o-')
#plt.axis(ymin=2.83, ymax=2.85)
plt.xlabel('grid-spacing [Ang]')
plt.ylabel('lattice constant [Ang]')
plt.savefig('Fe_conv_h.png')
|
open-rnd/ros3d-www
|
ros3dui/system/__init__.py
|
Python
|
mit
| 1,104
| 0
|
#
# Copyright (c) 2015 Open-RnD Sp. z o.o.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
AllenInstitute/dipde
|
dipde/examples/singlepop_exponential_distribution.py
|
Python
|
gpl-3.0
| 2,322
| 0.009044
|
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from dipde.internals.internalpopulation import InternalPopulation
from dipde.internals.externalpopulation import ExternalPopulation
from dipde.internals.network import Network
from dipde.internals.connection import Connection as Connection
def get_simulation(dv=.001, update_method='approx', approx_order=None, tol=1e-8):
import scipy.stats as sps
# Create simulation:
b1 = ExternalPopulation(100)
    i1 = InternalPopulation(v_min=0, v_max=.02, dv=dv, update_method=update_method, approx_order=approx_order, tol=tol)
b1_i1 = Connection(b1, i1, 1, delays=0.0, weights=(sps.expon(0,.005), 201))
simulation = Network([b1, i1], [b1_i1])
return simulation
def example(show=True, save=False):
# Settings:
t0 = 0.
dt = .0001
dv = .0001
tf = .1
update_method = 'approx'
approx_order = 1
tol = 1e-14
# Run simulation:
simulation = get_simulation(dv=dv, update_method=update_method, approx_order=approx_order, tol=tol)
simulation.run(dt=dt, tf=tf, t0=t0)
i1 = simulation.population_list[1]
if show == True:
# Visualize:
plt.figure(figsize=(3,3))
plt.plot(i1.t_record, i1.firing_rate_record)
plt.plot([tf],[8.6687760498], 'r*')
plt.xlim([0,tf])
plt.ylim(ymin=0, ymax=10)
plt.xlabel('Time (s)')
plt.ylabel('Firing Rate (Hz)')
plt.tight_layout()
if save == True: plt.savefig('./singlepop_exponential_distribution.png')
plt.show()
return i1.t_record, i1.firing_rate_record
if __name__ == "__main__": example() # pragma: no cover
|
ammzen/SolveLeetCode
|
101SymmetricTree.py
|
Python
|
mit
| 1,224
| 0.00817
|
# -*- coding: utf-8 -*-
# Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
# For example, this binary tree is symmetric:
# 1
# / \
# 2 2
# / \ / \
# 3 4 4 3
# But the following is not:
# 1
# / \
# 2 2
# \ \
# 3 3
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @return a boolean
def isSymmetric(self, root):
if root is not None:
if not isTreeSym(root.left, root.right):
return False
return True
def isTreeSym(p, q):
if p == None and q == None:
return True
elif p and q:
        return p.val == q.val and isTreeSym(p.left, q.right) and isTreeSym(p.right, q.left)
else:
return False
if __name__ == '__main__':
s = Solution()
p1 = TreeNode(1)
p2 = TreeNode(2)
p3 = TreeNode(2)
p4 = None
p5 = TreeNode(3)
p6 = None
p7 = TreeNode(3)
p1.left = p2
p1.right = p3
p2.left = p4
p2.right = p5
p3.left = p6
p3.right = p7
print s.isSymmetric(p1)
|
HEPData/hepdata-converter
|
hepdata_converter/writers/yaml_writer.py
|
Python
|
gpl-2.0
| 3,048
| 0.00689
|
import yaml
# We try to dump using the CSafeDumper for speed improvements.
try:
from yaml import CSafeDumper as Dumper
except ImportError: #pragma: no cover
from yaml import SafeDumper as Dumper #pragma: no cover
from hepdata_converter.common import Option, OptionInitMixin
from hepdata_converter.writers import Writer
import os
def str_presenter(dumper, data):
if '\n' in data:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
class YAML(Writer):
help = 'Writes YAML output. Output should be defined as filepath to the directory where submission.yaml and associated ' \
'table files will be written'
@classmethod
def options(cls):
options = Writer.options()
options['single_file'] = Option('single-file', type=bool, default=False, variable_mapping='single_file',
required=False, help="If set output will be written to single yaml file, instead "
"of multiple files (separating data and metadata of the tables)")
return options
def __init__(self, *args, **kwargs):
super(YAML, self).__init__(single_file_output=True, *args, **kwargs)
Dumper.add_representer(str, str_presenter)
def write(self, data_in, data_out, *args, **kwargs):
"""
:param data_in:
:type data_in: hepconverter.parsers.ParsedData
:param data_out: path of the directory to which yaml files will be written
:type data_out: str
:param args:
:param kwargs:
"""
tables = data_in.tables
data = data_in.data
if self.hepdata_doi:
            data['hepdata_doi'] = self.hepdata_doi
for table in tables:
table.metadata['table_doi'] = self.hepdata_doi + '/t' + str(table.index)
if not isinstance(data_out, str) and not self.single_file:
raise ValueError("output is not string, and single_file flag is not specified")
        if not self.single_file:
self.create_dir(data_out)
with open(os.path.join(data_out, 'submission.yaml'), 'w') as submission_file:
yaml.dump_all([data] + [table.metadata for table in tables], submission_file, Dumper=Dumper, default_flow_style=None)
for table in tables:
with open(os.path.join(data_out, table.data_file), 'w') as table_file:
yaml.dump(table.data, table_file, Dumper=Dumper, default_flow_style=None)
else:
if isinstance(data_out, str):
with open(data_out, 'w') as submission_file:
yaml.dump_all([data] + [table.all_data for table in tables], submission_file, Dumper=Dumper, default_flow_style=None)
else: # expect filelike object
yaml.dump_all([data] + [table.all_data for table in tables], data_out, Dumper=Dumper, default_flow_style=None)
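
# Illustrative call (sketch; `parsed` stands for a hepdata_converter ParsedData
# instance obtained from one of the parsers, and the output path is a placeholder):
#
#     writer = YAML(single_file=False)
#     writer.write(parsed, '/tmp/hepdata-out')   # writes submission.yaml plus one file per table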
|
franklai/lyric-get
|
lyric_engine/modules/uta_net.py
|
Python
|
mit
| 3,585
| 0.000561
|
import logging
from utils import common
from utils.lyric_base import LyricBase
site_class = 'UtaNet'
site_index = 'uta_net'
site_keyword = 'uta-net'
site_url = 'http://www.uta-net.com/'
test_url = 'http://www.uta-net.com/song/138139/'
test_expect_length = 1089
# current url format
# 'http://www.uta-net.com/song/138139/'
#
# former url
# 'http://www.uta-net.com/user/phplib/view_0.php?ID=17248'
class UtaNet(LyricBase):
def parse_page(self):
url = self.url
if not self.find_lyric(url):
logging.info('Failed to get lyric of url [%s]', url)
return False
if not self.find_song_info(url):
logging.info('Failed to get song info of url [%s]', url)
return True
def find_lyric(self, url):
pattern = '/[a-z]+/([0-9]+)/'
song_id = common.get_first_group_by_pattern(url, pattern)
if not song_id:
# try old pattern
# http://www.uta-net.com/user/phplib/view_0.php?ID=17248
pattern = 'ID=([0-9]+)'
song_id = common.get_first_group_by_pattern(url, pattern)
if not song_id:
logging.info('Failed to get id of url [%s]', url)
return False
# http://www.uta-net.com/user/phplib/svg/showkasi.php?ID=17248&WIDTH=560&HEIGHT=756&FONTSIZE=15&t=1489258939
showkasi_pattern = 'http://www.uta-net.com/user/phplib/svg/showkasi.php?ID=%s'
song_url = showkasi_pattern % (song_id, )
data = common.get_url_content(song_url)
if not data:
logging.info('Failed to get content of url [%s]', song_url)
return False
prefix = '<svg '
suffix = '</svg>'
lyric = common.find_string_by_prefix_suffix(data, prefix, suffix, True)
if not lyric:
logging.error('Failed to get lyric of url [%s]', url)
return False
lyric = lyric.replace('</text>', '\n')
lyric = common.strip_tags(lyric)
lyric = lyric.strip()
# test for half to full
lyric = common.half2full(lyric)
self.lyric = lyric
return True
    def find_song_info(self, url):
ret = True
        html = common.get_url_content(url)
patterns = {
'title': '<h2[^>]*>([^<]+)</h2>',
'artist': '歌手:<h3.*?><a href="/artist/[0-9]+/".*?>(.+?)</a></h3>',
'lyricist': '作詞:<h4.*?>([^<]+)</h4>',
'composer': '作曲:<h4.*?>([^<]+)</h4>'
}
self.set_attr(patterns, html)
return ret
def get_lyric(url):
obj = UtaNet(url)
return obj.get()
def download_search_result():
url = 'http://www.uta-net.com/search/?Aselect=1&Bselect=3&Keyword=KOKIA&sort=6'
output = 'uta_net.search.txt'
html = common.get_url_content(url)
if not html:
logging.error('Failed to download url [%s]' % (url, ))
return False
pattern = '<td class="side td1"><a href="([^"]+)">'
import re
import urllib.parse
songs = re.findall(pattern, html)
out = open(output, 'wb')
for song in songs:
print(song)
song_url = urllib.parse.urljoin(site_url, song)
full = get_lyric(song_url)
out.write(full.encode('utf-8'))
out.write('\n\n=====\n')
out.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
# download_search_result()
# exit()
# url = 'http://www.uta-net.com/song/181206/'
url = test_url
full = get_lyric(url)
if not full:
print('damn !')
exit()
print(full)
|
pypa/warehouse
|
warehouse/admin/__init__.py
|
Python
|
apache-2.0
| 2,024
| 0.000988
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from warehouse.admin.services import ISponsorLogoStorage
from warehouse.utils.static import ManifestCacheBuster
def includeme(config):
sponsorlogos_storage_class = config.maybe_dotted(
config.registry.settings["sponsorlogos.backend"]
)
config.register_service_factory(
sponsorlogos_storage_class.create_service, ISponsorLogoStorage
)
    # Setup Jinja2 Rendering for the Admin application
config.add_jinja2_search_path("templates", name=".html")
# Setup our static assets
prevent_http_cache = config.get_settings().get("pyramid.prevent_http_cache", False)
config.add_static_view(
"admin/static",
"warehouse.admin:static/dist",
# Don't cache at all if prevent_http_cache is true, else we'll cache
# the files for 10 years.
        cache_max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.add_cache_buster(
"warehouse.admin:static/dist/",
ManifestCacheBuster(
"warehouse.admin:static/dist/manifest.json",
reload=config.registry.settings["pyramid.reload_assets"],
strict=not prevent_http_cache,
),
)
config.whitenoise_add_files("warehouse.admin:static/dist/", prefix="/admin/static/")
config.whitenoise_add_manifest(
"warehouse.admin:static/dist/manifest.json", prefix="/admin/static/"
)
# Add our routes
config.include(".routes")
# Add our flags
config.include(".flags")
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/__init__.py
|
Python
|
mit
| 2,176
| 0.011949
|
"""SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://sympy.org
"""
from __future__ import absolute_import, print_function
from sympy.release import __version__
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else: # Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug()
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, mathematica_code, octave_code, \
latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
|
edgedb/edgedb
|
tests/test_edgeql_expr_aliases.py
|
Python
|
apache-2.0
| 32,398
| 0
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2017-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os.path
from edb.testbase import server as tb
from edb.tools import test
import edgedb
class TestEdgeQLExprAliases(tb.QueryTestCase):
'''The scope is to test expression aliases.'''
    SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'cards.esdl')
SETUP = [os.path.join(os.path.dirname(__file__), 'schemas',
'cards_setup.edgeql')]
async def test_edgeql_aliases_basic_01(self):
        await self.assert_query_result(
r'''
SELECT AirCard {
name,
owners: {
name
} ORDER BY .name
} ORDER BY AirCard.name;
''',
[
{
'name': 'Djinn',
'owners': [{'name': 'Carol'}, {'name': 'Dave'}]
},
{
'name': 'Giant eagle',
'owners': [{'name': 'Carol'}, {'name': 'Dave'}]
},
{
'name': 'Sprite',
'owners': [{'name': 'Carol'}, {'name': 'Dave'}]
}
],
)
async def test_edgeql_aliases_basic_02(self):
await self.con.execute('''
CREATE ALIAS expert_map := (
SELECT {
('Alice', 'pro'),
('Bob', 'noob'),
('Carol', 'noob'),
('Dave', 'casual'),
}
);
''')
await self.assert_query_result(
r'''
SELECT expert_map
ORDER BY expert_map;
''',
[
['Alice', 'pro'],
['Bob', 'noob'],
['Carol', 'noob'],
['Dave', 'casual'],
],
)
await self.con.execute('''
DROP ALIAS expert_map;
''')
async def test_edgeql_aliases_basic_03(self):
await self.con.execute('''
CREATE ALIAS scores := (
SELECT {
(name := 'Alice', score := 100, games := 10),
(name := 'Bob', score := 11, games := 2),
(name := 'Carol', score := 31, games := 5),
(name := 'Dave', score := 78, games := 10),
}
);
''')
await self.assert_query_result(
r'''
SELECT scores ORDER BY scores.name;
''',
[
{'name': 'Alice', 'score': 100, 'games': 10},
{'name': 'Bob', 'score': 11, 'games': 2},
{'name': 'Carol', 'score': 31, 'games': 5},
{'name': 'Dave', 'score': 78, 'games': 10},
],
)
await self.assert_query_result(
r'''
SELECT <tuple<str, int64, int64>>scores
ORDER BY scores.name;
''',
[
['Alice', 100, 10],
['Bob', 11, 2],
['Carol', 31, 5],
['Dave', 78, 10],
],
)
await self.assert_query_result(
r'''
SELECT <tuple<name: str, points: int64, plays: int64>>scores
ORDER BY scores.name;
''',
[
{'name': 'Alice', 'points': 100, 'plays': 10},
{'name': 'Bob', 'points': 11, 'plays': 2},
{'name': 'Carol', 'points': 31, 'plays': 5},
{'name': 'Dave', 'points': 78, 'plays': 10},
],
)
await self.con.execute('''
DROP ALIAS scores;
''')
async def test_edgeql_aliases_basic_04(self):
await self.con.execute('''
CREATE ALIAS levels := {'pro', 'casual', 'noob'};
''')
await self.assert_query_result(
r'''
SELECT levels;
''',
{'pro', 'casual', 'noob'},
)
async def test_edgeql_aliases_create_01(self):
await self.con.execute(r'''
CREATE ALIAS DCard := (
SELECT Card {
# This is an identical computable to the one
# present in the type, but it must be legal to
# override the link with any compatible
# expression.
owners := (
SELECT Card.<deck[IS User] {
name_upper := str_upper(.name)
}
)
} FILTER Card.name LIKE 'D%'
);
''')
await self.assert_query_result(
r'''
SELECT DCard {
name,
owners: {
name_upper,
} ORDER BY .name
} ORDER BY DCard.name;
''',
[
{
'name': 'Djinn',
'owners': [{'name_upper': 'CAROL'},
{'name_upper': 'DAVE'}],
},
{
'name': 'Dragon',
'owners': [{'name_upper': 'ALICE'},
{'name_upper': 'DAVE'}],
},
{
'name': 'Dwarf',
'owners': [{'name_upper': 'BOB'},
{'name_upper': 'CAROL'}],
}
],
)
await self.con.execute('DROP ALIAS DCard;')
# Check that we can recreate the alias.
await self.con.execute(r'''
CREATE ALIAS DCard := (
SELECT Card {
owners := (
SELECT Card.<deck[IS User] {
name_upper := str_upper(.name)
}
)
} FILTER Card.name LIKE 'D%'
);
''')
await self.assert_query_result(
r'''
WITH
MODULE schema,
DCardT := (SELECT ObjectType
FILTER .name = 'default::DCard'),
DCardOwners := (SELECT DCardT.links
FILTER .name = 'owners')
SELECT
DCardOwners {
target[IS ObjectType]: {
name,
pointers: {
name
} FILTER .name = 'name_upper'
}
}
''',
[{
'target': {
'name': 'default::__DCard__owners',
'pointers': [
{
'name': 'name_upper',
}
]
}
}]
)
async def test_edgeql_aliases_filter_01(self):
await self.assert_query_result(
r'''
SELECT FireCard {name}
FILTER FireCard = DaveCard
ORDER BY FireCard.name;
''',
[{'name': 'Dragon'}],
)
async def test_edgeql_aliases_filter02(self):
await self.assert_query_result(
r'''
SELECT AirCard {name}
FILTER AirCard NOT IN (SELECT
|
modelblocks/modelblocks-release
|
resource-general/scripts/itemmeasures2lineitems.py
|
Python
|
gpl-3.0
| 588
| 0.001701
|
import sys
sentid_prev = 0
first_line = True
first_word = True
for line in sys.stdin:
row = line.strip().split()
if first_line:
word_ix = row.index('word')
sentid_ix = row.index('sentid')
first_line = False
else:
word = row[word_ix]
        sentid = row[sentid_ix]
if first_word:
delim = ''
first_word = False
elif sentid == sentid_prev:
delim = ' '
else:
delim = '\n'
sentid_prev = sentid
sys.stdout.write(delim + word)
sys.stdout.write('\n')
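# Hedged worked example (the column values are made up): given an itemmeasures
# table on stdin whose header contains at least 'word' and 'sentid', e.g.
#
#   word sentid
#   the  0
#   dog  0
#   ran  1
#
# the loop above joins words sharing a sentid with spaces and separates
# sentences with newlines, printing:
#
#   the dog
#   ran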
|
geggo/gpyfft
|
setup.py
|
Python
|
lgpl-3.0
| 3,106
| 0.00322
|
import os
import platform
from setuptools import setup, Extension
from distutils.util import convert_path
from Cython.Build import cythonize
system = platform.system()
## paths settings
# Linux
if 'Linux' in system:
CLFFT_DIR = r'/home/gregor/devel/clFFT'
CLFFT_LIB_DIRS = [r'/usr/local/lib64']
CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'src', 'include'), ]
CL_INCL_DIRS = ['/opt/AMDAPPSDK-3.0/include']
EXTRA_COMPILE_ARGS = []
EXTRA_LINK_ARGS = []
#Windows
elif 'Windows' in system:
CLFFT_DIR = r'C:\Users\admin\Devel\clFFT-Full-2.12.2-Windows-x64'
    CLFFT_LIB_DIRS = [os.path.join(CLFFT_DIR, 'lib64', 'import')]
CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'include'), ]
CL_DIR = os.getenv('AMDAPPSDKROOT')
CL_INCL_DIRS = [os.path.join(CL_DIR, 'include')]
EXTRA_COMPILE_ARGS = []
EXTRA_LINK_ARGS = []
# macOS
elif 'Darwin' in system:
CLFFT_DIR = r'/Users/gregor/Devel/clFFT'
CLFFT_LIB_DIRS = [r'/Users/gregor/Devel/clFFT/src/library']
CLFFT_INCL_DIRS = [os.path.join(CLFFT_DIR, 'src', 'include'), ]
CL_INCL_DIRS = []
EXTRA_COMPILE_ARGS = ['-stdlib=libc++']
EXTRA_LINK_ARGS = ['-stdlib=libc++']
import Cython.Compiler.Options
Cython.Compiler.Options.generate_cleanup_code = 2
extensions = [
Extension("gpyfft.gpyfftlib",
[os.path.join('gpyfft', 'gpyfftlib.pyx')],
include_dirs= CLFFT_INCL_DIRS + CL_INCL_DIRS,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
libraries=['clFFT'],
library_dirs = CLFFT_LIB_DIRS,
language='c++',
)
]
def copy_clfftdll_to_package():
import shutil
shutil.copy(
os.path.join(CLFFT_DIR, 'bin', 'clFFT.dll'),
'gpyfft')
shutil.copy(
os.path.join(CLFFT_DIR, 'bin', 'StatTimer.dll'),
'gpyfft')
print("copied clFFT.dll, StatTimer.dll")
package_data = {}
if 'Windows' in platform.system():
copy_clfftdll_to_package()
package_data.update({'gpyfft': ['clFFT.dll', 'StatTimer.dll']},)
def get_version():
main_ns = {}
version_path = convert_path('gpyfft/version.py')
with open(version_path) as version_file:
exec(version_file.read(), main_ns)
version = main_ns['__version__']
return version
def get_readme():
dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dirname, "README.md"), "r") as fp:
long_description = fp.read()
return long_description
install_requires = ["numpy", "pyopencl"]
setup_requires = ["numpy", "cython"]
setup(
name='gpyfft',
version=get_version(),
description='A Python wrapper for the OpenCL FFT library clFFT',
long_description=get_readme(),
url=r"https://github.com/geggo/gpyfft",
maintainer='Gregor Thalhammer',
maintainer_email='gregor.thalhammer@gmail.com',
license='LGPL',
packages=['gpyfft', "gpyfft.test"],
ext_modules=cythonize(extensions),
package_data=package_data,
install_requires=install_requires,
setup_requires=setup_requires,
)
|
RRCKI/panda-server
|
pandaserver/test/execute.py
|
Python
|
apache-2.0
| 2,194
| 0.014585
|
import sys
import time
import commands
import userinterface.Client as Client
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec
if len(sys.argv)>1:
site = sys.argv[1]
else:
site = None
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
destName = 'BNL_ATLAS_2'
jobList = []
for i in range(20):
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = commands.getoutput('uuidgen')
job.AtlasRelease = 'Atlas-11.0.41'
#job.AtlasRelease = 'Atlas-11.0.3'
job.homepackage = 'AnalysisTransforms'
job.transformation = 'https://gridui01.usatlas.bnl.gov:24443/dav/test/runAthena'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.currentPriority = 100
job.prodSourceLabel = 'user'
job.computingSite = site
#job.prodDBlock = "pandatest.b1599dfa-cd36-4fc5-92f6-495781a94c66"
job.prodDBlock = "pandatest.f228b051-077b-4f81-90bf-496340644379"
fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = "lib.f228b051-077b-4f81-90bf-496340644379.tgz"
fileI.type = 'input'
job.addFile(fileI)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % commands.getoutput('uuidgen')
fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE     = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
    fileOZ = FileSpec()
fileOZ.lfn = "%s.pool.root" % commands.getoutput('uuidgen')
fileOZ.destinationDBlock = job.destinationDBlock
fileOZ.destinationSE = job.destinationSE
fileOZ.dataset = job.destinationDBlock
fileOZ.type = 'output'
job.addFile(fileOZ)
job.jobParameters="""-l %s -r PhysicsAnalysis/AnalysisCommon/UserAnalysis/UserAnalysis-00-05-11/run -j " jobOptions.pythia.py" -i "[]" -o "{'Stream1': '%s'}" """ % (fileI.lfn,fileOZ.lfn)
jobList.append(job)
s,o = Client.submitJobs(jobList)
print "---------------------"
print s
for x in o:
print "PandaID=%s" % x[0]
|
metpy/MetPy
|
v0.8/_downloads/upperair_soundings.py
|
Python
|
bsd-3-clause
| 7,536
| 0.001725
|
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===========================
Upper Air Sounding Tutorial
===========================
Upper air analysis is a staple of many synoptic and mesoscale analysis
problems. In this tutorial we will gather weather balloon data, plot it,
perform a series of thermodynamic calculations, and summarize the results.
To learn more about the Skew-T diagram and its use in weather analysis and
forecasting, check out `this <https://homes.comet.ucar.edu/~alanbol/aws-tr-79-006.pdf>`_
air weather service guide.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# Upper air data can be obtained using the siphon package, but for this tutorial we will use
# some of MetPy's sample data. This event is the Veterans Day tornado outbreak in 2002.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('nov11_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
##########################################################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.get_wind_components(wind_speed, wind_dir)
##########################################################################
# Thermodynamic Calculations
# --------------------------
#
# Often we will want to calculate some thermodynamic parameters of a
# sounding. The MetPy calc module has many such calculations already implemented!
#
# * **Lifting Condensation Level (LCL)** - The level at which an air parcel's
# relative humidity becomes 100% when lifted along a dry adiabatic path.
# * **Parcel Path** - Path followed by a hypothetical parcel of air, beginning
# at the surface temperature/pressure and rising dry adiabatically until
#   reaching the LCL, then rising moist adiabatically.
# Calculate the LCL
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
print(lcl_pressure, lcl_temperature)
# Calculate the parcel profile.
parcel_prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
##########################################################################
# Basic Skew-T Plotting
# ---------------------
#
# The Skew-T (log-P) diagram is the standard way to view rawinsonde data. The
# y-axis is height in pressure coordinates and the x-axis is temperature. The
# y coordinates are plotted on a logarithmic scale and the x coordinate system
# is skewed. An explanation of skew-T interpretation is beyond the scope of this
# tutorial, but here we will plot one that can be used for analysis or
# publication.
#
# The most basic skew-T can be plotted with only five lines of Python.
# These lines perform the following tasks:
#
# 1. Create a ``Figure`` object and set the size of the figure.
#
# 2. Create a ``SkewT`` object
#
# 3. Plot the pressure and temperature (note that the pressure,
# the independent variable, is first even though it is plotted on the y-axis).
#
# 4. Plot the pressure and dewpoint temperature.
#
# 5. Plot the wind barbs at the appropriate pressure using the u and v wind
# components.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r', linewidth=2)
skew.plot(p, Td, 'g', linewidth=2)
skew.plot_barbs(p, u, v)
# Show the plot
plt.show()
##########################################################################
# Advanced Skew-T Plotting
# ------------------------
#
# Fiducial lines indicating dry adiabats, moist adiabats, and mixing ratio are
# useful when performing further analysis on the Skew-T diagram. Often the
# 0C isotherm is emphasized and areas of CAPE and CIN are shaded.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL temperature as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
##########################################################################
# Adding a Hodograph
# ------------------
#
# A hodograph is a polar representation of the wind profile measured by the rawinsonde.
# Winds at different levels are plotted as vectors with their tails at the origin, the angle
# from the vertical axis representing the direction, and the length representing the speed.
# The line plotted on the hodograph is a line connecting the tips of these vectors,
# which are not drawn.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Create a hodograph
# Create an inset axes object that is 40% width and height of the
# figure and put it in the upper right hand corner.
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, wind_speed) # Plot a line colored by wind speed
# Show the plot
plt.show()
|
aolsux/SamuROI
|
doc/examples/script.py
|
Python
|
mit
| 4,482
| 0.004685
|
import numpy
from samuroi.gui.samuroiwindow import SamuROIWindow
from samuroi.plugins.tif import load_tif
from samuroi.plugins.swc import load_swc
from samuroi.masks.segmentation import Segmentation as SegmentationMask
# requirements for template matching and post processing
from samuroi.event.biexponential import BiExponentialParameters
from samuroi.event.template_matching import template_matching
from samuroi.util.postprocessors import PostProcessorPipe, DetrendPostProcessor
import sys
from PyQt4 import QtGui
import argparse
parser = argparse.ArgumentParser(description='Open SamuROI and load some data.')
parser.add_argument('filename', type=str, help='The filename of the tif file to use as data.')
parser.add_argument('--swc', dest='swcfiles', type=str, action='append', help='Filename of swc file to load.')
parser.add_argument('--segmentation', dest='segmentations', type=str, action='append',
help='Filename of segmentations to load. (.npy files)')
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
args = parser.parse_args()
data = load_tif(args.filename)
morphology = numpy.max(data, axis=-1)
from samuroi.plugins.baseline import linbleeched_deltaF
# data = linbleeched_deltaF(data)
# show the gui for the filtered data
mainwindow = SamuROIWindow(data=data, morphology=morphology)
for filename in args.swcfiles:
swc = load_swc(filename)
mainwindow.segmentation.load_swc(swc)
if args.segmentations is not None:
for filename in args.segmentations:
segdata = numpy.load(filename)
seg = SegmentationMask(data=segdata, name="filename")
mainwindow.segmentation.masks.add(seg)
# here we can set the template parameters
params = BiExponentialParameters(tau1=150., tau2=1.)
kernel = params.kernel()
    # crop the long decay phase of the kernel, otherwise boundary effects get too strong
    # and bursts of events cannot be detected correctly, since they do not fully decay
kernel = kernel[0:120]
# if required one can zero pad the kernel to the left to enforce a "silent" phase before an event
# this will again lead to trouble when detecting bursts of events
# kernel = numpy.concatenate((numpy.zeros(number_of_required_silent_frames), kernel))
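    # hedged concrete example of the zero padding mentioned above (the frame
    # count of 10 is illustrative only, not a recommended value):
    # kernel = numpy.concatenate((numpy.zeros(10), kernel))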
def matching_postprocess(trace):
# run the template matching algorithm
result = template_matching(data=trace, kernel=kernel, threshold=4.)
return result.crit
# we either can use the matching postprocessor directly, or add a detrend step in front of it
postprocessor = PostProcessorPipe()
postprocessor.append(DetrendPostProcessor())
postprocessor.append(matching_postprocess)
# add a button to the main window postprocessor toolbar for enabling the template matching
action = mainwindow.toolbar_postprocess.addAction("template matching")
action.setToolTip("Run first linear detrend and then apply the template matching to the trace, then show the"
"detection criterion instead of the trace data.")
# a variable for the line plotting the best fit in the trace widget
fitcurve = None
def install_pp(pp):
if fitcurve is not None:
            fitcurve.remove()
mainwindow.segmentation.postprocessor = postprocessor
# if we click the button in the main window to install the postprocessor
action.triggered.connect(install_pp)
def redraw_fit():
global fitcurve
# the index of the frame of interest
i = mainwindow.segmentation.active_frame
        # first shift to the active frame, then go back half the kernel size, because we want to plot
# the kernel centered around the selected frame
x = numpy.arange(0, len(kernel)) + i - len(kernel) / 2
if fitcurve is not None:
fitcurve.remove()
        # we want to calculate the fit for the first curve in the trace widget, hence, get the y-data of the line
_, trace = mainwindow.tracedockwidget.canvas.axes.lines[0].get_data()
result = template_matching(data=trace, kernel=kernel, threshold=4.)
# we need to apply the best found scale and offset to the kernel
fitcurve, = mainwindow.tracedockwidget.canvas.axes.plot(x, kernel * result.s[i] + result.c[i])
mainwindow.segmentation.active_frame_changed.append(redraw_fit)
mainwindow.show()
sys.exit(app.exec_())
|
andersonsilvade/python_C
|
Python32/aulas/hakeandositeprecodescontowhiletemposimounao.py
|
Python
|
mit
| 607
| 0.013559
|
import urllib.request
import time
def pega_preço():
    pagina = urllib.request.urlopen('http://beans.itcarlow.ie/prices-loyalty.html')
texto = pagina.read().decode('utf8')
onde = texto.find('>$')
inicio= onde + 2
fim = inicio + 4
return float(texto[inicio:fim])
opção = input("deseja comprar já? (S/N)")
if opção == 'S' :
preço = pega_preço()
print('Você comprou por %5.2f R$' % preço)
else:
preço = 99.99
while preço >= 4.74:
preço = pega_preço()
if preço >= 4.74:
time.sleep(5)
print ('comprar ! Preço: %5.2f' %preço)
|
hiviah/perspectives-observatory
|
utilities/svg_client.py
|
Python
|
gpl-3.0
| 1,448
| 0.015884
|
# This file is part of the Perspectives Notary Server
#
# Copyright (C) 2011 Dan Wendlandt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import traceback
from client_common import verify_notary_signature, fetch_notary_xml, parse_http_notary_list
from generate_svg import get_svg_graph
if len(sys.argv) != 4:
print "usage: %s <service-id> <notary-list-file> <len-days>" % sys.argv[0]
exit(1)
sid = sys.argv[1]
server_list = parse_http_notary_list(sys.argv[2])
for s in server_list:
try:
s["results"] = None
server = s["host"].split(":")[0]
port = s["host"].split(":")[1]
code, xml_text = fetch_notary_xml(server,int(port), sid)
if code == 200 and verify_notary_signature(sid, xml_text, s["public_key"]):
s["results"] = xml_text
except Exception, e:
pass
print get_svg_graph(sid, server_list, int(sys.argv[3]), time.time())
|
brython-dev/brython
|
www/src/Lib/unittest/test/test_discovery.py
|
Python
|
bsd-3-clause
| 34,059
| 0.001556
|
import os.path
from os.path import abspath
import re
import sys
import types
import pickle
from test import support
from test.support import import_helper
import test.test_importlib.util
import unittest
import unittest.mock
import unittest.test
class TestableTestProgram(unittest.TestProgram):
module = None
exit = True
defaultTest = failfast = catchbreak = buffer = None
verbosity = 1
progName = ''
testRunner = testLoader = None
def __init__(self):
pass
class TestDiscovery(unittest.TestCase):
# Heavily mocked tests so I can avoid hitting the filesystem
def test_get_name_from_path(self):
loader = unittest.TestLoader()
loader._top_level_dir = '/foo'
name = loader._get_name_from_path('/foo/bar/baz.py')
self.assertEqual(name, 'bar.baz')
if not __debug__:
# asserts are off
return
with self.assertRaises(AssertionError):
loader._get_name_from_path('/bar/baz.py')
def test_find_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['test2.py', 'test1.py', 'not_a_test.py', 'test_dir',
'test.foo', 'test-not-a-module.py', 'another_dir'],
['test4.py', 'test3.py', ]]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
def isdir(path):
return path.endswith('dir')
os.path.isdir = isdir
self.addCleanup(restore_isdir)
def isfile(path):
# another_dir is not a package and so shouldn't be recursed into
return not path.endswith('dir') and not 'another_dir' in path
os.path.isfile = isfile
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module + ' tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
# The test suites found should be sorted alphabetically for reliable
# execution order.
expected = [[name + ' module tests'] for name in
('test1', 'test2', 'test_dir')]
expected.extend([[('test_dir.%s' % name) + ' module tests'] for name in
('test3', 'test4')])
self.assertEqual(suite, expected)
def test_find_tests_socket(self):
# A socket is neither a directory nor a regular file.
# https://bugs.python.org/issue25320
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['socket']]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: False
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module + ' tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
self.assertEqual(suite, [])
def test_find_tests_with_package(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module.path + ' module tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*'))
# We should have loaded tests from the a_directory and test_directory2
# directly and via load_tests for the test_directory package, which
# still calls the baseline module loader.
self.assertEqual(suite,
[['a_directory module tests'],
['test_directory load_tests',
'test_directory module tests'],
['test_directory2 module tests']])
# The test module paths should be sorted for reliable execution order
self.assertEqual(Module.paths,
['a_directory', 'test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
        # (but there are no tests in our stub module itself, so that is [] at
# the time of call).
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*')])
def test_find_tests_default_calls_package_load_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(resto
|
mcepl/rope
|
rope/refactor/similarfinder.py
|
Python
|
lgpl-3.0
| 12,522
| 0.00008
|
"""This module can be used for finding similar code"""
import re
import rope.refactor.wildcards
from rope.base import libutils
from rope.base import codeanalyze, exceptions, ast, builtins
from rope.refactor import (patchedast, wildcards)
from rope.refactor.patchedast import MismatchedTokenError
class BadNameInCheckError(exceptions.RefactoringError):
pass
class SimilarFinder(object):
"""`SimilarFinder` can be used to find similar pieces of code
See the notes in the `rope.refactor.restructure` module for more
info.
"""
def __init__(self, pymodule, wildcards=None):
"""Construct a SimilarFinder"""
self.source = pymodule.source_code
try:
self.raw_finder = RawSimilarFinder(
pymodule.source_code, pymodule.get_ast(), self._does_match)
except MismatchedTokenError:
print("in file %s" % pymodule.resource.path)
raise
self.pymodule = pymodule
if wildcards is None:
self.wildcards = {}
for wildcard in [rope.refactor.wildcards.
DefaultWildcard(pymodule.pycore.project)]:
self.wildcards[wildcard.get_name()] = wildcard
else:
self.wildcards = wildcards
def get_matches(self, code, args={}, start=0, end=None):
self.args = args
if end is None:
end = len(self.source)
skip_region = None
if 'skip' in args.get('', {}):
resource, region = args['']['skip']
if resource == self.pymodule.get_resource():
skip_region = region
return self.raw_finder.get_matches(code, start=start, end=end,
skip=skip_region)
def get_match_regions(self, *args, **kwds):
for match in self.get_matches(*args, **kwds):
yield match.get_region()
def _does_match(self, node, name):
arg = self.args.get(name, '')
kind = 'default'
if isinstance(arg, (tuple, list)):
kind = arg[0]
arg = arg[1]
suspect = wildcards.Suspect(self.pymodule, node, name)
return self.wildcards[kind].matches(suspect, arg)
class RawSimilarFinder(object):
"""A class for finding similar expressions and statements"""
def __init__(self, source, node=None, does_match=None):
if node is None:
node = ast.parse(source)
if does_match is None:
self.does_match = self._simple_does_match
else:
self.does_match = does_match
self._init_using_ast(node, source)
def _simple_does_match(self, node, name):
return isinstance(node, (ast.expr, ast.Name))
def _init_using_ast(self, node, source):
self.source = source
self._matched_asts = {}
if not hasattr(node, 'region'):
patchedast.patch_ast(node, source)
self.ast = node
def get_matches(self, code, start=0, end=None, skip=None):
"""Search for `code` in source and return a list of `Match`\es
`code` can contain wildcards. ``${name}`` matches normal
        names and ``${?name}`` can match any expression. You can use
`Match.get_ast()` for getting the node that has matched a
given pattern.
"""
if end is None:
end = len(self.source)
for match in self._get_matched_asts(code):
match_start, match_end = match.get_region()
if start <= match_start and match_end <= end:
if skip is not None and (skip[0] < match_end and
skip[1] > match_start):
continue
yield match
def _get_matched_asts(self, code):
if code not in self._matched_asts:
wanted = self._create_pattern(code)
matches = _ASTMatcher(self.ast, wanted,
self.does_match).find_matches()
self._matched_asts[code] = matches
return self._matched_asts[code]
def _create_pattern(self, expression):
expression = self._replace_wildcards(expression)
node = ast.parse(expression)
# Getting Module.Stmt.nodes
nodes = node.body
if len(nodes) == 1 and isinstance(nodes[0], ast.Expr):
# Getting Discard.expr
wanted = nodes[0].value
else:
wanted = nodes
return wanted
def _replace_wildcards(self, expression):
ropevar = _RopeVariable()
template = CodeTemplate(expression)
mapping = {}
for name in template.get_names():
mapping[name] = ropevar.get_var(name)
return template.substitute(mapping)
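# Hedged usage sketch of RawSimilarFinder based on the get_matches() docstring
# above; everything beyond the constructor, get_matches() and get_region() is
# assumed rather than taken from rope's documented API, and the wildcard names
# are illustrative.
#
#   finder = RawSimilarFinder('x = a + b\ny = a + b\n')
#   for match in finder.get_matches('${first} + ${second}'):
#       start, end = match.get_region()   # character offsets into the source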
class _ASTMatcher(object):
def __init__(self, body, pattern, does_match):
"""Searches the given pattern in the body AST.
body is an AST node and pattern can be either an AST node or
a list of ASTs nodes
"""
self.body = body
self.pattern = pattern
self.matches = None
self.ropevar = _RopeVariable()
self.matches_callback = does_match
def find_matches(self):
if self.matches is None:
self.matches = []
ast.call_for_nodes(self.body, self._check_node, recursive=True)
return self.matches
def _check_node(self, node):
if isinstance(self.pattern, list):
self._check_statements(node)
else:
self._check_expression(node)
def _check_expression(self, node):
mapping = {}
if self._match_nodes(self.pattern, node, mapping):
self.matches.append(ExpressionMatch(node, mapping))
def _check_statements(self, node):
for child in ast.get_children(node):
if isinstance(child, (list, tuple)):
self.__check_stmt_list(child)
def __check_stmt_list(self, nodes):
for index in range(len(nodes)):
if len(nodes) - index >= len(self.pattern):
current_stmts = nodes[index:index + len(self.pattern)]
mapping = {}
if self._match_stmts(current_stmts, mapping):
self.matches.append(StatementMatch(current_stmts, mapping))
def _match_nodes(self, expected, node, mapping):
if isinstance(expected, ast.Name):
if self.ropevar.is_var(expected.id):
return self._match_wildcard(expected, node, mapping)
if not isinstance(expected, ast.AST):
return expected == node
if expected.__class__ != node.__class__:
return False
        children1 = self._get_children(expected)
children2 = self._get_children(node)
if len(children1) != len(children2):
return False
for child1, child2 in zip(children1, children2):
if isinstance(child1, ast.AST):
if not self._match_nodes(child1, child2, mapping):
return False
elif isinstance(child1, (list, tuple)):
if not isinstance(child2, (list, tuple)) or \
len(child1) != len(child2):
return False
for c1, c2 in zip(child1, child2):
if not self._match_nodes(c1, c2, mapping):
return False
else:
if type(child1) is not type(child2) or child1 != child2:
return False
return True
def _get_children(self, node):
"""Return not `ast.expr_context` children of `node`"""
children = ast.get_children(node)
return [child for child in children
if not isinstance(child, ast.expr_context)]
def _match_stmts(self, current_stmts, mapping):
if len(current_stmts) != len(self.pattern):
return False
for stmt, expected in zip(current_stmts, self.pattern):
if not self._match_nodes(expected, stmt, mapping):
return False
return True
def _match_wildcard(self, node1, node2, mapping):
name = self.ropevar.get_base(node1.id)
if name not in mapping:
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/dimension.py
|
Python
|
mit
| 1,562
| 0.00128
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dimension(Model):
"""Dimension of a resource metric. For e.g. instance specific HTTP requests
for a web app,
where instance name is dimension of the metric HTTP request.
:param name:
:type name: str
:param display_name:
    :type display_name: str
:param internal_name:
:type internal_name: str
    :param to_be_exported_for_shoebox:
:type to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'internal_name': {'key': 'internalName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(self, name=None, display_name=None, internal_name=None, to_be_exported_for_shoebox=None):
super(Dimension, self).__init__()
self.name = name
self.display_name = display_name
self.internal_name = internal_name
self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
|
Dangetsu/vnr
|
Frameworks/Sakura/py/apps/reader/dialogs/retest.py
|
Python
|
gpl-3.0
| 5,176
| 0.010626
|
# coding: utf8
# retest.py
# 12/16/2012 jichi
__all__ = 'RegExpTester',
if __name__ == '__main__':
import sys
sys.path.append('..')
import debug
debug.initenv()
import re
from PySide.QtCore import Qt
from Qt5 import QtWidgets
from sakurakit import skqss
from sakurakit.skclass import memoizedproperty
from sakurakit.skdebug import dprint
from sakurakit.sktr import tr_
from mytr import mytr_
import rc
def create_label(text=""): # unicode -> QLabel
ret = QtWidgets.QLabel()
if text:
ret.setText(text + ":")
ret.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
return ret
class _RegExpTester(object):
def __init__(self, q):
self._createUi(q)
self._refresh()
def _createUi(self, q):
#url = "http://en.wikipedia.org/wiki/Regular_expression"
url = "http://www.regular-expressions.info/lookaround.html"
self.textEdit.appendHtml(
"""You can use this tester to play with the regular expression
(<a href="%s">%s</a>) used in the Shared Dictionary.
<br/><br/>
For example, "regular(?= exp)" will match all "regular" before " exp".
""" % (url, url))
self.patternEdit.setText("regular(?= exp)")
self.replaceEdit.setText("HELLO WORLD")
for sig in (
self.textEdit.textChanged,
self.patternEdit.textChanged,
self.replaceEdit.textChanged,
self.regexCheckBox.toggled,
self.icaseCheckBox.toggled,
):
sig.connect(self._refresh)
layout = QtWidgets.QVBoxLayout()
grid = QtWidgets.QGridLayout()
# 0
grid.addWidget(create_label(tr_("Pattern")), 0, 0)
grid.addWidget(self.patternEdit, 0, 1)
# 1
grid.addWidget(create_label(tr_("Translation")))
grid.addWidget(self.replaceEdit)
# 2
grid.addWidget(create_label(tr_("Status")))
grid.addWidget(self.messageEdit)
layout.addLayout(grid)
row = QtWidgets.QHBoxLayout()
row.addWidget(self.regexCheckBox)
row.addWidget(self.icaseCheckBox)
layout.addLayout(row)
splitter = QtWidgets.QSplitter(Qt.Vertical)
splitter.addWidget(self.textEdit)
splitter.addWidget(self.textView)
layout.addWidget(splitter)
q.setLayout(layout)
def _refresh(self):
"""
@param text unicode
@return unicode
"""
text = self.textEdit.toPlainText()
pattern = self.patternEdit.text().strip()
repl = self.replaceEdit.text().strip()
r = self.regexCheckBox.isChecked()
i = self.icaseCheckBox.isChecked()
result = text
try:
if r and i:
rx = re.compile(pattern, re.IGNORECASE|re.DOTALL)
result = rx.sub(repl, text)
elif r:
result = re.sub(pattern, repl, text)
elif i:
pattern = re.escape(pattern)
rx = re.compile(pattern, re.IGNORECASE|re.DOTALL)
result = rx.sub(repl, text)
else:
result = text.replace(pattern, repl)
matched = result != text
message = tr_("Found") if matched else tr_("Not found")
skqss.class_(self.messageEdit, 'default')
self.messageEdit.setText(message)
except Exception, e:
skqss.class_(self.messageEdit, 'error')
message = e.message or "%s" % e
self.messageEdit.setText(message)
self.textView.setHtml(result)
@memoizedproperty
def textView(self):
ret = QtWidgets.QTextBrowser()
skqss.class_(ret, 'texture')
ret.setToolTip(tr_("Target"))
ret.setOpenExternalLinks(True)
#ret.setAcceptRichText(False)
return ret
@memoizedproperty
def textEdit(self):
ret = QtWidgets.QPlainTextEdit()
skqss.class_(ret, 'normal')
ret.setToolTip(tr_("Source"))
return ret
@memoizedproperty
def patternEdit(self):
ret = QtWidgets.QLineEdit()
skqss.class_(ret, 'normal')
ret.setToolTip(mytr_("Matched text"))
ret.setPlaceholderText(ret.toolTip())
return ret
@memoizedproperty
def replaceEdit(self):
ret = QtWidgets.QLineEdit()
skqss.class_(ret, 'normal')
ret.setToolTip(mytr_("Replaced text"))
ret.setPlaceholderText(ret.toolTip())
return ret
@memoizedproperty
def messageEdit(self):
ret = QtWidgets.QLineEdit()
ret.setReadOnly(True)
ret.setToolTip(tr_("Status"))
ret.setPlaceholderText(ret.toolTip())
return ret
@memoizedproperty
def regexCheckBox(self):
ret = QtWidgets.QCheckBox()
ret.setText(tr_("Regular expression"))
ret.setToolTip(tr_("Regular expression"))
ret.setChecked(True)
return ret
@memoizedproperty
def icaseCheckBox(self):
ret = QtWidgets.QCheckBox()
ret.setText(tr_("Ignore case"))
ret.setToolTip(tr_("Ignore case"))
#ret.setChecked(True)
return ret
# I have to use QMainWindow, or the texture will not work
class RegExpTester(QtWidgets.QDialog):
def __init__(self, parent=None):
WINDOW_FLAGS = Qt.Dialog|Qt.WindowMinMaxButtonsHint
super(RegExpTester, self).__init__(parent, WINDOW_FLAGS)
skqss.class_(self, 'texture')
self.__d = _RegExpTester(self)
self.setWindowTitle(mytr_("Test Regular Expression"))
self.setWindowIcon(rc.icon('window-regexp'))
self.resize(380, 350)
dprint("pass")
if __name__ == '__main__':
a = debug.app()
w = RegExpTester()
w.show()
a.exec_()
# EOF
|
ricorx7/rti_python
|
ADCP/Predictor/MaxVelocity.py
|
Python
|
bsd-3-clause
| 6,635
| 0.004823
|
import json
import os
import math
import pytest
def calculate_max_velocity(**kwargs):
"""
Calculate the maximum velocity the ADCP can measure including the boat speed in m/s. This speed is the
speed the ADCP is capable of measuring, if the speed exceeds this value, then the data will be incorrect
due to rollovers.
    :param CWPBB=: Broadband or Narrowband.
:param CWPBB_LagLength=: WP lag length in meters.
:param CWPBS=: Bin Size.
:param BeamAngle=: Beam angle in degrees.
:param SystemFrequency=: System frequency in hz.
:param SpeedOfSound=: Speed of Sound in m/s.
:param CyclesPerElement=: Cycles per element.
:return: Maximum velocity the ADCP can read in m/s.
"""
# Get the configuration from the json file
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
config = json.loads(open(json_file_path).read())
except Exception as e:
print("Error getting the configuration file. MaxVelocity", e)
return 0.0
# _CWPBB_LagLength_, _BeamAngle_, _SystemFrequency_, _SpeedOfSound_, _CyclesPerElement_
return _calculate_max_velocity(kwargs.pop('CWPBB', config['DEFAULT']['CWPBB']),
kwargs.pop('CWPBB_LagLength', config['DEFAULT']['CWPBB_LagLength']),
kwargs.pop('CWPBS', config['DEFAULT']['CWPBS']),
kwargs.pop('BeamAngle', config['BeamAngle']),
kwargs.pop('SystemFrequency', config['DEFAULT']['SystemFrequency']),
kwargs.pop('SpeedOfSound', config['SpeedOfSound']),
kwargs.pop('CyclesPerElement', config['CyclesPerElement']))
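# Hedged usage sketch: any keyword omitted from calculate_max_velocity() falls
# back to the value stored in predictor.json next to this module, so a minimal
# broadband call (values borrowed from the unit tests at the bottom of this
# file) could look like:
#
#   v_max = calculate_max_velocity(CWPBB=1, CWPBB_LagLength=1.0, CWPBS=0.60,
#                                  BeamAngle=20, SystemFrequency=1152000,
#                                  SpeedOfSound=1490)   # approx. 0.658 m/s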
def _calculate_max_velocity(_CWPBB_, _CWPBB_LagLength_, _CWPBS_, _BeamAngle_, _SystemFrequency_, _SpeedOfSound_, _CyclesPerElement_):
"""
Calculate the maximum velocity the ADCP can measure including the boat speed in m/s. This speed is the
speed the ADCP is capable of measuring, if the speed exceeds this value, then the data will be incorrect
due to rollovers.
    :param _CWPBB_: Broadband or Narrowband.
:param _CWPBB_LagLength_: WP lag length in meters.
:param _BeamAngle_: Beam angle in degrees.
:param _SystemFrequency_: System frequency in hz.
:param _SpeedOfSound_: Speed of Sound in m/s.
:param _CyclesPerElement_: Cycles per element.
:return: Maximum velocity the ADCP can read in m/s.
"""
# Get the configuration from the json file
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
config = json.loads(open(json_file_path).read())
except Exception as e:
print("Error getting the configuration file. MaxVelocity", e)
return 0.0
# Prevent divide by 0
if _CyclesPerElement_ == 0:
_CyclesPerElement_ = 1
if _SpeedOfSound_ == 0:
_SpeedOfSound_ = 1490
if _SystemFrequency_ == 0:
_SystemFrequency_ = config["DEFAULT"]["1200000"]["FREQ"]
# Sample Rate
    sumSampling = 0.0
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]: # 1200 khz
sumSampling += config["DEFAULT"]["1200000"]["SAMPLING"] * config["DEFAULT"]["1200000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]): # 600 khz
sumSampling += config["DEFAULT"]["600000"]["SAMPLING"] * config["DEFAULT"]["600000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]): # 300 khz
sumSampling += config["DEFAULT"]["300000"]["SAMPLING"] * config["DEFAULT"]["300000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]): # 150 khz
sumSampling += config["DEFAULT"]["150000"]["SAMPLING"] * config["DEFAULT"]["150000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["75000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["150000"]["FREQ"]): # 75 khz
sumSampling += config["DEFAULT"]["75000"]["SAMPLING"] * config["DEFAULT"]["75000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["38000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["75000"]["FREQ"]): # 38 khz
        sumSampling += config["DEFAULT"]["38000"]["SAMPLING"] * config["DEFAULT"]["38000"]["CPE"] / _CyclesPerElement_
sampleRate = _SystemFrequency_ * (sumSampling)
# Meters Per Sample
metersPerSample = 0
    if sampleRate == 0:
metersPerSample = 0.0
else:
metersPerSample = math.cos(_BeamAngle_ / 180.0 * math.pi) * _SpeedOfSound_ / 2.0 / sampleRate
# Lag Samples
lagSamples = 0
if metersPerSample == 0:
lagSamples = 0
else:
lagSamples = 2 * math.trunc((math.trunc(_CWPBB_LagLength_ / metersPerSample) + 1.0) / 2.0)
# Ua Hz
uaHz = 0.0
if lagSamples == 0:
uaHz = 0.0
else:
uaHz = sampleRate / (2.0 * lagSamples)
# Ua Radial
uaRadial = 0.0
if _SystemFrequency_ == 0:
uaRadial = 0.0
else:
uaRadial = uaHz * _SpeedOfSound_ / (2.0 * _SystemFrequency_)
#### NARROWBAND ####
# Beam Angle Radian
beamAngleRad = _BeamAngle_ / 180.0 * math.pi
# Ta
Ta = 2.0 * _CWPBS_ / _SpeedOfSound_ / math.cos(beamAngleRad)
# L
L = 0.5 * _SpeedOfSound_ * Ta
    # Check for vertical beam. No beam angle.
if _BeamAngle_ == 0:
return uaRadial
# Narrowband lag length
if _CWPBB_ == 0:
return L / math.sin(_BeamAngle_ / 180.0 * math.pi)
return uaRadial / math.sin(_BeamAngle_ / 180.0 * math.pi)
# UNIT TEST
# Run with pytext MaxVelocity.py
def test_narrowband():
assert pytest.approx(calculate_max_velocity(CWPBB=0, CWPBB_LagLength=1.0, CWPBS=0.60, BeamAngle=20, SystemFrequency=1152000, SpeedOfSound=1467), 0.001) == 1.867
def test_broadband():
assert pytest.approx(calculate_max_velocity(CWPBB=1, CWPBB_LagLength=1.0, CWPBS=0.60, BeamAngle=20, SystemFrequency=1152000, SpeedOfSound=1490), 0.001) == 0.658
def test_broadband300():
assert pytest.approx(calculate_max_velocity(CWPBB=1, CWPBB_LagLength=1.0, CWPBS=4.0, BeamAngle=20, SystemFrequency=288000.0, SpeedOfSound=1490), 0.001) == 2.669
|
lukesummer/vnpy
|
vn.sgit/pyscript/generate_md_functions.py
|
Python
|
mit
| 11,152
| 0.003639
|
# encoding: UTF-8
__author__ = 'CHENXY'
from string import join
from sgit_struct import structDict
def processCallBack(line):
orignalLine = line
    line = line.replace(' virtual void ', '')  # strip the invalid leading content
    line = line.replace('{};\n', '')  # strip the invalid trailing content
    content = line.split('(')
    cbName = content[0]  # callback function name
    cbArgs = content[1]  # callback function arguments
if cbArgs[-1] == ' ':
cbArgs = cbArgs.replace(') ', '')
else:
cbArgs = cbArgs.replace(')', '')
    cbArgsList = cbArgs.split(', ')  # split the arguments into a list
cbArgsTypeList = []
cbArgsValueList = []
    for arg in cbArgsList:  # start processing the arguments
        content = arg.split(' ')
        if len(content) > 1:
            if 'struct' not in content:
                cbArgsTypeList.append(content[0])  # list of argument types
                cbArgsValueList.append(content[1])  # list of argument values
            else:
                print content
                cbArgsTypeList.append(content[1])  # list of argument types
                cbArgsValueList.append(content[2]+content[3])  # list of argument values
createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine)
createProcess(cbName, cbArgsTypeList, cbArgsValueList)
    # generate the process section of the .h file
process_line = 'void process' + cbName[2:] + '(Task task);\n'
fheaderprocess.write(process_line)
fheaderprocess.write('\n')
    # generate the on callback section of the .h file
if 'OnRspError' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict error) {};\n'
elif 'OnRspQry' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last) {};\n'
elif 'OnRsp' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last) {};\n'
elif 'OnRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data) {};\n'
elif 'OnErrRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error) {};\n'
else:
on_line = ''
fheaderon.write(on_line)
fheaderon.write('\n')
    # generate the wrapper section
createWrap(cbName)
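# Hedged example of the kind of header line processCallBack() is written to
# parse (reconstructed from the string handling above; the callback name and
# struct type are illustrative, not copied from an actual Sgit API header):
#
#   ' virtual void OnRspUserLogin(CSgitFtdcRspUserLoginField *pRspUserLogin, CSgitFtdcRspInfoField *pRspInfo, int nRequestID, bool bIsLast) {};\n'
#
# which would yield cbName = 'OnRspUserLogin' and one entry per argument in
# cbArgsTypeList / cbArgsValueList.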
#----------------------------------------------------------------------
def createWrap(cbName):
"""在Python封装段代码中进行处理"""
# 生成.h文件中的on部分
if 'OnRspError' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict error)\n'
override_line = '("on' + cbName[2:] + '")(error);\n'
elif 'OnRsp' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last)\n'
override_line = '("on' + cbName[2:] + '")(data, error, id, last);\n'
elif 'OnRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data)\n'
override_line = '("on' + cbName[2:] + '")(data);\n'
elif 'OnErrRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error)\n'
override_line = '("on' + cbName[2:] + '")(data, error);\n'
else:
on_line = ''
    if on_line != '':
fwrap.write(on_line)
fwrap.write('{\n')
fwrap.write('\ttry\n')
fwrap.write('\t{\n')
fwrap.write('\t\tthis->get_override'+override_line)
fwrap.write('\t}\n')
fwrap.write('\tcatch (error_already_set const &)\n')
fwrap.write('\t{\n')
fwrap.write('\t\tPyErr_Print();\n')
fwrap.write('\t}\n')
fwrap.write('};\n')
fwrap.write('\n')
def createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine):
    # build a Task object from the callback data and push it onto the queue
funcline = orignalLine.replace(' virtual void ', 'void ' + apiName + '::')
funcline = funcline.replace('{};', '')
funcline = funcline.replace(' {}', '')
ftask.write(funcline)
ftask.write('{\n')
ftask.write("\tTask task = Task();\n")
ftask.write("\ttask.task_name = " + cbName.upper() + ";\n")
    # define constants
global define_count
fdefine.write("#define " + cbName.upper() + ' ' + str(define_count) + '\n')
define_count = define_count + 1
    # switch-block code
fswitch.write("case " + cbName.upper() + ':\n')
fswitch.write("{\n")
fswitch.write("\tthis->" + cbName.replace('On', 'process') + '(task);\n')
fswitch.write("\tbreak;\n")
fswitch.write("}\n")
fswitch.write("\n")
for i, type_ in enumerate(cbArgsTypeList):
if type_ == 'int':
ftask.write("\ttask.task_id = " + cbArgsValueList[i] + ";\n")
elif type_ == 'bool':
ftask.write("\ttask.task_last = " + cbArgsValueList[i] + ";\n")
elif 'CSgitFtdcRspInfoField' in type_:
ftask.write("\n")
ftask.write("\tif (pRspInfo)\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_error = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\tCSgitFtdcRspInfoField empty_error = CSgitFtdcRspInfoField();\n")
ftask.write("\t\tmemset(&empty_error, 0, sizeof(empty_error));\n")
ftask.write("\t\ttask.task_error = empty_error;\n")
ftask.write("\t}\n")
else:
ftask.write("\n")
ftask.write("\tif (" + cbArgsValueList[i][1:] + ")\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_data = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\t" + type_ + " empty_data = " + type_ + "();\n")
ftask.write("\t\tmemset(&empty_data, 0, sizeof(empty_data));\n")
ftask.write("\t\ttask.task_data = empty_d
|
ata;\n")
ftask.write("\t}\n")
ftask.write("\tthis->task_queue.push(task);\n")
ftask.write("};\n")
ftask.write("\n")
def createProcess(cbName, cbArgsTypeList, cbArgsValueList):
    # pop the task from the queue and convert its payload into Python dicts
    fprocess.write("void " + apiName + '::' + cbName.replace('On', 'process') + '(Task task)' + "\n")
fprocess.write("{\n")
fprocess.write("\tPyLock lock;\n")
onArgsList = []
for i, type_ in enumerate(cbArgsTypeList):
if 'CSgitFtdcRspInfoField' in type_:
fprocess.write("\t"+ type_ + ' task_error = any_cast<' + type_ + '>(task.task_error);\n')
fprocess.write("\t"+ "dict error;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'error["' + key + '"] = task_error.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('error')
elif type_ in structDict:
fprocess.write("\t"+ type_ + ' task_data = any_cast<' + type_ + '>(task.task_data);\n')
fprocess.write("\t"+ "dict data;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'data["' + key + '"] = task_data.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('data')
elif type_ == 'bool':
onArgsList.append('task.task_last')
elif type_ == 'int':
onArgsList.append('task.task_id')
    onArgs = ', '.join(onArgsList)
fprocess.write('\tthis->' + cbName.replace('On', 'on') + '(' + onArgs +');\n')
fprocess.write("};\n")
fprocess.write("\n")
def processFunction(line):
    line = line.replace('	virtual int ', '')           # strip the useless text at the start of the line
    line = line.replace(') = 0;\n', '')                # strip the useless text at the end of the line
content = line.split('(')
    fcName = content[0]                                # callback function name
    fcArgs = content[1]                                # callback function arguments
fcArgs = fcArgs.replace(')', '')
    fcArgsList = fcArgs.split(',')                     # turn the arguments into a list
fcArgsTypeList = []
fcArgsValueList = []
    for arg in fcArgsList:                             # start processing the arguments
content = arg.split(' ')
if len(content) >= 2:
            fcArgsTypeList.append(content[0])          # parameter type list
            fcArgsValueList.append(content[1])         # parameter value list
print line
print fcArgs
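A minimal, self-contained sketch of the declaration parsing the generator above relies on; the sample declaration and the helper name parse_declaration are illustrative only and not part of the original script.

def parse_declaration(line):
    """Split a C++ virtual method declaration into name, argument types and argument names."""
    line = line.replace('virtual int ', '').replace('virtual void ', '').replace(') = 0;', '').strip()
    name, args = line.split('(')
    arg_types, arg_names = [], []
    for arg in args.replace(')', '').split(','):
        parts = arg.strip().split(' ')
        if len(parts) >= 2:
            arg_types.append(' '.join(parts[:-1]))
            arg_names.append(parts[-1])
    return name.strip(), arg_types, arg_names

# parse_declaration('virtual int ReqUserLogin(CSgitFtdcReqUserLoginField *pReqUserLoginField, int nRequestID) = 0;')
# -> ('ReqUserLogin', ['CSgitFtdcReqUserLoginField', 'int'], ['*pReqUserLoginField', 'nRequestID'])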
|
pwollstadt/IDTxl
|
dev/search_GPU/neighbour_search_opencl_old.py
|
Python
|
gpl-3.0
| 12,468
| 0.000962
|
"""Provide neighbour searches using OpenCl GPU-code."""
from pkg_resources import resource_filename
import numpy as np
from . import idtxl_exceptions as ex
try:
import pyopencl as cl
except ImportError as err:
ex.package_missing(err, 'PyOpenCl is not available on this system. Install'
' it using pip or the package manager to use '
'OpenCL-powered CMI estimation.')
def knn_search(pointset, n_dim, knn_k, theiler_t, n_chunks=1, gpuid=0):
"""Interface with OpenCL knn search from Python/IDTxl."""
# check for a data layout in memory as expected by the low level functions
# ndim * [n_points * n_chunks]
if n_dim != pointset.shape[0]:
assert n_dim == pointset.shape[1], ('Given dimension does not match '
'data.')
pointset = pointset.transpose().copy()
print('>>>search GPU: fixed shape of input data')
if pointset.flags['C_CONTIGUOUS'] is not True:
pointset = np.ascontiguousarray(pointset)
print('>>>search GPU: fixed memory layout of input data')
pointdim = pointset.shape[0]
n_points = pointset.shape[1]
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
success = clFindKnn(indexes, distances, pointset.astype('float32'),
pointset.astype('float32'), int(knn_k), int(theiler_t),
int(n_chunks), int(pointdim), int(n_points),
int(gpuid))
if success:
return (indexes, distances)
else:
print("Error in OpenCL knn search!")
return 1
def range_search(pointset, n_dim, radius, theiler_t, n_chunks=1, gpuid=0):
"""Interface with OpenCL range search from Python/IDTxl."""
# check for a data layout in memory as expected by the low level functions
# ndim * [n_points * n_chunks]
if n_dim != pointset.shape[0]:
assert n_dim == pointset.shape[1], ('Given dimension does not match '
'data axis.')
pointset = pointset.transpose().copy()
print('>>>search GPU: fixed shape input data')
if pointset.flags['C_CONTIGUOUS'] is not True:
pointset = np.ascontiguousarray(pointset)
print('>>>search GPU: fixed memory layout of input data')
pointdim = pointset.shape[0]
n_points = pointset.shape[1]
pointcount = np.zeros((n_points), dtype=np.int32)
success = clFindRSAll(pointcount, pointset.astype('float32'),
pointset.astype('float32'), radius, theiler_t,
n_chunks, pointdim, n_points, gpuid)
if success:
return pointcount
else:
print("Error in OpenCL range search!")
return 1
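# Usage sketch (illustrative, not part of the original module): both interfaces expect a
# C-contiguous float32 pointset laid out as n_dim x (n_points * n_chunks), plus a working
# PyOpenCL installation with at least one GPU.
#
#   import numpy as np
#   pointset = np.random.rand(2, 1000).astype(np.float32)      # 2 dimensions, 1000 points
#   indexes, distances = knn_search(pointset, n_dim=2, knn_k=4, theiler_t=0)
#   radii = np.full(1000, 0.1, dtype=np.float32)                # one radius per point
#   counts = range_search(pointset, n_dim=2, radius=radii, theiler_t=0)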
def clFindKnn(h_bf_indexes, h_bf_distances, h_pointset, h_query, kth, thelier,
nchunks, pointdim, signallength, gpuid):
triallength = int(signallength / nchunks)
# print 'Values:', pointdim, triallength, signallength, kth, thelier
'''for platform in cl.get_platforms():
for device in platform.get_devices():
print("===============================================================")
print("Platform name:", platform.name)
print("Platform profile:", platform.profile)
print("Platform vendor:", platform.vendor)
print("Platform version:", platform.version)
print("---------------------------------------------------------------")
print("Device name:", device.name)
print("Device type:", cl.device_type.to_string(device.type))
print("Device memory: ", device.global_mem_size//1024//1024, 'MB')
print("Device max clock speed:", device.max_clock_frequency, 'MHz')
print("Device compute units:", device.max_compute_units)
print("Device max work group size:", device.max_work_group_size)
print("Device max work item sizes:", device.max_work_item_sizes)'''
# Set up OpenCL
# context = cl.create_some_context()
platform = cl.get_platforms()
platf_idx = find_nonempty(platform)
print('platform index chosen is: {0}'.format(platf_idx))
my_gpu_devices = platform[platf_idx].get_devices(device_type=cl.device_type.GPU)
context = cl.Context(devices=my_gpu_devices)
queue = cl.CommandQueue(context, my_gpu_devices[gpuid])
print(("Selected Device: ", my_gpu_devices[gpuid].name))
# Check memory resources.
usedmem =int( (h_query.nbytes + h_pointset.nbytes + h_bf_distances.nbytes + h_bf_indexes.nbytes)//1024//1024)
totalmem = int(my_gpu_devices[gpuid].global_mem_size//1024//1024)
if (totalmem*0.90) < usedmem:
print(("WARNING:", usedmem, "Mb used out of", totalmem,
"Mb. The GPU could run out of memory."))
# Create OpenCL buffers
d_bf_query = cl.Buffer(context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=h_query)
d_bf_pointset = cl.Buffer(context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=h_pointset)
d_bf_distances = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
h_bf_distances.nbytes)
    d_bf_indexes = cl.Buffer(context,
                             cl.mem_flags.READ_WRITE,
h_bf_indexes.nbytes)
# Kernel Launch
kernelLocation = resource_filename(__name__, 'gpuKnnBF_kernel.cl')
kernelsource = open(kernelLocation).read()
program = cl.Program(context, kernelsource).build()
    kernelKNNshared = program.kernelKNNshared
kernelKNNshared.set_scalar_arg_dtypes([None, None, None, None, np.int32,
np.int32, np.int32, np.int32,
np.int32, None, None])
# Size of workitems and NDRange
if signallength/nchunks < my_gpu_devices[gpuid].max_work_group_size:
workitems_x = 8
elif my_gpu_devices[gpuid].max_work_group_size < 256:
workitems_x = my_gpu_devices[gpuid].max_work_group_size
else:
workitems_x = 256
if signallength % workitems_x != 0:
temp = int(round(((signallength)/workitems_x), 0) + 1)
else:
temp = int(signallength/workitems_x)
NDRange_x = workitems_x * temp
# Local memory for distances and indexes
localmem = (np.dtype(np.float32).itemsize*kth*workitems_x +
np.dtype(np.int32).itemsize*kth*workitems_x) / 1024
if localmem > my_gpu_devices[gpuid].local_mem_size / 1024:
        print('Localmem allocation will fail. {0} kb available, and it needs '
'{1} kb.'.format(my_gpu_devices[gpuid].local_mem_size / 1024,
localmem))
localmem1 = cl.LocalMemory(np.dtype(np.float32).itemsize*kth*workitems_x)
localmem2 = cl.LocalMemory(np.dtype(np.int32).itemsize*kth*workitems_x)
kernelKNNshared(queue, (NDRange_x,), (workitems_x,), d_bf_query,
d_bf_pointset, d_bf_indexes, d_bf_distances, pointdim,
triallength, signallength, kth, thelier, localmem1,
localmem2)
queue.finish()
# Download results
cl.enqueue_copy(queue, h_bf_distances, d_bf_distances)
cl.enqueue_copy(queue, h_bf_indexes, d_bf_indexes)
# Free buffers
d_bf_distances.release()
d_bf_indexes.release()
d_bf_query.release()
d_bf_pointset.release()
return 1
'''
 * Range search where radius is a vector with one entry per point in the queryset/pointset
'''
def clFindRSAll(h_bf_npointsrange, h_pointset, h_query, h_vecradius, thelier,
nchunks, pointdim, signallength, gpuid):
triallength = int(signallength / nchunks)
# print 'Values:', pointdim, triallength, signallength, kth, thelier
'''for platform in cl.get_platforms():
for device in platform.get_devices():
print("===============================================================")
print("Platform
|
CYBAI/servo
|
components/script/dom/bindings/codegen/parser/tests/test_optional_constraints.py
|
Python
|
mpl-2.0
| 981
| 0.001019
|
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface OptionalConstraints1 {
undefined foo(optional byte arg1, byte arg2);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(not threw,
"Should not have thrown on non-optional argument fo
|
llowing "
"optional argument.")
parser = parser.reset()
parser.parse("""
interface OptionalConstraints2 {
undefined foo(optional byte arg1 = 1, optional byte arg2 = 2,
                      optional byte arg3, optional byte arg4 = 4,
optional byte arg5, optional byte arg6 = 9);
};
""")
results = parser.finish()
args = results[0].members[0].signatures()[0][1]
harness.check(len(args), 6, "Should have 6 arguments")
harness.check(args[5].defaultValue.value, 9,
"Should have correct default value")
|
Kjell-K/AirGym
|
gym_airsim/envs/myAirSimClient.py
|
Python
|
mit
| 6,563
| 0.017218
|
import numpy as np
import time
import math
import cv2
from pylab import array, arange, uint8
from PIL import Image
import eventlet
from eventlet import Timeout
import multiprocessing as mp
# Change the path below to point to the directory where you installed the AirSim PythonClient
#sys.path.append('C:/Users/Kjell/Google Drive/MASTER-THESIS/AirSimpy')
from AirSimClient import *
class myAirSimClient(MultirotorClient):
def __init__(self):
self.img1 = None
self.img2 = None
MultirotorClient.__init__(self)
MultirotorClient.confirmConnection(self)
self.enableApiControl(True)
self.armDisarm(True)
self.home_pos = self.getPosition()
self.home_ori = self.getOrientation()
self.z = -6
def straight(self, duration, speed):
pitch, roll, yaw = self.getPitchRollYaw()
vx = math.cos(yaw) * speed
vy = math.sin(yaw) * speed
self.moveByVelocityZ(vx, vy, self.z, duration, DrivetrainType.ForwardOnly)
start = time.time()
return start, duration
def yaw_right(self, duration):
self.rotateByYawRate(30, duration)
start = time.time()
return start, duration
    def yaw_left(self, duration):
self.rotateByYawRate(-30, duration)
start = time.time()
return start, duration
def take_action(self, action):
        # check that the copter is level, because sometimes it climbs for no reason
x = 0
while self.getPosition().z_val < -7.0:
self.moveToZ(-6, 3)
time.sleep(1)
print(self.getPosition().z_val, "and", x)
x = x + 1
if x > 10:
return True
start = time.time()
duration = 0
collided = False
if action == 0:
start, duration = self.straight(1, 4)
while duration > time.time() - start:
if self.getCollisionInfo().has_collided == True:
return True
self.moveByVelocity(0, 0, 0, 1)
self.rotateByYawRate(0, 1)
if action == 1:
start, duration = self.yaw_right(0.8)
while duration > time.time() - start:
if self.getCollisionInfo().has_collided == True:
return True
self.moveByVelocity(0, 0, 0, 1)
self.rotateByYawRate(0, 1)
if action == 2:
start, duration = self.yaw_left(1)
while duration > time.time() - start:
if self.getCollisionInfo().has_collided == True:
return True
self.moveByVelocity(0, 0, 0, 1)
self.rotateByYawRate(0, 1)
return collided
def goal_direction(self, goal, pos):
pitch, roll, yaw = self.getPitchRollYaw()
yaw = math.degrees(yaw)
pos_angle = math.atan2(goal[1] - pos.y_val, goal[0]- pos.x_val)
pos_angle = math.degrees(pos_angle) % 360
track = math.radians(pos_angle - yaw)
return ((math.degrees(track) - 180) % 360) - 180
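    # Worked example (illustration only, not part of the original client): the closing
    # expression wraps any angle into [-180, 180). With yaw = 90 deg and pos_angle = 0 deg:
    #   track = math.radians(0 - 90)
    #   ((math.degrees(track) - 180) % 360) - 180   ->  -90.0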
def getScreenDepthVis(self, track):
responses = self.simGetImages([ImageRequest(0, AirSimImageType.DepthPerspective, True, False)])
img1d = np.array(responses[0].image_data_float, dtype=np.float)
img1d = 255/np.maximum(np.ones(img1d.size), img1d)
img2d = np.reshape(img1d, (responses[0].height, responses[0].width))
image = np.invert(np.array(Image.fromarray(img2d.astype(np.uint8), mode='L')))
factor = 10
maxIntensity = 255.0 # depends on dtype of image data
# Decrease intensity such that dark pixels become much darker, bright pixels become slightly dark
newImage1 = (maxIntensity)*(image/maxIntensity)**factor
newImage1 = array(newImage1,dtype=uint8)
small = cv2.resize(newImage1, (0,0), fx=0.39, fy=0.38)
cut = small[20:40,:]
info_section = np.zeros((10,cut.shape[1]),dtype=np.uint8) + 255
info_section[9,:] = 0
line = np.int((((track - -180) * (100 - 0)) / (180 - -180)) + 0)
        if line != 0 and line != 100:
info_section[:,line-1:line+2] = 0
elif line == 0:
info_section[:,0:3] = 0
elif line == 100:
info_section[:,info_section.shape[1]-3:info_section.shape[1]] = 0
total = np.concatenate((info_section, cut), axis=0)
#cv2.imshow("Test", total)
#cv2.waitKey(0)
return total
def AirSim_reset(self):
self.reset()
time.sleep(0.2)
self.enableApiControl(True)
self.armDisarm(True)
time.sleep(1)
self.moveToZ(self.z, 3)
time.sleep(3)
def AirSim_reset_old(self):
reset = False
z = -6.0
while reset != True:
now = self.getPosition()
self.simSetPose(Pose(Vector3r(now.x_val, now.y_val, -30),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True)
now = self.getPosition()
if (now.z_val - (-30)) == 0:
self.simSetPose(Pose(Vector3r(self.home_pos.x_val, self.home_pos.y_val, -30),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True)
now = self.getPosition()
if (now.x_val - self.home_pos.x_val) == 0 and (now.y_val - self.home_pos.y_val) == 0 and (now.z_val - (-30)) == 0 :
self.simSetPose(Pose(Vector3r(self.home_pos.x_val, self.home_pos.y_val, self.home_pos.z_val),Quaternionr(self.home_ori.w_val, self.home_ori.x_val, self.home_ori.y_val, self.home_ori.z_val)), True)
now = self.getPosition()
if (now.x_val - self.home_pos.x_val) == 0 and (now.y_val - self.home_pos.y_val) == 0 and (now.z_val - self.home_pos.z_val) == 0:
reset = True
self.moveByVelocity(0, 0, 0, 1)
time.sleep(1)
self.moveToZ(z, 3)
time.sleep(3)
|
InfectedPacket/resyst
|
resyst/__init__.py
|
Python
|
gpl-2.0
| 262
| 0.003817
|
# -*- coding: utf-8 -*-
"""Automatic reverse engineering of firmware files for embedded devices."""
from resyst import metadata
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
|
lituan/tools
|
pisa/pisa_same_entity.py
|
Python
|
cc0-1.0
| 5,849
| 0.014361
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
check same interface phos_binding patterns
"""
import os
import sys
import urllib
import urllib2
import cPickle as pickle
from multiprocessing import Pool
def get_entityid(p):
pdbid,interface_id,chain1,chain2 = p
url = 'http://www.rcsb.org/pdb/rest/customReport.csv?'
data = {
'pdbids':pdbid,
'customReportColumns':'structureId,entityId',
'service':'wsfile',
'format':'csv',
}
data = urllib.urlencode(data)
req = urllib2.Request(url,data)
response = urllib2.urlopen(req)
lines = response.readlines()
lines = [line.rstrip('\r\n') for line in lines[1:]]
lines = [line for line in lines if line]
lines = [line.split(',') for line in lines]
lines = [[w.strip('"') for w in line] for line in lines]
chain1_id = [line for line in lines if line[1] == chain1][0][2]
    chain2_id = [line for line in lines if line[1] == chain2][0][2]
return pdbid,interface_id,chain1_id,chain2_id
def filter_same_interface(pdb_interfaces):
pdbid_chain = [(p[0],p[1],p[-1][0][0],p[-1][1][0]) for p in pdb_interfaces]
p = Pool(4)
result = p.map(get_entityid,pdbid_chain)
p.close()
pdb_chain_entity = {}
for r in result:
if not (r[0],r[2],r[3]) in pdb_chain_entity.keys():
pdb_chain_entity[(r[0],r[2],r[3])] = [r]
else:
pdb_chain_entity[(r[0],r[2],r[3])].append(r)
with open('same_interface.txt','w') as w_f:
same = []
different = []
for k,v in pdb_chain_entity.iteritems():
if len(v) > 1:
print >> w_f,k
cluster = [p for p in pdb_interfaces if (p[0],p[1]) in [(vi[0],vi[1]) for vi in v]]
cluster_patterns = []
for c in cluster:
bonds = c[6]
phos_interacting_residues = {}
                    PHOS = ['TPO_ O1P','TPO_ O2P','TPO_ O3P','TPO_ OG1','SEP_ O1P','SEP_ O2P','SEP_ O3P','SEP_ OG ','PTR_ O1P','PTR_ O2P','PTR_ O3P','PTR_ OH ']
for bond in bonds:
bond_type,bond_info = bond
for bondi in bond_info:
res1,res2,dist = bondi
if [p for p in PHOS if res1[-8:] == p]:
res1 = '_'.join(res1.split('_')[:3])
if not res1 in phos_interacting_residues.keys():
phos_interacting_residues[res1] = [res2]
else:
phos_interacting_residues[res1].append(res2)
elif [p for p in PHOS if res2[-8:] == p]:
res2 = '_'.join(res2.split('_')[:3])
if not res2 in phos_interacting_residues.keys():
phos_interacting_residues[res2] = [res1]
else:
phos_interacting_residues[res2].append(res1)
for phos,interacting_residues in phos_interacting_residues.items():
if interacting_residues:
interacting_residues = ['_'.join(r.split('_')[:3]) for r in interacting_residues]
interacting_residues = list(set(interacting_residues))
interacting_residues = [r.split('_')[2] for r in interacting_residues]
interacting_residues = sorted(interacting_residues)
interacting_residues = '_'.join(interacting_residues)
cluster_patterns.append(interacting_residues)
print >> w_f,c[0],c[1],interacting_residues
print cluster_patterns
if len(cluster_patterns) > 1 and len(set(cluster_patterns)) == 1:
same.append(1)
else:
different.append(1)
print 'same',len(same)
print 'different',len(different)
pdb_unique_interface = [(v[0][0],v[0][1]) for k,v in pdb_chain_entity.iteritems()]
pdb_interfaces = [p for p in pdb_interfaces if (p[0],p[1]) in pdb_unique_interface]
print 'after filter same entity',len(pdb_interfaces)
return pdb_interfaces
def filter_non_one_phos(pdb_interfaces):
zero_phos_interfaces = []
one_phos_interfaces = []
more_phos_interfaces = []
for interface in pdb_interfaces:
pdbid,p1,interface_area,p2,p3,p4,bonds = interface[:7]
phos_res = []
for bond in bonds:
bond_type,bond_info = bond
for bondi in bond_info:
res1,res2,dist = bondi
if 'TPO' in res1 or 'SEP' in res1 or 'PTR' in res1:
phos_res.append('_'.join(res1.split('_')[:3]))
if 'TPO' in res2 or 'SEP' in res2 or 'PTR' in res2:
phos_res.append('_'.join(res2.split('_')[:3]))
phos_res = set(phos_res)
if len(phos_res) == 1:
one_phos_interfaces.append(interface)
elif len(phos_res) > 1:
more_phos_interfaces.append(interface)
else:
zero_phos_interfaces.append(interface)
print 'after filter non_one_phos_interfaces',len(one_phos_interfaces)
return one_phos_interfaces
def main():
pdb_interfaces = pickle.load(open(sys.argv[-1]))
    pdb_interfaces = [p for p in pdb_interfaces if p[7][0][2].lower() == 'x,y,z' and p[7][1][2].lower() == 'x,y,z']
pdb_interfaces = [p for p in pdb_interfaces if p[7][0][1] == 'Protein' and p[7][1][1] == 'Protein']
pdb_interfaces = filter_non_one_phos(pdb_interfaces)
pdb_interfaces = filter_same_interface(pdb_interfaces)
if __name__ == "__main
|
__":
main()
|
killtheyak/killtheyak.github.io
|
killtheyak/test/webtest_tests.py
|
Python
|
mit
| 1,412
| 0.000708
|
from unittest import TestCase
from webtest import TestApp
from nose.tools import * # noqa
from ..main import app
class TestAUser(TestCase):
def setUp(self):
self.app = TestApp(app)
def tearDown(self):
pass
def test_can_see_homepage(self):
# Goes to homepage
res = self.app.get('/')
assert_equal(res.status_code, 200)
assert_in("All I want to do is", res)
def test_can_see_a_page(self):
# Goes to homepage
res = self.app.get('/')
# Sees titles for a page
assert_in('install Python', res)
# Clicks on a title
res = res.click('install Python 2 and/or 3')
assert_equal(res.status_code, 200)
# Is at the page
# Can see the title
assert_in("Install Python", res)
# And the OS's
assert_in("macosx", res)
# And the content
assert_in('brew install python3', res)
def test_can_see_deps(self):
# Goes to homepage
res = self.app.get('/')
        # Clicks on a page
res = res.click('install Python 2 and/or 3')
# The page has dependency
# The dependency titles are listed
assert_in("install-homebrew", res)
        # Clicks on the dependency link (full instructions)
res = res.click('full instructions', index=0)
# Is at the dependency's page
assert_in('ruby', res)
|
houssine78/addons
|
product_to_scale_bizerba/__openerp__.py
|
Python
|
agpl-3.0
| 1,686
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-Today GRAP (http://www.grap.coop)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Products - Send to Scales',
'summary': 'Synchronize Odoo database with Scales',
'version': '1.0',
'category': 'Tools',
'description': """
=============================================
Synchronize Odoo database with Bizerba Scales
=============================================
Roadmap / Know Issues
---------------------
* It should be great to split this module into many modules, with some generic
features in a module named product_to_scale and some specifics features
for each scales system. 'product_to_scale_bizerba',
'product_to_scale_metler_toledo', etc.
""",
'author': 'GRAP',
'website': 'http://www.grap.coop',
'license': 'AGPL-3',
'depends': [
'product',
],
'data': [
'security/ir_module_category.xml',
'security/res_groups.xml',
'security/ir.model.access.csv',
'data/ir_config_parameter.xml',
'data/ir_cron.xml',
'views/view_product_product.xml',
'views/view_product_uom.xml',
'views/view_product_scale_system.xml',
'views/view_product_scale_group.xml',
'views/view_product_scale_log.xml',
'views/action.xml',
'views/menu.xml',
],
'demo': [
'demo/res_users.xml',
'demo/product_scale_system.xml',
'demo/product_scale_system_product_line.xml',
        'demo/product_scale_group.xml',
'demo/product_product.xml',
'demo/decimal_precision.xml',
],
}
|
sukharevd/hadoop-install
|
bin/cherrypy-dns.py
|
Python
|
apache-2.0
| 1,744
| 0.012615
|
#!/usr/bin/python
import sys
service_name = "cherrypy-dns"
pidfile_path = "/var/run/" + service_name + ".pid"
port = 8001
if len(sys.argv) > 1 and sys.argv[1] == "service_name": print service_name; sys.exit(0)
if len(sys.argv) > 1 and sys.argv[1] == "pidfile_path": print pidfile_path; sys.exit(0)
if len(sys.argv) > 1 and sys.argv[1] == "port": print port; sys.exit(0)
import cherrypy, os, subprocess
from cherrypy.process.plugins import PIDFile
p = PIDFile(cherrypy.engine, pidfile_path)
p.subscribe()
script_dir = os.path.dirname(os.path.realpath(__file__))
class ScriptRunner:
@cherrypy.expose
def add(self, hostname="", ip=""):
return self.execute_command('bash ' + script_dir + '/add-domain-name.sh "' + hostname + '" "' + ip + '"')
@cherrypy.expose
def remove(self, hostname=None, ip=None):
if not hostname: raise cherrypy.HTTPError(400, "Hostname parameter is required.")
if not ip: raise cherrypy.HTTPError(400, "IP parameter is required.")
return self.execute_command('bash ' + script_dir + '/remove-domain-name.sh "' + hostname + '" "' + ip + '"')
#@cherrypy.expose
#def lookup(self, attr):
# return subprocess.check_output('bash -c "cat $HADOOP_CONF_DIR/slaves"', shell=True)
def execute_command(self, command):
try:
return subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError as e:
raise cherrypy.HTTPError(500, e.cmd + " exited with code " + str(e.returncode) + "\n" + e.output)
conf = {
'global': {
'server.socket_host': '127.0.0.1',
'server.socket_port': port,
'server.thread_pool': 1
}
}
cherrypy.quickstart(ScriptRunner(), '/domain-names/', conf)
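A brief usage sketch, not part of the original script: once the service above is running on 127.0.0.1:8001, the handlers can be exercised over HTTP; the hostname and IP below are made up.

import urllib2
base = 'http://127.0.0.1:8001/domain-names'
print urllib2.urlopen(base + '/add?hostname=node1.example.com&ip=10.0.0.11').read()
print urllib2.urlopen(base + '/remove?hostname=node1.example.com&ip=10.0.0.11').read()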
|
grap/OCB
|
addons/purchase/purchase.py
|
Python
|
agpl-3.0
| 71,343
| 0.007639
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
from openerp.tools.safe_eval import safe_eval as eval
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
cr.execute("""update purchase_order_line set
date_planned=%s
where
order_id=%s and
(date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
cr.execute("""update purchase_order set
minimum_planned_date=%s where id=%s""", (value, po.id))
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.purchase_id,sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
stock_picking p on (p.id=m.picking_id)
WHERE
p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line)
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
            'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
            'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplie
|
sserrot/champion_relationships
|
venv/Lib/site-packages/jedi/api/file_name.py
|
Python
|
mit
| 5,707
| 0.001752
|
import os
from jedi._compatibility import FileNotFoundError, force_unicode, scandir
from jedi.api import classes
from jedi.api.strings import StringName, get_quote_ending
from jedi.api.helpers import match
from jedi.inference.helpers import get_str_or_none
class PathName(StringName):
api_type = u'path'
def complete_file_name(inference_state, module_context, start_leaf, quote, string,
like_name, signatures_callback, code_lines, position, fuzzy):
# First we want to find out what can actually be changed as a name.
like_name_length = len(os.path.basename(string))
addition = _get_string_additions(module_context, start_leaf)
if string.startswith('~'):
string = os.path.expanduser(string)
if addition is None:
return
string = addition + string
# Here we use basename again, because if strings are added like
# `'foo' + 'bar`, it should complete to `foobar/`.
must_start_with = os.path.basename(string)
string = os.path.dirname(string)
sigs = signatures_callback(*position)
is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs)
if is_in_os_path_join:
to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start)
if to_be_added is None:
is_in_os_path_join = False
else:
string = to_be_added + string
base_path = os.path.join(inference_state.project.path, string)
try:
listed = sorted(scandir(base_path), key=lambda e: e.name)
# OSError: [Errno 36] File name too long: '...'
except (FileNotFoundError, OSError):
return
quote_ending = get_quote_ending(quote, code_lines, position)
for entry in listed:
name = entry.name
if match(name, must_start_with, fuzzy=fuzzy):
if is_in_os_path_join or not entry.is_dir():
name += quote_ending
else:
name += os.path.sep
yield classes.Completion(
inference_state,
PathName(inference_state, name[len(must_start_with) - like_name_length:]),
stack=None,
like_name_length=like_name_length,
is_fuzzy=fuzzy,
)
def _get_string_additions(module_context, start_leaf):
def iterate_nodes():
node = addition.parent
was_addition = True
for child_node in reversed(node.children[:node.children.index(addition)]):
if was_addition:
was_addition = False
yield child_node
continue
if child_node != '+':
break
was_addition = True
addition = start_leaf.get_previous_leaf()
if addition != '+':
return ''
context = module_context.create_context(start_leaf)
return _add_strings(context, reversed(list(iterate_nodes())))
def _add_strings(context, nodes, add_slash=False):
string = ''
first = True
for child_node in nodes:
values = context.infer_node(child_node)
if len(values) != 1:
return None
c, = values
s = get_str_or_none(c)
if s is None:
return None
if not first and add_slash:
string += os.path.sep
string += force_unicode(s)
first = False
return string
def _add_os_path_join(module_context, start_leaf, bracket_start):
def check(maybe_bracket, nodes):
if maybe_bracket.start_pos != bracket_start:
return None
if not nodes:
return ''
context = module_context.create_context(nodes[0])
return _add_strings(context, nodes, add_slash=True) or ''
if start_leaf.type == 'error_leaf':
# Unfinished string literal, like `join('`
value_node = start_leaf.parent
index = value_node.children.index(start_leaf)
if index > 0:
error_node = value_node.children[index - 1]
if error_node.type == 'error_node' and len(error_node.children) >= 2:
index = -2
if error_node.children[-1].type == 'arglist':
arglist_nodes = error_node.children[-1].children
index -= 1
else:
arglist_nodes = []
return check(error_node.children[index + 1], arglist_nodes[::2])
return None
# Maybe an arglist or some weird error case. Therefore checked below.
searched_node_child = start_leaf
while searched_node_child.parent is not None \
and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
searched_node_child = searched_node_child.parent
if searched_node_child.get_first_leaf() is not start_leaf:
return None
searched_node = searched_node_child.parent
if searched_node is None:
return None
index = searched_node.children.index(searched_node_child)
arglist_nodes = searched_node.children[:index]
if searched_node.type == 'arglist':
trailer = searched_node.parent
if trailer.type == 'error_node':
trailer_index = trailer.children.index(searched_node)
assert trailer_index >= 2
assert trailer.children[trailer_index - 1] == '('
return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
elif trailer.type == 'trailer':
return check(trailer.children[0], arglist_nodes[::2])
elif searched_node.type == 'trailer':
return check(searched_node.children[0], [])
elif searched_node.type == 'error_node':
# Stuff like `join(""`
return check(arglist_nodes[-1], [])
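A usage sketch, not taken from this module: depending on the installed jedi version the completion entry point is Script(code).complete(line, column) in newer releases or Script(code, line, column).completions() in older ones; path completion inside the string literal is what the helpers above implement.

import jedi
code = "open('/usr/')"
completions = jedi.Script(code).complete(1, len("open('/usr/"))
print([c.name for c in completions])   # e.g. ['bin/', 'lib/', ...] depending on the local filesystem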
|
efforia/eos-dashboard
|
pandora-hub/app.py
|
Python
|
lgpl-3.0
| 5,312
| 0.022779
|
import re
from unicodedata import normalize
from datetime import datetime
from django.shortcuts import render
from django.http import HttpResponse as response
from django.http import HttpResponseRedirect as redirect
from django.conf import settings
from models import Spreadable,Image,Playable,Spreaded,Product
from socialize.models import Profile
from socialize.stream import StreamService,Dropbox
from efforia.main import Efforia
from feedly.feed import Activity
def sp(x): return '!!' in x[1]
def pl(x): return '>!' in x[1]
def im(x): return '%!' in x[1]
class Application(Activity):
def __init__(self,user,app):
Activity.__init__(self,user,app)
def deadline(self):
playables = Playable.objects.filter(user=self.user)
for play in playables:
if not play.token and not play.visual: play.delete()
def relations(self,feed):
excludes = []; rels = Spreaded.objects.filter(user=self.user)
excludes.extend([(r.spreaded,'!!') for r in rels])
excludes.extend([(r.spread,r.token()) for r in rels])
for v in rels.values('spread').distinct():
t = rels.filter(spread=v['spread'],user=self.user)
if len(t) > 0: feed.append(t[len(t)-1])
return excludes
def duplicates(self,exclude,feed):
for o in self.objects:
objects = globals()[o].objects.filter(user=self.user)
if 'Spreadable' in o: e = filter(sp,exclude)
elif 'Playable' in o: e = filter(pl,exclude)
elif 'Image' in o: e = filter(im,exclude)
excludes = [x[0] for x in e]
feed.extend(objects.exclude(id__in=excludes))
class Images(Efforia):
def __init__(self): pass
def view_image(self,request):
return render(request,'image.jade',{'static_url':settings.STATIC_URL},content_type='text/html')
def upload_image(self,request):
photo = request.FILES['Filedata'].read()
dropbox = Dropbox()
link = dropbox.upload_and_share(photo)
res = self.url_request(link)
url = '%s?dl=1' % res
return url
def create_image(self,request):
u = self.current_user(request)
if 'description' in request.POST:
image = list(Image.objects.filter(user=u))[-1:][0]
descr = request.POST['description']
image.description = descr
image.save()
return response('Description added to image successfully')
i = Image(link=self.upload_image(request),user=u)
i.save()
return response('Image created successfully')
class Spreads(Efforia):
def __init__(self): pass
def start_spreadapp(self,request):
return render(request,'spreadapp.jade',{'static_url':settings.STATIC_URL},content_type='text/html')
def view_spread(self,request):
return render(request,"sp
|
read.jade",{},content_type='text/html')
def create_spread(self,request):
u = self.current_user(request)
name = u.first_name.lower()
text = unicode('%s' % (request.POST['content']))
        post = Spreadable(user=u,content=text,name='!'+name)
post.save()
self.accumulate_points(1,request)
return response('Spreadable created successfully')
class Uploads(Efforia):
def __init__(self): pass
def view_upload(self,request):
return render(request,'content.jade',{'static_url':settings.STATIC_URL},content_type='text/html')
def set_thumbnail(self,request):
u = self.current_user(request)
service = StreamService()
token = request.GET['id']
access_token = u.profile.google_token
thumbnail = service.video_thumbnail(token,access_token)
play = Playable.objects.filter(user=u).latest('date')
play.visual = thumbnail
play.token = token
play.save()
self.accumulate_points(1,request)
r = redirect('/')
r.set_cookie('token',token)
return r
def view_content(self,request):
u = self.current_user(request)
content = title = ''
for k,v in request.REQUEST.iteritems():
if 'title' in k: title = v
elif 'content' in k: content = v
elif 'status' in k:
return self.set_thumbnail(request)
try:
url,token = self.parse_upload(request,title,content)
return render(request,'video.jade',{'static_url':settings.STATIC_URL,
'hostname':request.get_host(),
'url':url,'token':token},content_type='text/html')
except Exception: return response('Invalid file for uploading')
def parse_upload(self,request,title,content):
keys = ','; keywords = content.split(' ')
for k in keywords: k = normalize('NFKD',k.decode('utf-8')).encode('ASCII','ignore')
keys = keys.join(keywords)
playable = Playable(user=self.current_user(request),name='>'+title,description=content)
playable.save()
service = StreamService()
access_token = self.current_user(request).profile.google_token
return service.video_entry(title,content,keys,access_token)
def media_chooser(self,request):
return render(request,'chooser.jade')
|
edx/edx-ora2
|
manage.py
|
Python
|
agpl-3.0
| 762
| 0.001312
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
if os.environ.get('DJANGO_SETTINGS_MODULE') is None:
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.base'
# When using an on-disk database for the test suite,
# Django asks us if we want to delete the database.
# We do.
if 'test' in sys.argv[0:3]:
        # Catch warnings in tests and redirect them to be handled by the test runner. Otherwise build results are too
# noisy to be of much use.
import logging
logging.captureWarnings(True)
sys.argv.append('--noinput')
sys.argv.append('--logging-clear-handlers')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
edmorley/django
|
tests/migrations2/test_migrations_2/0001_initial.py
|
Python
|
bsd-3-clause
| 562
| 0
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("migrations", "0002_second")]
operations = [
migrations.CreateModel(
"OtherAuthor",
[
("id", mod
|
els.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugFie
|
ld(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
]
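For reference, a sketch of the model this migration builds; the equivalent Django model definition below is illustrative and not part of the test fixture.

from django.db import models

class OtherAuthor(models.Model):
    name = models.CharField(max_length=255)
    slug = models.SlugField(null=True)
    age = models.IntegerField(default=0)
    silly_field = models.BooleanField(default=False)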
|
puttarajubr/commcare-hq
|
custom/api/utils.py
|
Python
|
bsd-3-clause
| 714
| 0.001401
|
from requests.auth import HTTPBasicAuth
def apply_updates(doc, update_dict):
# updates the doc with items from the dict
# returns whether or not any updates were made
should_save = False
for key, value in update_dict.items():
if getattr(doc, key, None) != value:
setattr(doc, key, value)
should_save = True
return should_save
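# Minimal illustration of apply_updates (the document class below is hypothetical,
# not something from this codebase):
#
#   class _Doc(object):
#       name = 'old'
#       owner = 'alice'
#
#   doc = _Doc()
#   apply_updates(doc, {'name': 'new', 'owner': 'alice'})   # returns True
#   # doc.name is now 'new'; doc.owner was already equal, so it is left untouched.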
class EndpointMixin(object):
@classmethod
def from_config(cls, config):
return cls(config.url, config.username, config.password)
def _auth(self):
        return HTTPBasicAuth(self.username, self.password)
def _urlcombine(self, base, target):
return '{base}{target}'.format(base=base, target=target)
|
nemesiscodex/openfonacide
|
openfonacide/management/commands/actualizar_datasets.py
|
Python
|
lgpl-3.0
| 3,284
| 0.002134
|
# encoding: utf-8
import csv
from urllib2 import HTTPError
import django
from django.db import transaction
import urllib2
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from openfonacide.matcher import Matcher
from openfonacide.models import Importacion, RegistroImportacion, Adjudicacion, Planificacion, Temporal, Institucion
__author__ = 'Diego Ramírez'
def registrar_ultima_importacion(importacion=None, md5_sum=None):
registro = RegistroImportacion(ultimo=True, ultimo_md5=md5_sum, importacion=importacion, fecha=datetime.now())
registro.save()
@transaction.atomic
def do_import(lines_list=None, tipo=None):
header_flag = True
header = list()
reader = csv.reader(lines_list.splitlines())
for row in reader:
if header_flag:
for column in row:
header.append(column)
header_flag = False
else:
args = dict()
for element in range(len(row)):
# setattr(a, header[i], row[i])
args[header[element]] = row[element]
if tipo is None:
return
if tipo == u"planificacion":
# Planificación logic
try:
Planificacion.objects.update_or_create(id=args['id'], anio=args['anio'], defaults=args)
except Exception as e:
continue
if tipo == u"adjudicación":
# adjudicación logic
try:
Adjudicacion.objects.update_or_create(id=args['id'], defaults=args)
except Exception as e:
continue
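# Illustrative payload for do_import (the 'descripcion' column is hypothetical; only
# 'id' and 'anio' are actually required by the update_or_create calls above):
#
#   sample = "id,anio,descripcion\n1,2016,Aulas nuevas\n2,2016,Sanitarios\n"
#   do_import(sample, tipo=u"planificacion")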
def read_url_file(url=None):
try:
_file = urllib2.urlopen(url)
data = _file.read()
_file.close()
return data
except HTTPError as e:
# apply log
print e.message
except:
print "We don't know exactly what happened"
return ""
class Command(BaseCommand):
def handle(self, *args, **options):
tareas = Importacion.objects.filter(activo=True)
worked = False
need_match = False
for t in tareas:
md5 = read_url_file(t.md5_url)
try:
registro = t.registroimportacion_set.get(ultimo=True)
if md5 == registro.ultimo_md5:
return
do_import(read_url_file(t.url), t.tipo)
registro.ultimo = False
registro.save()
registrar_ultima_importacion(importacion=t, md5_sum=md5)
except ObjectDoesNotExist:
do_import(read_url_file(t.url), t.tipo)
registrar_ultima_importacion(importacion=t, md5_sum=md5)
worked = True
if t.tipo == u'planificacion':
need_match = True
if worked and need_match:
m = Matcher(institucion_manager=Institucion.objects, planificacion_manager=Planificacion.objects,
temporal_manager=Temporal.objects
)
m.do_match()
# This Section is used just for debugging
if __name__ == "__main__":
django.setup()
c = Command()
c.handle()
|
jaygoswami2303/course_dashboard_api
|
v2/GradeAPI/api.py
|
Python
|
mit
| 27,402
| 0.006861
|
from django.http import HttpResponse
import pymongo
import MySQLdb
from course_dashboard_api.v2.dbv import *
sql_user = MYSQL_USER
sql_pswd = MYSQL_PSWD
mysql_db = MYSQL_DB
mongo_db = MONGO_DB
""" Description: Function to get quiz level grades of all students in a particular course.
Input Parameters:
course_name: name of the course for which grades are required (ex. CT101.1x)
course_run: run of the course for which grades are required (ex. 2016-17)
course_organization: organization of the course for which grades are required (ex. IITBombayX)
Output Type : List of grades of all students enrolled in the course
Author: Jay Goswami
Date of creation: 30 May 2017
"""
def get_all_student_grades(course_name, course_run, course_organization):
student_count = 1
try:
mongo_client = pymongo.MongoClient() # Establishing MongoDB connection
except:
print "MongoDB connection not established"
return HttpResponse("MongoDB connection not established") # MongoDB could not be connected
try:
db_mysql = MySQLdb.connect(user=sql_user, passwd=sql_pswd, db=mysql_db) # Establishing MySQL connection
except:
print "MySQL connection not established"
return HttpResponse("MySQL connection not established") # MySQL could not be connected
full_grade_list = []
problem_query = "Select grade,max_grade from courseware_studentmodule where max_grade is not null and grade is not null and student_id=%s and binary module_id=%s"
users_query = "select a.id, a.username, a.email, b.course_id from auth_user as a, student_courseenrollment as b where a.id=b.user_id"
# Query to retrieve the details of all students who have enrolled in any course
grading_policy = get_grading_policy(course_name, course_run, course_organization)
try:
grading_list = grading_policy['grader']
grade_cutoffs = grading_policy['grade_cutoffs']
except:
return None
mysql_cursor = db_mysql.cursor()
mysql_cursor.execute(users_query)
student_course_list = mysql_cursor.fetchall()
for student_course_pair in student_course_list:
found_course_id = student_course_pair[3].split(':')
if len(found_course_id)==1:
continue
course_id = course_organization + '+' + course_name + '+' + course_run
if course_id == found_course_id[1]: # Comparing course_id to get students enrolled in a particular course
student_grades = get_student_course_grades(str(student_course_pair[2]), str(student_course_pair[1]),
int(student_course_pair[0]), course_name, course_run, course_organization, student_count, db_mysql,
mongo_client, problem_query, grading_list, grade_cutoffs) # Calling function to get quiz grades of each student
student_count += 1 # Increment the count of students
            full_grade_list.append(student_grades)             # Appending student's grade list
mongo_client.close() # Closing MongoDB connection
db_mysql.close() # Closing MySQL connection
grade_list = {}
grade_list['course_name'] = course_name
grade_list['course_organization'] = course_organization
grade_list['course_run'] = course_run
grade_list['students'] = full_grade_list
return grade_list
""" Description: Function to get quiz level grades of each student in a particular course.This function is called by function get_all_student_grades().
Input Parameters:
email: Email id of student passed to this function by get_all_student_grades()
student_name: Username of student passed to this function by get_all_student_grades()
student_id: User ID of a student
course_id: ID of course for which grades are to be calculated
course_run: run of the course for which grades are to be calculated
course_organization: organization of the course for which grades are to be calculated
count: Number of students in the course including current student
db_mysql: MySQL Database connection object
mongo_client: MongoDB connection object
problem_query: Query passed to this function by get_all_student_grades()
Output Type: List
Author: Jay Goswami
Date of Creation: 30 May 2017
"""
def get_student_course_grades(email, student_name, student_id, course_id, course_run, course_organization, count, db_mysql, mongo_client, problem_query, grading_list, grade_cutoffs):
highchart_list = [] # List to be returned for highcharts
highchart_list2 = [] # List to be returned for highcharts
highchart_list3 = {}
highchart_list.append('total_score')
highchart_list3['id'] = student_id
highchart_list3['name'] = student_name
highchart_list3['email'] = email
db_mongo = mongo_client[mongo_db] # Getting the object for edxapp database of MongoDB
mongo_cur = db_mongo.modulestore.active_versions.find({"course":course_id, "run":course_run, "org":course_organization}) # Find the
i = mongo_cur[0]
active_version = mongo_cur[0]
version = i["versions"]["published-branch"]
try:
stud_avg_tot = 0
completed = True
for j in range(len(grading_list)): # iterating over the formats
best_score_list = [] # This list will store the final scores for the particular format
drop_count = grading_list[j]['drop_count'] # Gives number of droppable sections for that problem
type = grading_list[j]['type'] # Gives the type of the format i.e. Quiz, Final Exam etc.
short_label = grading_list[j]['short_label']
weight = grading_list[j]['weight'] # Gives the weights of the formats
min_count = grading_list[j]['min_count'] # Gives the minimum number of sections of that type present in the course
mongo_cur2 = db_mongo.modulestore.structures.find({'_id': version})
blocks = mongo_cur2[0]['blocks']
mongo_cur2 = []
for block in blocks:
if 'format' in block['fields'] and block['fields']['format']==type and block['fields']['graded']==True:
mongo_cur2.append(block)
count_doc = len(mongo_cur2)
# Query to find the different sequentials having the format 'type'
sequential_coun = 0 # intializing sequential count to zero
for k in mongo_cur2:
sequential_coun += 1
avg_score_sequential = 0
sum_avg_prob_score = 0
sum_prob_score_obt = 0
sum_tot_prob_score = 0
coun_prob = 0 # Initializing problem count as zero
list2 = k['fields'][
'children'] # Getting the children list of the sequential, this will consist of vertical ids
for m in range(len(list2)): # Iterating over the list of vertical ids
child_id = list2[m] # Getting the vertical id
vertical_id = child_id[1]
mongo_cur3 = []
for block in blocks:
if block['block_id']==vertical_id: # query to get the vertical document with the _id.name as vertical id
mongo_cur3.append(block)
n = mongo_cur3[0]
list3 = n['fields']['children'] # getting the children array for this vertical, consisiting of list of component ids
for o in range(len(list3)): # Iterating over the list of component ids
comp_id = list3[o] # Getting the component id
component_id = comp_id[1]
mongo_cur4 = []
for block in blocks:
if block['block_id'] == component_id:
mongo_cur4.append(block)
# query to get the problem document with the _id.name as problem id and category as problem.
try:
|
iceman1989/Check_mk
|
web/plugins/userdb/ldap.py
|
Python
|
gpl-2.0
| 48,692
| 0.010065
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config, defaults
import time, copy, traceback
try:
# docs: http://www.python-ldap.org/doc/html/index.html
import ldap
import ldap.filter
from ldap.controls import SimplePagedResultsControl
# be compatible to both python-ldap below 2.4 and above
try:
LDAP_CONTROL_PAGED_RESULTS = ldap.LDAP_CONTROL_PAGE_OID
ldap_compat = False
except:
LDAP_CONTROL_PAGED_RESULTS = ldap.CONTROL_PAGEDRESULTS
ldap_compat = True
except:
pass
from lib import *
g_ldap_user_cache = {}
g_ldap_group_cache = {}
# File for storing the time of the last success event
g_ldap_sync_time_file = defaults.var_dir + '/web/ldap_sync_time.mk'
# Exists when last ldap sync failed, contains exception text
g_ldap_sync_fail_file = defaults.var_dir + '/web/ldap_sync_fail.mk'
# LDAP attributes are case insensitive, we only use lower case!
# Please note: These are only default values. The user might override this
# by configuration.
ldap_attr_map = {
'ad': {
'user_id': 'samaccountname',
'pw_changed': 'pwdlastset',
},
'openldap': {
'user_id': 'uid',
'pw_changed': 'pwdchangedtime',
# group attributes
'member': 'uniquemember',
},
'389directoryserver': {
'user_id': 'uid',
'pw_changed': 'krbPasswordExpiration',
# group attributes
'member': 'uniquemember',
},
}
# LDAP attributes are case insensitive, we only use lower case!
# Please note: These are only default values. The user might override this
# by configuration.
ldap_filter_map = {
'ad': {
'users': '(&(objectclass=user)(objectcategory=person))',
'groups': '(objectclass=group)',
},
'openldap': {
'users': '(objectclass=person)',
'groups': '(objectclass=groupOfUniqueNames)',
},
'389directoryserver': {
'users': '(objectclass=person)',
'groups': '(objectclass=groupOfUniqueNames)',
},
}
#.
# .-General LDAP code----------------------------------------------------.
# | _ ____ _ ____ |
# | | | | _ \ / \ | _ \ |
# | | | | | | |/ _ \ | |_) | |
# | | |___| |_| / ___ \| __/ |
# | |_____|____/_/ \_\_| |
# | |
# +----------------------------------------------------------------------+
# | General LDAP handling code |
# '----------------------------------------------------------------------'
def ldap_log(s):
if config.ldap_debug_log is not None:
file(ldap_replace_macros(config.ldap_debug_log), "a").write('%s %s\n' %
(time.strftime('%Y-%m-%d %H:%M:%S'), s))
class MKLDAPException(MKGeneralException):
pass
ldap_connection = None
ldap_connection_options = None
def ldap_uri(server):
if 'use_ssl' in config.ldap_connection:
uri = 'ldaps://'
else:
uri = 'ldap://'
return uri + '%s:%d' % (server, config.ldap_connection['port'])
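# Illustration, not part of the original module: with config.ldap_connection set to
# {'server': 'ldap01.example.com', 'port': 389} and no 'use_ssl' key,
# ldap_uri('ldap01.example.com') returns 'ldap://ldap01.example.com:389';
# adding 'use_ssl' switches the scheme to 'ldaps://'.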
def ldap_test_module():
try:
ldap
except:
raise MKLDAPException(_("The python module python-ldap seems to be missing. You need to "
"install this extension to make the LDAP user connector work."))
def ldap_servers():
servers = [ config.ldap_connection['server'] ]
if config.ldap_connection.get('failover_servers'):
servers += config.ldap_connection.get('failover_servers')
return servers
def ldap_connect_server(server):
try:
uri = ldap_uri(server)
conn = ldap.ldapobject.ReconnectLDAPObject(uri)
conn.protocol_version = config.ldap_connection['version']
conn.network_timeout = config.ldap_connection.get('connect_timeout', 2.0)
conn.retry_delay = 0.5
# When using the domain top level as base-dn, the subtree search stumbles with referral objects.
# whatever. We simply disable them here when using active directory. Hope this fixes all problems.
if config.ldap_connection['type'] == 'ad':
conn.set_option(ldap.OPT_REFERRALS, 0)
ldap_default_bind(conn)
return conn, None
except (ldap.SERVER_DOWN, ldap.TIMEOUT, ldap.LOCAL_ERROR, ldap.LDAPError), e:
return None, '%s: %s' % (uri, e[0].get('info', e[0].get('desc', '')))
except MKLDAPException, e:
return None, str(e)
def ldap_disconnect():
global ldap_connection, ldap_connection_options
ldap_connection = None
ldap_connection_options = None
def ldap_connect(enforce_new = False, enforce_server = None):
global ldap_connection, ldap_connection_options
if not enforce_new \
and not "no_persistent" in config.ldap_connection \
and ldap_connection \
and config.ldap_connection == ldap_connection_options:
        ldap_log('LDAP CONNECT - Using existing connection')
return # Use existing connections (if connection settings have not changed)
else:
ldap_log('LDAP CONNECT - Connecting...')
ldap_test_module()
# Some major config var validations
if not config.ldap_connection.get('server'):
raise MKLDAPException(_('The LDAP connector is enabled in global settings, but the '
'LDAP server to connect to is not configured. Please fix this in the '
'<a href="wato.py?mode=ldap_config">LDAP '
'connection settings</a>.'))
if not config.ldap_userspec.get('dn'):
raise MKLDAPException(_('The distinguished name of the container object, which holds '
                                    'the user objects to be authenticated, is not configured. Please '
'fix this in the <a href="wato.py?mode=ldap_config">'
'LDAP User Settings</a>.'))
try:
errors = []
if enforce_server:
servers = [ enforce_server ]
else:
servers = ldap_servers()
for server in servers:
ldap_connection, error_msg = ldap_connect_server(server)
if ldap_connection:
break # got a connection!
else:
errors.append(error_msg)
# Got no connection to any server
if ldap_connection is None:
raise MKLDAPException(_('LDAP connection failed:\n%s') %
                                    ('\n'.join(errors)))
|
tareqalayan/ansible
|
lib/ansible/module_utils/aws/waiters.py
|
Python
|
gpl-3.0
| 9,345
| 0.000428
|
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
try:
import botocore.waiter as core_waiter
except ImportError:
pass # caught by HAS_BOTO3
ec2_data = {
"version": 2,
"waiters": {
"RouteTableExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeRouteTables",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(RouteTables[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidRouteTableID.NotFound",
"state": "retry"
},
]
},
"SecurityGroupExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSecurityGroups",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(SecurityGroups[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidGroup.NotFound",
"state": "retry"
},
]
},
"SubnetExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(Subnets[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidSubnetID.NotFound",
"state": "retry"
},
]
},
"SubnetHasMapPublic": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].MapPublicIpOnLaunch",
"state": "success"
},
]
},
"SubnetNoMapPublic": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].MapPublicIpOnLaunch",
"state": "success"
},
]
},
"SubnetHasAssignIpv6": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
"state": "success"
},
]
},
"SubnetNoAssignIpv6": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
"state": "success"
},
]
        },
        "SubnetDeleted": {
            "delay": 5,
            "maxAttempts": 40,
            "operation": "DescribeSubnets",
            "acceptors": [
                {
                    "matcher": "path",
                    "expected": True,
                    "argument": "length(Subnets[]) > `0`",
"state": "retry"
},
{
"matcher": "error",
"expected": "InvalidSubnetID.NotFound",
"state": "success"
},
]
},
"VpnGatewayExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeVpnGateways",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(VpnGateways[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidVpnGatewayID.NotFound",
"state": "retry"
},
]
},
}
}
waf_data = {
"version": 2,
"waiters": {
"ChangeTokenInSync": {
"delay": 20,
"maxAttempts": 60,
"operation": "GetChangeTokenStatus",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "ChangeTokenStatus == 'INSYNC'",
"state": "success"
},
{
"matcher": "error",
"expected": "WAFInternalErrorException",
"state": "retry"
}
]
}
}
}
eks_data = {
"version": 2,
"waiters": {
"ClusterActive": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeCluster",
"acceptors": [
{
"state": "success",
"matcher": "path",
"argument": "cluster.status",
"expected": "ACTIVE"
},
{
"state": "retry",
"matcher": "error",
"expected": "ResourceNotFoundException"
}
]
}
}
}
def ec2_model(name):
ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
return ec2_models.get_waiter(name)
def waf_model(name):
waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
return waf_models.get_waiter(name)
def eks_model(name):
eks_models = core_waiter.WaiterModel(waiter_config=eks_data)
return eks_models.get_waiter(name)
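# Illustrative use of the mapping defined below (sketch only; `ec2_client` and
# `subnet_id` are made-up names, not part of this module, and the Ansible helper
# that normally wraps this lookup is not shown in this excerpt):
#
#     waiter = waiters_by_name[('EC2', 'subnet_exists')](ec2_client)
#     waiter.wait(SubnetIds=[subnet_id])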
waiters_by_name = {
('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
'route_table_exists',
ec2_model('RouteTableExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_route_tables
)),
('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
'security_group_exists',
ec2_model('SecurityGroupExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_security_groups
)),
('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
'subnet_exists',
ec2_model('SubnetExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
'subnet_has_map_public',
ec2_model('SubnetHasMapPublic'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
'subnet_no_map_public',
ec2_model('SubnetNoMapPublic'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
'subnet_has_assign_ipv6',
ec2_model('SubnetHasAssignIpv6'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
'subnet_no_assign_ipv6',
ec2_model('SubnetNoAssignIpv6'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
'subnet_deleted',
ec
|
pbrunet/pythran
|
pythran/transformations/normalize_tuples.py
|
Python
|
bsd-3-clause
| 6,743
| 0
|
""" NormalizeTuples removes implicit variable -> tuple conversion. """
from pythran.analyses import Identifiers
from pythran.passmanager import Transformation
import ast
class _ConvertToTuple(ast.NodeTransformer):
def __init__(self, tuple_id, renamings):
self.tuple_id = tuple_id
self.renamings = renamings
def visit_Name(self, node):
if node.id in self.renamings:
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Index(ast.Num(y)),
ast.Load()),
self.renamings[node.id],
ast.Name(self.tuple_id, ast.Load())
)
nnode.ctx = node.ctx
return nnode
return node
class NormalizeTuples(Transformation):
"""
Remove implicit tuple -> variable conversion.
>>> import ast
>>> from pythran import passmanager, backend
>>> node = ast.parse("def foo(): a=(1,2.) ; i,j = a")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(NormalizeTuples, node)
>>> print pm.dump(backend.Python, node)
def foo():
a = (1, 2.0)
i = a[0]
j = a[1]
"""
tuple_name = "__tuple"
def __init__(self):
Transformation.__init__(self)
def get_new_id(self):
i = 0
while 1:
new_id = "{}{}".format(NormalizeTuples.tuple_name, i)
if new_id not in self.ids:
self.ids.add(new_id)
return new_id
else:
i += 1
def traverse_tuples(self, node, state, renamings):
if isinstance(node, ast.Name):
if state:
renamings[node.id] = state
self.update = True
elif isinstance(node, ast.Tuple) or isinstance(node, ast.List):
[self.traverse_tuples(n, state + (i,), renamings)
for i, n in enumerate(node.elts)]
elif isinstance(node, (ast.Subscript, ast.Attribute)):
if state:
renamings[node] = state
self.update = True
else:
raise NotImplementedError
def visit_comprehension(self, node):
renamings = dict()
self.traverse_tuples(node.target, (), renamings)
if renamings:
self.update = True
return self.get_new_id(), renamings
else:
return node
def visit_AnyComp(self, node, *fields):
for field in fields:
setattr(node, field, self.visit(getattr(node, field)))
generators = map(self.visit, node.generators)
nnode = node
for i, g in enumerate(generators):
if isinstance(g, tuple):
gtarget = "{0}{1}".format(g[0], i)
nnode.generators[i].target = ast.Name(
gtarget,
nnode.generators[i].target.ctx)
nnode = _ConvertToTuple(gtarget, g[1]).visit(nnode)
self.update = True
for field in fields:
setattr(node, field, getattr(nnode, field))
node.generators = nnode.generators
return node
def visit_ListComp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_SetComp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_DictComp(self, node):
return self.visit_AnyComp(node, 'key', 'value')
def visit_GeneratorExp(self, node):
return self.visit_AnyComp(node, 'elt')
def visit_Lambda(self, node):
self.generic_visit(node)
for i, arg in enumerate(node.args.args):
renamings = dict()
self.traverse_tuples(arg, (), renamings)
if renamings:
nname = self.get_new_id()
node.args.args[i] = ast.Name(nname, ast.Param())
node.body = _ConvertToTuple(nname, renamings).visit(node.body)
return node
def visit_Assign(self, node):
self.generic_visit(node)
# if the rhs is an identifier, we don't need to duplicate it
# otherwise, better duplicate it...
no_tmp = isinstance(node.value, ast.Name)
extra_assign = [] if no_tmp else [node]
for i, t in enumerate(node.targets):
            if isinstance(t, ast.Tuple) or isinstance(t, ast.List):
renamings = dict()
self.traverse_tuples(t, (), renamings)
if renamings:
gtarget = node.value.id if no_tmp else self.get_new_id()
node.targets[i] = ast.Name(gtarget, node.targets[i].ctx)
for rename, state in sorted(renamings.iteritems()):
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Index(ast.Num(y)),
ast.Load()),
state,
ast.Name(gtarget, ast.Load()))
if isinstance(rename, str):
extra_assign.append(
ast.Assign(
[ast.Name(rename, ast.Store())],
nnode))
else:
extra_assign.append(ast.Assign([rename], nnode))
return extra_assign or node
def visit_For(self, node):
target = node.target
if isinstance(target, ast.Tuple) or isinstance(target, ast.List):
renamings = dict()
self.traverse_tuples(target, (), renamings)
if renamings:
gtarget = self.get_new_id()
node.target = ast.Name(gtarget, node.target.ctx)
for rename, state in sorted(renamings.iteritems()):
nnode = reduce(
lambda x, y: ast.Subscript(
x,
ast.Index(ast.Num(y)),
ast.Load()),
state,
ast.Name(gtarget, ast.Load()))
if isinstance(rename, str):
node.body.insert(0,
ast.Assign(
[ast.Name(rename, ast.Store())],
nnode)
)
else:
node.body.insert(0, ast.Assign([rename], nnode))
self.generic_visit(node)
return node
def visit_FunctionDef(self, node):
self.ids = self.passmanager.gather(Identifiers, node, self.ctx)
return self.generic_visit(node)
|
spulec/moto
|
moto/secretsmanager/urls.py
|
Python
|
apache-2.0
| 166
| 0
|
from .responses import SecretsManagerResponse
url_bases = [r"https?://secretsmanager\.(.+)\.amazonaws\.com"]
url_paths = {"{0}/$": SecretsManagerResponse.dispatch}
| |
josherich/mindynode-parsers
|
mindynode_nltk/migrations/0010_keywordsum_keyword_category.py
|
Python
|
mit
| 519
| 0.001942
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-08-02 20:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mindynode_nltk', '0009_auto_20170802_2046'),
]
operations = [
migrations.AddField(
model_name='keywordsum',
            name='keyword_category',
            field=models.CharField(default='china', max_length=255, null=True, verbose_name='类别'),
),
]
|
czpython/django-cms
|
cms/admin/forms.py
|
Python
|
bsd-3-clause
| 48,437
| 0.001755
|
# -*- coding: utf-8 -*-
from django import forms
from django.apps import apps
from django.contrib.auth import get_user_model, get_permission_codename
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.template.defaultfilters import slugify
from django.utils.encoding import force_text
from django.utils.translation import ugettext, ugettext_lazy as _
from cms import api
from cms.apphook_pool import apphook_pool
from cms.exceptions import PluginLimitReached
from cms.extensions import extension_pool
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_DIRTY, ROOT_USER_LEVEL
from cms.forms.validators import validate_relative_url, validate_url_uniqueness
from cms.forms.widgets import UserSelectAdminWidget, AppHookSelect, ApplicationConfigSelect
from cms.models import (CMSPlugin, Page, PageType, PagePermission, PageUser, PageUserGroup, Title,
Placeholder, GlobalPagePermission, TreeNode)
from cms.models.permissionmodels import User
from cms.plugin_pool import plugin_pool
from cms.signals.apphook import set_restart_trigger
from cms.utils.conf import get_cms_setting
from cms.utils.compat.forms import UserChangeForm
from cms.utils.i18n import get_language_list, get_language_object
from cms.utils.permissions import (
get_current_user,
get_subordinate_users,
get_subordinate_groups,
get_user_permission_level,
)
from menus.menu_pool import menu_pool
def get_permission_accessor(obj):
User = get_user_model()
if isinstance(obj, (PageUser, User,)):
rel_name = 'user_permissions'
else:
rel_name = 'permissions'
return getattr(obj, rel_name)
def get_page_changed_by_filter_choices():
    # This is not site-aware; it has been like this forever.
    # It would be nice for it to filter by site.
values = (
Page
.objects
.filter(publisher_is_draft=True)
.distinct()
.order_by('changed_by')
.values_list('changed_by', flat=True)
)
yield ('', _('All'))
for value in values:
yield (value, value)
def get_page_template_filter_choices():
yield ('', _('All'))
for value, name in get_cms_setting('TEMPLATES'):
yield (value, name)
def save_permissions(data, obj):
models = (
(Page, 'page'),
(PageUser, 'pageuser'),
(PageUserGroup, 'pageuser'),
(PagePermission, 'pagepermission'),
)
if not obj.pk:
# save obj, otherwise we can't assign permissions to him
obj.save()
permission_accessor = get_permission_accessor(obj)
for model, name in models:
content_type = ContentType.objects.get_for_model(model)
for key in ('add', 'change', 'delete'):
# add permission `key` for model `model`
codename = get_permission_codename(key, model._meta)
permission = Permission.objects.get(content_type=content_type, codename=codename)
field = 'can_%s_%s' % (key, name)
if data.get(field):
permission_accessor.add(permission)
elif field in data:
permission_accessor.remove(permission)
class CopyPermissionForm(forms.Form):
"""
Holds the specific field for permissions
"""
copy_permissions = forms.BooleanField(
label=_('Copy permissions'),
required=False,
initial=True,
)
class BasePageForm(forms.ModelForm):
_user = None
_site = None
_language = None
title = forms.CharField(label=_("Title"), max_length=255, widget=forms.TextInput(),
help_text=_('The default title'))
slug = forms.CharField(label=_("Slug"), max_length=255, widget=forms.TextInput(),
help_text=_('The part of the title that is used in the URL'))
menu_title = forms.CharField(label=_("Menu Title"), widget=forms.TextInput(),
help_text=_('Overwrite what is displayed in the menu'), required=False)
page_title = forms.CharField(label=_("Page Title"), widget=forms.TextInput(),
help_text=_('Overwrites what is displayed at the top of your browser or in bookmarks'),
required=False)
meta_description = forms.CharField(label=_('Description meta tag'), required=False,
widget=forms.Textarea(attrs={'maxlength': '155', 'rows': '4'}),
help_text=_('A description of the page used by search engines.'),
max_length=155)
class Meta:
model = Page
fields = []
def clean_slug(self):
slug = slugify(self.cleaned_data['slug'])
if not slug:
raise ValidationError(_("Slug must not be empty."))
return slug
class AddPageForm(BasePageForm):
source = forms.ModelChoiceField(
label=_(u'Page type'),
queryset=Page.objects.filter(
is_page_type=True,
publisher_is_draft=True,
),
required=False,
)
parent_node = forms.ModelChoiceField(
queryset=TreeNode.objects.all(),
required=False,
widget=forms.HiddenInput(),
)
class Meta:
model = Page
fields = ['source']
    def __init__(self, *args, **kwargs):
super(AddPageForm, self).__init__(*args, **kwargs)
source_field = self.fields.get('source')
if not source_field or source_field.widget.is_hidden:
return
root_page = PageType.get_root_page(site=self._site)
if root_page:
# Set the choicefield's choices to the various page_types
descendants = root_page.get_descendant_pages().filter(is_page_type=True)
titles = Title.objects.filter(page__in=descendants, language=self._language)
choices = [('', '---------')]
choices.extend((title.page_id, title.title) for title in titles)
source_field.choices = choices
else:
choices = []
if len(choices) < 2:
source_field.widget = forms.HiddenInput()
def clean(self):
data = self.cleaned_data
if self._errors:
# Form already has errors, best to let those be
# addressed first.
return data
parent_node = data.get('parent_node')
if parent_node:
slug = data['slug']
parent_path = parent_node.item.get_path(self._language)
path = u'%s/%s' % (parent_path, slug) if parent_path else slug
else:
path = data['slug']
try:
# Validate the url
validate_url_uniqueness(
self._site,
path=path,
language=self._language,
)
except ValidationError as error:
self.add_error('slug', error)
else:
data['path'] = path
return data
def clean_parent_node(self):
parent_node = self.cleaned_data.get('parent_node')
if parent_node and parent_node.site_id != self._site.pk:
raise ValidationError("Site doesn't match the parent's page site")
return parent_node
def create_translation(self, page):
data = self.cleaned_data
title_kwargs = {
'page': page,
'language': self._language,
'slug': data['slug'],
'path': data['path'],
'title': data['title'],
}
if 'menu_title' in data:
title_kwargs['menu_title'] = data['menu_title']
if 'page_title' in data:
title_kwargs['page_title'] = data['page_title']
if 'meta_description' in data:
title_kwargs['meta_description'] = data['meta_description']
return api.create_title(**title_kwargs)
def from_source(self, source, parent=None):
new_page =
|
rdkit/rdkit-orig
|
rdkit/ML/test_list.py
|
Python
|
bsd-3-clause
| 523
| 0.059273
|
tests=[
("python","UnitTestBuildComposite.py",{}),
  ("python","UnitTestScreenComposite.py",{}),
("python","UnitTestAnalyzeComposite.py",{}),
]
for dir in ['Cluster','Composite','Data','DecTree','Descriptors','FeatureSelect','InfoTheory','KNN','ModelPackage','NaiveBayes','Neural','SLT']:
tests.append(('python','test_list.py',{'dir':dir}))
longTests=[
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
    sys.exit(len(failed))
|
Vector35/binaryninja-api
|
python/decorators.py
|
Python
|
mit
| 380
| 0.026316
|
def passive(cls):
passive_note = '''
.. note:: This object is a "passive" object. Any changes you make to it will not be reflected in the core and vice-versa. If you wish to update a core version of this object you should use the appropriate API.
'''
if hasattr(cls, "__doc__") and cls.__doc__:
cls.__doc__ += passive_note
else:
        cls.__doc__ = passive_note
return cls
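# Illustrative usage only (the class name below is made up, not part of the API):
#
#     @passive
#     class CoreStructuredData(object):
#         """Example wrapper around core data."""
#
# After decoration, CoreStructuredData.__doc__ ends with the passive-object note.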
|
uboness/sublime-plugins
|
Intellij/Intellij.py
|
Python
|
apache-2.0
| 2,360
| 0.007627
|
import sublime, sublime_plugin
import functools
import os
import shutil
class IntellijCopyCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
selection = v.sel();
if len(selection) == 0:
v.run_command('expand_selection', { "to": "line" })
v.run_command('copy')
class IntellijCutCommand(sublime_plugin.TextCommand):
def run(self, edit):
v = self.view
selection = v.sel();
if len(selection) == 0:
v.run_command('expand_selection', { "to": "line" })
v.run_command('cut')
class IntellijRenameFileCommand(sublime_plugin.WindowCommand):
def run(self):
window = self.window
view = window.active_view()
filename = view.file_name()
if filename == None:
return
branch, leaf = os.path.split(filename)
v = window.show_input_panel("New Name:", leaf, functools.partial(self.on_done, filename, branch, view), None, None)
name, ext = os.path.splitext(leaf)
v.sel().clear()
v.sel().add(sublime.Region(0, len(name)))
def on_done(self, old, branch, view, leaf):
new = os.path.join(branch, leaf)
try:
os.rename(old, new)
print 'finding open file [' + old + ']'
            # v = self.window.find_open_file(old)
if view != None:
view.retarget(new)
except:
sublime.status_message("Unable to rename")
class IntellijCopyFileCommand(sublime_plugin.WindowCommand):
def run(self):
window = self.window
view = window.active_view()
filename = view.file_name()
if filename == None:
return
branch, leaf = os.path.split(filename)
v = window.show_input_panel("New File Name:", filename, functools.partial(self.on_done, filename), None, None)
name, ext = os.path.splitext(leaf)
v.sel().clear()
start_index = len(filename) - len(leaf)
v.sel().add(sublime.Region(start_index, start_index + len(name)))
def on_done(self, src_path, dest_path):
try:
shutil.copyfile(src_path, dest_path)
self.window.open_file(dest_path)
        except:
            sublime.status_message("Unable to copy")
|
tanchao/algo
|
leetcode/py/75_sort_colors.py
|
Python
|
mit
| 579
| 0.008636
|
class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
red, white, blue = 0, 0, len(nums) - 1
        while white <= blue:
if nums[white] == 0: # red
nums[red], nums[white] = nums[white], nums[red]
red += 1
white += 1
elif nums[white] == 1: # white
white += 1
else: # blue
nums[blue], nums[white] = nums[white], nums[blue]
blue -= 1
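# Quick sanity check (illustrative only; the online judge normally provides
# `from typing import List` for the annotation used above):
#
#     nums = [2, 0, 2, 1, 1, 0]
#     Solution().sortColors(nums)
#     assert nums == [0, 0, 1, 1, 2, 2]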
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/Plot/Markov.py
|
Python
|
mit
| 8,854
| 0.038288
|
import numpy as np;
np.set_printoptions(linewidth=40, precision=5, suppress=True)
import pandas as pd; pd.options.display.max_rows=80;pd.options.display.expand_frame_repr=False;pd.options.display.max_columns=20
import pylab as plt;
import os; home=os.path.expanduser('~') +'/'
import sys;sys.path.insert(1,'/home/arya/workspace/bio/')
from scipy import stats  # needed below for stats.norm in plotNull/plotAlternative
from CLEAR.Libs.Markov import Markov
import Utils.Util as utl
import Utils.Simulation as Simulation
import matplotlib as mpl
import seaborn as sns
import Utils.Plots as pplt
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 56});
mpl.rc('text', usetex=True)
sns.set_style("whitegrid", {"grid.color": "1", 'axes.linewidth': .5, "grid.linewidth": ".09"})
subptitle = list('ABCDEFGHI')
def createSelectionSimulations(s=0.1,maxGen=100):
def runSim(i):
try:
sim = Simulation.Simulation(maxGeneration=maxGen, generationStep=1, s=s, foldInitialAFs=False,
ModelName='Markov', L=1000, numReplicates=1,
makeSureSelectedSiteDontGetLost=False, experimentID=0)
x=sim.X[:,sim.siteUnderSelection,0]
except:
x=np.zeros(sim.X.shape[0])
x[:]=None
if not i%1000: print s, i
return x
X=map(runSim,range(10000))
a=pd.DataFrame(X)
a.to_pickle(utl.outpath + 'markov/T{}.S{:03.0f}.obs.df'.format(maxGen, s * 1000))
print 'done!'
def plotNull(subp, nu0=0.005, fontsize=5):
obs = pd.read_pickle(utl.outpath + 'markov/neutral.obs.{}.pkl'.format(nu0))
T = Markov.computeTransition(0, N=1000)
dfplt = pd.concat([pd.Series({'scale': 10, 'xlim': [0.0, 0.01], 'ylim': [0, 1]}, name=(0.005, 1)),
pd.Series({'scale': 30, 'xlim': [0.06, 0.14], 'ylim': [0, 0.15]}, name=(0.1, 1)),
pd.Series({'scale': 30, 'xlim': [0.0, 0.015], 'ylim': [0, 0.3]}, name=(0.005, 10)),
pd.Series({'scale': 45, 'xlim': [0.0, 0.2], 'ylim': [0, 0.025]}, name=(0.1, 10)),
pd.Series({'scale':30, 'xlim':[0.0,0.03],'ylim': [0,0.2]},name=(0.005,100)),pd.Series({'scale':50, 'xlim':[0.00,0.4],'ylim': [0,0.004]},name=(0.1,100))
],axis=1).T
markov=T.loc[nu0].copy(True);markov.name='Markov Chain'
xx=np.arange(0,1,0.00001)
N=200; tau=1;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx);
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';brownian*=dfplt.loc[(nu0,tau)].scale
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.subplot(3, 3, subp[0]);
brownian.plot(color='r');
markov.plot(color='b');
o=pd.Series(obs.X[1].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[1].flatten(),bins=500,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*4)
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), fontsize=fontsize)
plt.ylabel(r'$P(\nu_\tau|\nu_0)$')
tau=10
for _ in range(9):
markov=markov.dot(T)
N=200;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx)
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';
brownian*=dfplt.loc[(nu0,tau)].scale
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[0] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[1]);
brownian.plot(color='r');
markov.plot(color='b');
o=pd.Series(obs.X[10].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[10].flatten(),bins=100,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*20)
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), loc=1, fontsize=fontsize)
pplt.setSize(plt.gca(), fontsize=fontsize)
tau=100
for _ in range(90):
markov=markov.dot(T)
N=200;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx)
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';
brownian*=dfplt.loc[(nu0,tau)].scale
plt.title('({})'.format(subptitle[subp[1] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[2]);
brownian.plot(color='r');
markov.plot(color='b')
o=pd.Series(obs.X[100].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[100].flatten(),bins=30,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*60)
o.name = 'Observation';
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), loc=1, fontsize=fontsize)
if subp[2] == 3:
plt.legend(loc='center right', fontsize=fontsize)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[2] - 1]), fontsize=fontsize)
def plotAlternative(subp, s=0.1, fontsize=5):
nu0=0.005
obs = pd.read_pickle(utl.outpath + 'markov/T100.S{:03.0f}.obs.df'.format(s * 1000))
T = Markov.computeTransition(s, 1000)
dfplt= pd.concat([pd.Series({'scale':10, 'xlim':[0.0,0.01],'ylim': [0,0.2]},name=(0.005,1)),pd.Series({'scale':30, 'xlim':[0.06,0.14],'ylim': [0,0.15]},name=(0.1,1)),
pd.Series({'scale':30, 'xlim':[0.0,0.015],'ylim': [0,0.15]},name=(0.005,10)),pd.Series({'scale':45, 'xlim':[0.0,0.2],'ylim': [0,0.025]},name=(0.1,10)),
pd.Series({'scale':30, 'xlim':[0.0,1],'ylim': [0,0.01]},name=(0.005,100)),pd.Series({'scale':50, 'xlim':[0.00,0.4],'ylim': [0,0.004]},name=(0.1,100))
],axis=1).T
markov=T.loc[nu0].copy(True);markov.name='Markov Chain'
plt.subplot(3, 3, subp[0])
tau=1
o=(obs[1].value_counts().sort_index()/obs.shape[0])
o.loc[0.0055]=0.1211
o.index=o.index-0.0005/2
markov.plot(color='b');
o.plot(color='g');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.ylabel(r'$P(\nu_\tau|\nu_0,s)$')
plt.xlabel('$s$')
tau=10
for _ in range(9):
markov=markov.dot(T)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[0] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[1])
markov.plot(color='b');
(obs[10].value_counts().sort_index() / obs.shape[0]).plot(color='g');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.xlabel('$s$')
tau=100
for _ in range(90):
markov=markov.dot(T)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[1] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[2])
counts,limits=np.histogram(obs[100].values,bins=50,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/obs.shape[0]
o/=35
o.loc[0.0] = o.iloc[0]
o = o.sort_index()
o.iloc[1] = o.iloc[2]
# o=(obs[100].value_counts().sort_index()/obs.shape[0])
o.name = 'Observation';
o.plot(color='g');
markov.plot(color='b');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.xlabel('$s$')
|
memento7/KINCluster
|
KINCluster/__init__.py
|
Python
|
mit
| 1,038
| 0
|
"""
KINCluster is a clustering library in the style of KIN.
release note:
- version 0.1.6
fix settings
update pipeline
delete unused arguments
fix convention by pylint
now logging
- version 0.1.5.5
fix using custom settings
    support both module and dict
- version 0.1.5.4
Update tokenizer, remove stopwords eff
- version 0.1.5.3
now custom setting available.
see settings.py
- version 0.1.5.2
change item, extractor, pipeline module
now, pipeline.dress_item pass just item(extractor.dump)
fix prev versions error (too many value to unpack)
"""
__version__ = '0.1.6'
__all__ = ['KINCluster',
'Cluster', 'Extractor', 'Item', 'Pipeline',
'tokenizer', 'stopwords']
from KINCluster.KINCluster import KINCluster
from KINCluster.core.cluster import Cluster
from KINCluster.core.extractor import Extractor
from KINCluster.core.item import Item
from KINCluster.core.pipeline import Pipeline
from KINCluster.lib.tokenizer import tokenizer
from KINCluster.lib.stopwords import stopwords
|
great-expectations/great_expectations
|
great_expectations/core/evaluation_parameters.py
|
Python
|
apache-2.0
| 17,237
| 0.002843
|
import copy
import datetime
import logging
import math
import operator
import traceback
from collections import namedtuple
from typing import Any, Dict, Optional, Tuple
from pyparsing import (
CaselessKeyword,
Combine,
Forward,
Group,
Literal,
ParseException,
Regex,
Suppress,
Word,
alphanums,
alphas,
delimitedList,
dictOf,
)
from great_expectations.core.urn import ge_urn
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.exceptions import EvaluationParameterError
logger = logging.getLogger(__name__)
_epsilon = 1e-12
class EvaluationParameterParser:
"""
This Evaluation Parameter Parser uses pyparsing to provide a basic expression language capable of evaluating
    parameters using values available only at run time.
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
    The parser is modified from: https://github.com/pyparsing/pyparsing/blob/master/examples/fourFn.py
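
    A worked example (illustrative only): because '^' binds right-to-left, an
    expression such as "trunc(5 / 2) + 2 ^ 3 ^ 2" evaluates as
    trunc(2.5) + 2 ** (3 ** 2) == 2 + 512 == 514.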
"""
# map operator symbols to corresponding arithmetic operations
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": lambda a: int(a),
"round": round,
"sgn": lambda a: -1 if a < -_epsilon else 1 if a > _epsilon else 0,
"now": datetime.datetime.now,
"datetime": datetime.datetime,
"timedelta": datetime.timedelta,
}
def __init__(self):
self.exprStack = []
self._parser = None
def push_first(self, toks):
self.exprStack.append(toks[0])
def push_unary_minus(self, toks):
for t in toks:
if t == "-":
self.exprStack.append("unary -")
else:
break
def clear_stack(self):
del self.exprStack[:]
def get_parser(self):
self.clear_stack()
if not self._parser:
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided pyparsing_common.number, but convert back to str:
# fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?(?:\d+|\.\d+)(?:\.\d+)?(?:[eE][+-]?\d+)?")
ge_urn = Combine(
Literal("urn:great_expectations:")
+ Word(alphas, f"{alphanums}_$:?=%.&")
)
variable = Word(alphas, f"{alphanums}_$")
ident = ge_urn | variable
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimitedList(Group(expr))
# We will allow functions either to accept *only* keyword
# expressions or *only* non-keyword expressions
# define function keyword arguments
key = Word(f"{alphas}_") + Suppress("=")
# value = (fnumber | Word(alphanums))
value = expr
keyval = dictOf(key.setParseAction(self.push_first), value)
kwarglist = delimitedList(keyval)
# add parse action that replaces the function identifier with a (name, number of args, has_fn_kwargs) tuple
# 20211009 - JPC - Note that it's important that we consider kwarglist
# first as part of disabling backtracking for the function's arguments
fn_call = (ident + lpar + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), 0, False))
) | (
(ident + lpar - Group(expr_list) + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), len(t[0]), False))
)
^ (ident + lpar - Group(kwarglist) + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), len(t[0]), True))
)
)
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).setParseAction(self.push_first)
| Group(lpar + expr + rpar)
)
).setParseAction(self.push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
# exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).setParseAction(self.push_first)[...]
term = factor + (multop + factor).setParseAction(self.push_first)[...]
expr <<= term + (addop + term).setParseAction(self.push_first)[...]
self._parser = expr
return self._parser
def evaluate_stack(self, s):
op, num_args, has_fn_kwargs = s.pop(), 0, False
if isinstance(op, tuple):
op, num_args, has_fn_kwargs = op
if op == "unary -":
return -self.evaluate_stack(s)
if op in "+-*/^":
# note: operands are pushed onto the stack in reverse order
op2 = self.evaluate_stack(s)
op1 = self.evaluate_stack(s)
return self.opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in self.fn:
# note: args are pushed onto the stack in reverse order
if has_fn_kwargs:
kwargs = dict()
for _ in range(num_args):
v = self.evaluate_stack(s)
k = s.pop()
kwargs.update({k: v})
return self.fn[op](**kwargs)
else:
args = reversed([self.evaluate_stack(s) for _ in range(num_args)])
return self.fn[op](*args)
else:
# try to evaluate as int first, then as float if int fails
# NOTE: JPC - 20200403 - Originally I considered returning the raw op here if parsing as float also
# fails, but I decided against it to instead require that the *entire* expression evaluates
# numerically UNLESS there is *exactly one* expression to substitute (see cases where len(L) == 1 in the
# parse_evaluation_parameter method.
try:
return int(op)
except ValueError:
return float(op)
def build_evaluation_parameters(
expectation_args: dict,
evaluation_parameters: Optional[dict] = None,
interactive_evaluation: bool = True,
data_context=None,
) -> Tuple[dict, dict]:
"""Build a dictionary of parameters to evaluate, using the provided evaluation_parameters,
AND mutate expectation_args by removing any parameter values passed in as temporary values during
exploratory work.
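
    For example (illustrative only): an argument such as
    {"min_value": {"$PARAMETER": "my_min_rows"}} is replaced with the value bound
    to "my_min_rows" in evaluation_parameters when interactive evaluation is enabled.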
"""
evaluation_args = copy.deepcopy(expectation_args)
substituted_parameters = {}
# Iterate over arguments, and replace $PARAMETER-defined args with their
# specified parameters.
for key, value in evaluation_args.items():
if isinstance(value, dict) and "$PARAMETER" in value:
# We do not even need to search for a value if we are not going to do interactive evaluation
if not interactive_evaluation:
continue
# First, ch
|
tryexceptpass/sofi
|
sofi/app/__init__.py
|
Python
|
mit
| 23
| 0
|
from .app import Sofi
| |
seanballais/botos
|
tests/test_results_exporter_view.py
|
Python
|
gpl-3.0
| 18,942
| 0.00132
|
import io
import openpyxl
from django.test import (
Client, TestCase
)
from django.urls import reverse
from core.models import (
User, Batch, Section, Election, Candidate, CandidateParty,
CandidatePosition, Vote, VoterProfile, Setting, UserType
)
class ResultsExporter(TestCase):
"""
Tests the results xlsx exporter view.
    This subview may only process requests from logged-in admin users. Other
    users will be redirected to '/'. The view also only accepts GET requests.
    GET requests may have an 'election' parameter whose value must be the id
    of an election. The lack of an election parameter will result in the
    results of all elections being exported, with each election having its
    own worksheet. Other URL parameters will be ignored. Invalid election
    parameter values, e.g. non-existent election IDs and non-integer parameters,
    will return an error message.
View URL: '/results/export'
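
    Example requests (illustrative):
        GET /results/export              -> all elections, one worksheet each
        GET /results/export?election=1   -> only the election with id 1
        GET /results/export?election=x   -> an error message (invalid value)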
"""
@classmethod
def setUpTestData(cls):
batch_num = 0
section_num = 0
voter_num = 0
party_num = 0
position_num = 0
candidate_num = 0
num_elections = 2
voters = list()
positions = dict()
for i in range(num_elections):
election = Election.objects.create(name='Election {}'.format(i))
positions[str(election.name)] = list()
num_batches = 2
for j in range(num_batches):
batch = Batch.objects.create(year=batch_num, election=election)
batch_num += 1
num_sections = 2 if j == 0 else 1
for k in range(num_sections):
section = Section.objects.create(
section_name=str(section_num)
)
section_num += 1
num_students = 2
for l in range(num_students):
voter = User.objects.create(
username='user{}'.format(voter_num),
first_name=str(voter_num),
last_name=str(voter_num),
type=UserType.VOTER
)
voter.set_password('voter')
voter.save()
voter_num += 1
VoterProfile.objects.create(
user=voter,
batch=batch,
section=section
)
voters.append(voter)
num_positions = 3
for i in range(num_positions):
position = CandidatePosition.objects.create(
position_name='Position {}'.format(position_num),
election=election
)
positions[str(election.name)].append(position)
position_num += 1
num_parties = 3
for j in range(num_parties):
party = CandidateParty.objects.create(
party_name='Party {}'.format(party_num),
election=election
)
party_num += 1
if j != 2: # Let every third party have no candidates.
num_positions = 3
for k in range(num_positions):
position = positions[str(election.name)][k]
candidate = Candidate.objects.create(
user=voters[candidate_num],
party=party,
position=position,
election=election
)
Vote.objects.create(
user=voters[candidate_num],
candidate=candidate,
election=election
)
candidate_num += 1
# Let's give one candidate an additional vote to really make sure that
# we all got the correct number of votes.
Vote.objects.create(
user=voters[0],
# NOTE: The voter in voter[1] is a Position 1 candidate of
# Party 1, where the voter in voter[0] is a member.
candidate=Candidate.objects.get(user=voters[1]),
election=Election.objects.get(name='Election 0')
)
_admin = User.objects.create(username='admin', type=UserType.ADMIN)
_admin.set_password('root')
_admin.save()
def setUp(self):
self.client.login(username='admin', password='root')
def test_anonymous_get_requests_redirected_to_index(self):
self.client.logout()
response = self.client.get(reverse('results-export'), follow=True)
        self.assertRedirects(response, '/?next=%2Fadmin%2Fresults')
def test_voter_get_requests_redirected_to_index(self):
self.client.logout()
self.client.login(username='user0', password='voter')
response = self.client.get(reverse('results-export'), follow=True)
self.assertRedirects(response, reverse('index'))
    def test_get_all_elections_xlsx(self):
response = self.client.get(reverse('results-export'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['Content-Disposition'],
'attachment; filename="Election Results.xlsx"'
)
wb = openpyxl.load_workbook(io.BytesIO(response.content))
self.assertEqual(len(wb.worksheets), 2)
# Check first worksheet.
ws = wb.worksheets[0]
self.assertEqual(wb.sheetnames[0], 'Election 0')
row_count = ws.max_row
col_count = ws.max_column
self.assertEqual(row_count, 25)
self.assertEqual(col_count, 5)
self.assertEqual(str(ws.cell(1, 1).value), 'Election 0 Results')
self.assertEqual(str(ws.cell(2, 1).value), 'Candidates')
cellContents = [
'Position 0',
'Party 0',
'0, 0',
'Party 1',
'3, 3',
'Party 2',
'None',
'Position 1',
'Party 0',
'1, 1',
'Party 1',
'4, 4',
'Party 2',
'None',
'Position 2',
'Party 0',
'2, 2',
'Party 1',
'5, 5',
'Party 2',
'None'
]
for cellIndex, content in enumerate(cellContents, 5):
self.assertEqual(str(ws.cell(cellIndex, 1).value), content)
self.assertEqual(str(ws.cell(2, 2).value), 'Number of Votes')
self.assertEqual(str(ws.cell(3, 2).value), '0')
self.assertEqual(str(ws.cell(4, 2).value), '0') # Section
self.assertEqual(str(ws.cell(7, 2).value), '1')
self.assertEqual(str(ws.cell(9, 2).value), '0')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 2).value), '2')
self.assertEqual(str(ws.cell(16, 2).value), '0')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 2).value), '0')
self.assertEqual(str(ws.cell(23, 2).value), '0')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(4, 3).value), '1') # Section
self.assertEqual(str(ws.cell(7, 3).value), '0')
self.assertEqual(str(ws.cell(9, 3).value), '1')
self.assertEqual(str(ws.cell(11, 2).value), 'N/A')
self.assertEqual(str(ws.cell(14, 3).value), '0')
self.assertEqual(str(ws.cell(16, 3).value), '0')
self.assertEqual(str(ws.cell(18, 2).value), 'N/A')
self.assertEqual(str(ws.cell(21, 3).value), '1')
self.assertEqual(str(ws.cell(23, 3).value), '0')
self.assertEqual(str(ws.cell(25, 2).value), 'N/A')
self.assertEqual(str(ws.cell(3, 4).value), '1')
self.assertEqual(str(ws.cell(4, 4).value), '2') # Section
self.assertEqual(str(ws.cell(7, 4).value), '0')
self.assertEqual(str(ws.cel
|
thegooglecodearchive/phonetooth
|
phonetooth/bluetoothdiscovery.py
|
Python
|
gpl-2.0
| 1,814
| 0.012679
|
# Copyright (C) 2008 Dirk Vanden Boer <dirk.vdb@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import bluetooth
class BluetoothDevice:
def __init__(self, address, port, deviceName, serviceName):
self.address = address
self.port = port
self.deviceName = deviceName
self.serviceName = serviceName
    def __str__(self):
        return self.deviceName + '(' + self.serviceName + ') - ' + self.address + ':' + str(self.port)
class BluetoothDiscovery:
def findSerialDevices(self):
devices = bluetooth.discover_devices(duration = 5, lookup_names = False, flush_cache = True)
serialDevices = []
for address in devices:
services = bluetooth.find_service(uuid = bluetooth.SERIAL_PORT_CLASS, address = address)
            services.extend(bluetooth.find_service(uuid = bluetooth.DIALUP_NET_CLASS))
for service in services:
serialDevices.append(BluetoothDevice(service['host'], service['port'], bluetooth.lookup_name(service['host']), service['name']))
return serialDevices
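# Illustrative usage only (requires PyBluez and a discoverable device in range):
#
#     discovery = BluetoothDiscovery()
#     for device in discovery.findSerialDevices():
#         print str(device)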
|
pyohei/rirakkuma-crawller
|
tweet.py
|
Python
|
mit
| 565
| 0.001898
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
""" Tweet rirrakuma 4kuma submit.
"""
import tweepy
TWEET_CONTENT = (
"リラックマの4クマ漫画が更新されました!\n"
"http://www.shufu.co.jp/contents/4kuma/"
)
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def main():
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth_handler=auth)
    api.update_status(status=TWEET_CONTENT)
if __name__ == '__main__':
main()
|
openprocurement/openprocurement.auctions.dgf
|
openprocurement/auctions/dgf/views/other/lot.py
|
Python
|
apache-2.0
| 3,339
| 0.004193
|
# -*- coding: utf-8 -*-
from openprocurement.auctions.core.utils import (
apply_patch,
context_unpack,
get_now,
json_view,
opresource,
save_auction,
)
from openprocurement.auctions.core.validation import (
validate_lot_data,
validate_patch_lot_data,
)
from openprocurement.auctions.core.views.mixins import AuctionLotResource
@opresource(name='dgfOtherAssets:Auction Lots',
collection_path='/auctions/{auction_id}/lots',
path='/auctions/{auction_id}/lots/{lot_id}',
auctionsprocurementMethodType="dgfOtherAssets",
description="Auction lots")
class AuctionLotResource(AuctionLotResource):
@json_view(content_type="application/json", validators=(validate_lot_data,), permission='edit_auction')
def collection_post(self):
"""Add a lot
"""
auction = self.request.validated['auction']
if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t add lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
lot = self.request.validated['lot']
lot.date = get_now()
auction.lots.append(lot)
if save_auction(self.request):
self.LOGGER.info('Created auction lot {}'.format(lot.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_create'}, {'lot_id': lot.id}))
self.request.response.status = 201
route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=route, lot_id=lot.id, _query={})
return {'data': lot.serialize("view")}
@json_view(content_type="application/json", validators=(validate_patch_lot_data,), permission='edit_auction')
def patch(self):
"""Update of lot
"""
auction = self.request.validated['auction']
if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t update lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
if apply_patch(self.request, src=self.request.context.serialize()):
self.LOGGER.info('Updated auction lot {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_patch'}))
return {'data': self.request.context.serialize("view")}
@json_view(permission='edit_auction')
def delete(self):
"""Lot deleting
"""
auction = self.request.validated['auction']
        if auction.status not in ['active.tendering']:
self.request.errors.add('body', 'data', 'Can\'t delete lot in current ({}) auction status'.format(auction.status))
self.request.errors.status = 403
return
lot = self.request.context
res = lot.serialize("view")
auction.lots.remove(lot)
if save_auction(self.request):
self.LOGGER.info('Deleted auction lot {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_delete'}))
return {'data': res}
|
AlexCatarino/Lean
|
Algorithm.Python/DelistingEventsAlgorithm.py
|
Python
|
apache-2.0
| 3,448
| 0.007251
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Demonstration of using the Delisting event in your algorithm. Assets are delisted on their last day of trading, or when their contract expires.
### This data is not included in the open source project.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="data event handlers" />
### <meta name="tag" content="delisting event" />
class DelistingEventsAlgorithm(QCAlgorithm):
def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
self.SetStartDate(2007, 5, 16) #Set Start Date
self.SetEndDate(2007, 5, 25) #Set End Date
self.SetCash(100000) #Set Strategy Cash
# Find more symbols here: http://quantconnect.com/data
self.AddEquity("AAA.1", Resolution.Daily)
self.AddEquity("SPY", Resolution.Daily)
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
'''
if self.Transactions.OrdersCount == 0:
self.SetHoldings("AAA.1", 1)
self.Debug("Purchased stock")
for kvp in data.Bars:
symbol = kvp.Key
value = kvp.Value
self.Log("OnData(Slice): {0}: {1}: {2}".format(self.Time, symbol, value.Close))
# the slice can also contain delisting data: data.Delistings in a dictionary string->Delisting
aaa = self.Securities["AAA.1"]
if aaa.IsDelisted and aaa.IsTradable:
raise Exception("Delisted security must NOT be tradable")
if not aaa.IsDelisted and not aaa.IsTradable:
raise Exception("Securities must be marked as tradable until they're delisted or removed from the universe")
for kvp in data.Delistings:
symbol = kvp.Key
value = kvp.Value
if value.Type == DelistingType.Warning:
self.Log("OnData(Delistings): {0}: {1} will be delisted at end of day today.".format(self.Time, symbol))
# liquidate on delisting warning
self.SetHoldings(symbol, 0)
if value.Type == DelistingType.Delisted:
self.Log("OnData(Delistings): {0}: {1} has been delisted.".format(self.Time, symbol))
# fails because the security has already been delisted and is no longer tradable
self.SetHoldings(symbol, 1)
def OnOrderEvent(self, orderEvent):
self.Log("OnOrderEvent(OrderEvent): {0}: {1}".format(self.Time, orderEvent))
|
mhorn71/StarinetPythonLogger
|
utilities/staribuscrc.py
|
Python
|
gpl-2.0
| 2,214
| 0.004065
|
__author__ = 'mark'
# StarinetPython3Logger a data logger for the Beaglebone Black.
# Copyright (C) 2015 Mark Horn
#
# This file is part of StarinetPython3Logger.
#
# StarinetPython3Logger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# StarinetPython3Logger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with StarinetPython3Logger. If not, see <http://www.gnu.org/licenses/>.
import crcmod
import logging
import sys
## Set crc16 parameters to polynomial 8408, initial value 0xffff, reversed True, Final XOR value 0x00
crc16 = crcmod.mkCrcFun(0x018408, 0xFFFF, True, 0x0000)
## initialise logger
logger = logging.getLogger('utilities.staribuscCrc')
def checkcrc(buffer0):
logger.debug("Check crc was called.")
buffer0 = buffer0.encode('utf-8')
rxcrc = buffer0[-4:] # assign the received crc to rxcrc
logger.debug("%s %s", "Received data crc - ", rxcrc)
newrxcrc = str(hex(crc16(buffer0[:-4])).replace('x', '')[1:].zfill(4)).upper() # new crc
newrxcrc = newrxcrc.encode('utf-8')
logger.debug("%s %s", "Calculated new crc based on received data -", newrxcrc)
#### Check old and new crc's match if they don't return string with 0200 crc error
if newrxcrc != rxcrc:
logger.debug("%s %s %s %s", "Received crc - ", rxcrc, "does not match our generated crc - ", newrxcrc)
return '0200'
else:
        logger.debug("CRCs match")
return '0'
def newcrc(buffer0):
logger.debug("New crc was called.")
buffer0 = buffer0.encode('UTF-8')
datacrc = str(hex(crc16(buffer0)).replace('x', '')[1:].zfill(4)).upper()
value = datacrc
logger.debug("%s %s", "Calculated new message crc -", datacrc)
return value
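# Illustrative round trip (the message string is made up, not part of the module):
#
#     crc = newcrc('0001command')        # 4-character upper-case hex string
#     checkcrc('0001command' + crc)      # returns '0' on a match, '0200' otherwise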
if __name__ == "__main__":
print(newcrc(str(sys.argv[1:])))
|
jlrodrig/MyAnalysis
|
MiniAnalyzer/python/ConfFile_cfg.py
|
Python
|
gpl-3.0
| 471
| 0.019108
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:myfile.root'
)
)
process.demo = cms.EDAnalyzer('MiniAnalyzer'
)
process.p = cms.Path(process.demo)
|
|
cliftonmcintosh/openstates
|
openstates/co/votes.py
|
Python
|
gpl-3.0
| 12,425
| 0.002334
|
from openstates.utils import LXMLMixin
from billy.scrape.votes import VoteScraper, Vote
from billy.scrape.utils import convert_pdf
import datetime
import subprocess
import lxml
import os
import re
journals = "http://www.leg.state.co.us/CLICS/CLICS%s/csljournals.nsf/" \
"jouNav?Openform&%s"
date_re = re.compile(
r"(?i).*(?P<dt>(monday|tuesday|wednesday|thursday|friday|saturday|sunday)"
".*, \d{4}).*"
)
vote_re = re.compile((r"\s*"
"YES\s*(?P<yes_count>\d+)\s*"
"NO\s*(?P<no_count>\d+)\s*"
"EXCUSED\s*(?P<excused_count>\d+)\s*"
"ABSENT\s*(?P<abs_count>\d+).*"))
votes_re = r"(?P<name>\w+(\s\w\.)?)\s+(?P<vote>Y|N|A|E|-)"
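# Illustrative lines the patterns above are meant to match (invented for
# documentation purposes, not taken from a real journal page):
#   "YES    35   NO    28   EXCUSED    1   ABSENT    1"   -> summary matched by vote_re
#   "Smith Y    Jones N    Brown E"                        -> per-member votes matched by votes_re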
def fix_typos(data):
return data.replace('Tueday', 'Tuesday') # Spelling is hard
class COVoteScraper(VoteScraper, LXMLMixin):
jurisdiction = 'co'
def scrape_house(self, session):
url = journals % (session, 'House')
page = self.lxmlize(url)
hrefs = page.xpath("//font//a")
for href in hrefs:
(path, response) = self.urlretrieve(href.attrib['href'])
data = convert_pdf(path, type='text')
data = fix_typos(data)
in_vote = False
cur_vote = {}
known_date = None
cur_vote_count = None
in_question = False
cur_question = None
cur_bill_id = None
for line in data.split("\n"):
if known_date is None:
dt = date_re.findall(line)
if dt != []:
dt, dow = dt[0]
known_date = datetime.datetime.strptime(dt,
"%A, %B %d, %Y")
non_std = False
if re.match("(\s+)?\d+.*", line) is None:
non_std = True
l = line.lower().strip()
skip = False
blacklist = [
"house",
"page",
"general assembly",
"state of colorado",
"session",
"legislative day"
]
for thing in blacklist:
if thing in l:
skip = True
if skip:
continue
found = re.findall(
"(?P<bill_id>(H|S|SJ|HJ)(B|M|R)\d{2}-\d{3,4})",
line
)
if found != []:
found = found[0]
cur_bill_id, chamber, typ = found
try:
if not non_std:
_, line = line.strip().split(" ", 1)
line = line.strip()
except ValueError:
in_vote = False
in_question = False
continue
if in_question:
cur_question += " " + line.strip()
continue
if ("The question being" in line) or \
("On motion of" in line) or \
("the following" in line) or \
("moved that the" in line):
cur_question = line.strip()
in_question = True
if in_vote:
                    likely_garbage = False
                    if line == "":
                        likely_garbage = True
if "co-sponsor" in line.lower():
likely_garbage = True
if 'the speaker' in line.lower():
likely_garbage = True
votes = re.findall(votes_re, line)
if likely_garbage:
votes = []
for person, _, v in votes:
cur_vote[person] = v
last_line = False
for who, _, vote in votes:
if who.lower() == "speaker":
last_line = True
if votes == [] or last_line:
in_vote = False
# save vote
yes, no, other = cur_vote_count
if cur_bill_id is None or cur_question is None:
continue
bc = {
"H": "lower",
"S": "upper",
"J": "joint"
}[cur_bill_id[0].upper()]
vote = Vote('lower',
known_date,
cur_question,
(yes > no),
yes,
no,
other,
session=session,
bill_id=cur_bill_id,
bill_chamber=bc)
vote.add_source(href.attrib['href'])
vote.add_source(url)
for person in cur_vote:
if not person:
continue
vot = cur_vote[person]
if person.endswith("Y"):
vot = "Y"
person = person[:-1]
if person.endswith("N"):
vot = "N"
person = person[:-1]
if person.endswith("E"):
vot = "E"
person = person[:-1]
if not person:
continue
if vot == 'Y':
vote.yes(person)
elif vot == 'N':
vote.no(person)
elif vot == 'E' or vot == '-':
vote.other(person)
self.save_vote(vote)
cur_vote = {}
in_question = False
cur_question = None
in_vote = False
cur_vote_count = None
continue
summ = vote_re.findall(line)
if summ == []:
continue
summ = summ[0]
yes, no, exc, ab = summ
yes, no, exc, ab = \
int(yes), int(no), int(exc), int(ab)
other = exc + ab
cur_vote_count = (yes, no, other)
in_vote = True
continue
os.unlink(path)
def scrape_senate(self, session):
url = journals % (session, 'Senate')
page = self.lxmlize(url)
        hrefs = page.xpath("//font//a")
for href in hrefs:
(path, response) = self.urlretrieve(href.attrib['href'])
data = convert_pdf(path, type='text')
data = fix_typos(data)
cur_bill_id = None
cur_vote_count = None
in_vote = False
cur_question = None
in_question = False
known_date = None
cur_vote = {}
for line in data.split("\n"):
if not known_date:
dt = date_re.findall(line)
if dt != []:
dt, dow = dt[0]
dt = dt.replace(',', '')
known_date = datetime.datetime.strptime(dt, "%A %B %d %Y")
if in_question:
line = line.strip()
if re.match("\d+", line):
in_question = False
continue
try:
line, _ = line.rsplit(" ", 1)
cur_question += line.strip()
|
TimBuckley/effective_django
|
django/db/migrations/loader.py
|
Python
|
bsd-3-clause
| 11,967
| 0.001588
|
from importlib import import_module
import os
import sys
from django.apps import apps
from django.db.migrations.recorder import MigrationRecorder
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.migration import Migration
from django.db.migrations.state import ModelState
from django.db.migrations import operations
from django.utils import six
from django.conf import settings
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader(object):
"""
Loads migration files from disk, and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialisation, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection, load=True):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
if load:
self.build_graph()
@classmethod
def migrations_module(cls, app_label):
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label]
else:
app_package_name = apps.get_app_config(app_label).name
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME)
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
# Get the migrations module directory
module_name = self.migrations_module(app_config.label)
was_loaded = module_name in sys.modules
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
self.unmigrated_apps.add(app_config.label)
continue
raise
else:
# PY3 will happily import empty dirs as namespaces.
if not hasattr(module, '__file__'):
continue
# Module is not a package (e.g. migrations.py).
if not hasattr(module, '__path__'):
continue
# Force a reload if it's already loaded (tests need this)
if was_loaded:
six.moves.reload_module(module)
self.migrated_apps.add(app_config.label)
directory = os.path.dirname(module.__file__)
# Scan for .py[c|o] files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py") or name.endswith(".pyc") or name.endswith(".pyo"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
south_style_migrations = False
for migration_name in migration_names:
try:
migration_module = import_module("%s.%s" % (module_name, migration_name))
except ImportError as e:
# Ignore South import errors, as we're triggering them
if "south" in str(e).lower():
south_style_migrations = True
break
raise
if not hasattr(migration_module, "Migration"):
raise BadMigrationError("Migration %s in app %s has no Migration class" % (migration_name, app_config.label))
# Ignore South-style migrations
if hasattr(migration_module.Migration, "forwards"):
south_style_migrations = True
break
self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(migration_name, app_config.label)
            if south_style_migrations:
self.unmigrated_apps.add(app_config.label)
def get_migration(self, app_label, name_prefix):
"Gets the migration exactly named, or raises KeyError"
return self.graph.nodes[app_label, name_prefix]
    def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
# Do the search
results = []
for l, n in self.disk_migrations:
if l == app_label and n.startswith(name_prefix):
results.append((l, n))
if len(results) > 1:
raise AmbiguityError("There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix))
elif len(results) == 0:
raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
def build_graph(self):
"""
Builds a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
# Load disk data
self.load_disk()
# Load database data
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# Do a first pass to separate out replacing and non-replacing migrations
normal = {}
replacing = {}
for key, migration in self.disk_migrations.items():
if migration.replaces:
replacing[key] = migration
else:
normal[key] = migration
# Calculate reverse dependencies - i.e., for each migration, what depends on it?
# This is just for dependency re-pointing when applying replacements,
# so we ignore run_before here.
reverse_dependencies = {}
for key, migration in normal.items():
for parent in migration.dependencies:
reverse_dependencies.setdefault(parent, set()).add(key)
# Carry out replacements if we can - that is, if all replaced migrations
# are either unapplied or missing.
for key, migration in replacing.items():
# Ensure this replacement migration is not in applied_migrations
self.applied_migrations.discard(key)
# Do the check. We can replace if all our replace targets are
# applied, or if all of them are unapplied.
applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
can_replace = all(applied_statuses) or (not any(applied_statuses))
if not can_replace:
continue
# Alright, time to replace. Step through the replaced migrations
# and remove, repointing dependencies if needs be.
for replaced in migration.replaces:
if replace
|
wxgeo/geophar
|
wxgeometrie/sympy/integrals/tests/test_risch.py
|
Python
|
gpl-2.0
| 35,961
| 0.006173
|
"""Most of these tests come from the examples in Bronstein's book."""
from sympy import (Poly, I, S, Function, log, symbols, exp, tan, sqrt,
Symbol, Lambda, sin, Eq, Ne, Piecewise, factor, expand_log, cancel,
expand, diff, pi, atan)
from sympy.integrals.risch import (gcdex_diophantine, frac_in, as_poly_1t,
derivation, splitfactor, splitfactor_sqf, canonical_representation,
hermite_reduce, polynomial_reduce, residue_reduce, residue_reduce_to_basic,
integrate_primitive, integrate_hyperexponential_polynomial,
integrate_hyperexponential, integrate_hypertangent_polynomial,
integrate_nonlinear_no_specials, integer_powers, DifferentialExtension,
risch_integrate, DecrementLevel, NonElementaryIntegral, recognize_log_derivative,
recognize_derivative, laurent_series)
from sympy.utilities.pytest import raises
from sympy.abc import x, t, nu, z, a, y
t0, t1, t2 = symbols('t:3')
i = Symbol('i')
def test_gcdex_diophantine():
assert gcdex_diophantine(Poly(x**4 - 2*x**3 - 6*x**2 + 12*x + 15),
Poly(x**3 + x**2 - 4*x - 4), Poly(x**2 - 1)) == \
(Poly((-x**2 + 4*x - 3)/5), Poly((x**3 - 7*x**2 + 16*x - 10)/5))
def test_frac_in():
assert frac_in(Poly((x + 1)/x*t, t), x) == \
(Poly(t*x + t, x), Poly(x, x))
assert frac_in((x + 1)/x*t, x) == \
(Poly(t*x + t, x), Poly(x, x))
assert frac_in((Poly((x + 1)/x*t, t), Poly(t + 1, t)), x) == \
(Poly(t*x + t, x), Poly((1 + t)*x, x))
raises(ValueError, lambda: frac_in((x + 1)/log(x)*t, x))
assert frac_in(Poly((2 + 2*x + x*(1 + x))/(1 + x)**2, t), x, cancel=True) == \
(Poly(x + 2, x), Poly(x + 1, x))
def test_as_poly_1t():
assert as_poly_1t(2/t + t, t, z) in [
Poly(t + 2*z, t, z), Poly(t + 2*z, z, t)]
assert as_poly_1t(2/t + 3/t**2, t, z) in [
Poly(2*z + 3*z**2, t, z), Poly(2*z + 3*z**2, z, t)]
assert as_poly_1t(2/((exp(2) + 1)*t), t, z) in [
Poly(2/(exp(2) + 1)*z, t, z), Poly(2/(exp(2) + 1)*z, z, t)]
assert as_poly_1t(2/((exp(2) + 1)*t) + t, t, z) in [
Poly(t + 2/(exp(2) + 1)*z, t, z), Poly(t + 2/(exp(2) + 1)*z, z, t)]
assert as_poly_1t(S(0), t, z) == Poly(0, t, z)
def test_derivation():
p = Poly(4*x**4*t**5 + (-4*x**3 - 4*x**4)*t**4 + (-3*x**2 + 2*x**3)*t**3 +
(2*x + 7*x**2 + 2*x**3)*t**2 + (1 - 4*x - 4*x**2)*t - 1 + 2*x, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t**2 - 3/(2*x)*t + 1/(2*x), t)]})
assert derivation(p, DE) == Poly(-20*x**4*t**6 + (2*x**3 + 16*x**4)*t**5 +
(21*x**2 + 12*x**3)*t**4 + (7*x/2 - 25*x**2 - 12*x**3)*t**3 +
(-5 - 15*x/2 + 7*x**2)*t**2 - (3 - 8*x - 10*x**2 - 4*x**3)/(2*x)*t +
(1 - 4*x**2)/(2*x), t)
assert derivation(Poly(1, t), DE) == Poly(0, t)
assert derivation(Poly(t, t), DE) == DE.d
assert derivation(Poly(t**2 + 1/x*t + (1 - 2*x)/(4*x**2), t), DE) == \
Poly(-2*t**3 - 4/x*t**2 - (5 - 2*x)/(2*x**2)*t - (1 - 2*x)/(2*x**3), t, domain='ZZ(x)')
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t1), Poly(t, t)]})
assert derivation(Poly(x*t*t1, t), DE) == Poly(t*t1 + x*t*t1 + t, t)
assert derivation(Poly(x*t*t1, t), DE, coefficientD=True) == \
Poly((1 + t1)*t, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
assert derivation(Poly(x, x), DE) == Poly(1, x)
    # Test basic option
assert derivation((x + 1)/(x - 1), DE, basic=True) == -2/(1 - 2*x + x**2)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t, t)]})
assert derivation((t + 1)/(t - 1), DE, basic=True) == -2*t/(1 - 2*t + t**2)
    assert derivation(t + 1, DE, basic=True) == t
def test_splitfactor():
p = Poly(4*x**4*t**5 + (-4*x**3 - 4*x**4)*t**4 + (-3*x**2 + 2*x**3)*t**3 +
(2*x + 7*x**2 + 2*x**3)*t**2 + (1 - 4*x - 4*x**2)*t - 1 + 2*x, t, field=True)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t**2 - 3/(2*x)*t + 1/(2*x), t)]})
assert splitfactor(p, DE) == (Poly(4*x**4*t**3 + (-8*x**3 - 4*x**4)*t**2 +
(4*x**2 + 8*x**3)*t - 4*x**2, t), Poly(t**2 + 1/x*t + (1 - 2*x)/(4*x**2), t, domain='ZZ(x)'))
assert splitfactor(Poly(x, t), DE) == (Poly(x, t), Poly(1, t))
r = Poly(-4*x**4*z**2 + 4*x**6*z**2 - z*x**3 - 4*x**5*z**3 + 4*x**3*z**3 + x**4 + z*x**5 - x**6, t)
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
assert splitfactor(r, DE, coefficientD=True) == \
(Poly(x*z - x**2 - z*x**3 + x**4, t), Poly(-x**2 + 4*x**2*z**2, t))
assert splitfactor_sqf(r, DE, coefficientD=True) == \
(((Poly(x*z - x**2 - z*x**3 + x**4, t), 1),), ((Poly(-x**2 + 4*x**2*z**2, t), 1),))
assert splitfactor(Poly(0, t), DE) == (Poly(0, t), Poly(1, t))
assert splitfactor_sqf(Poly(0, t), DE) == (((Poly(0, t), 1),), ())
def test_canonical_representation():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})
assert canonical_representation(Poly(x - t, t), Poly(t**2, t), DE) == \
(Poly(0, t), (Poly(0, t),
Poly(1, t)), (Poly(-t + x, t),
Poly(t**2, t)))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert canonical_representation(Poly(t**5 + t**3 + x**2*t + 1, t),
Poly((t**2 + 1)**3, t), DE) == \
(Poly(0, t), (Poly(t**5 + t**3 + x**2*t + 1, t),
Poly(t**6 + 3*t**4 + 3*t**2 + 1, t)), (Poly(0, t), Poly(1, t)))
def test_hermite_reduce():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
assert hermite_reduce(Poly(x - t, t), Poly(t**2, t), DE) == \
((Poly(-x, t), Poly(t, t)), (Poly(0, t), Poly(1, t)), (Poly(-x, t), Poly(1, t)))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(-t**2 - t/x - (1 - nu**2/x**2), t)]})
assert hermite_reduce(
Poly(x**2*t**5 + x*t**4 - nu**2*t**3 - x*(x**2 + 1)*t**2 - (x**2 - nu**2)*t - x**5/4, t),
Poly(x**2*t**4 + x**2*(x**2 + 2)*t**2 + x**2 + x**4 + x**6/4, t), DE) == \
((Poly(-x**2 - 4, t), Poly(4*t**2 + 2*x**2 + 4, t)),
(Poly((-2*nu**2 - x**4)*t - (2*x**3 + 2*x), t), Poly(2*x**2*t**2 + x**4 + 2*x**2, t)),
(Poly(x*t + 1, t), Poly(x, t)))
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1/x, t)]})
a = Poly((-2 + 3*x)*t**3 + (-1 + x)*t**2 + (-4*x + 2*x**2)*t + x**2, t)
d = Poly(x*t**6 - 4*x**2*t**5 + 6*x**3*t**4 - 4*x**4*t**3 + x**5*t**2, t)
assert hermite_reduce(a, d, DE) == \
((Poly(3*t**2 + t + 3*x, t), Poly(3*t**4 - 9*x*t**3 + 9*x**2*t**2 - 3*x**3*t, t)),
(Poly(0, t), Poly(1, t)),
(Poly(0, t), Poly(1, t)))
assert hermite_reduce(
Poly(-t**2 + 2*t + 2, t),
Poly(-x*t**2 + 2*x*t - x, t), DE) == \
((Poly(3, t), Poly(t - 1, t)),
(Poly(0, t), Poly(1, t)),
(Poly(1, t), Poly(x, t)))
assert hermite_reduce(
Poly(-x**2*t**6 + (-1 - 2*x**3 + x**4)*t**3 + (-3 - 3*x**4)*t**2 - 2*x*t - x - 3*x**2, t),
Poly(x**4*t**6 - 2*x**2*t**3 + 1, t), DE) == \
((Poly(x**3*t + x**4 + 1, t), Poly(x**3*t**3 - x, t)),
(Poly(0, t), Poly(1, t)),
(Poly(-1, t), Poly(x**2, t)))
assert hermite_reduce(
Poly((-2 + 3*x)*t**3 + (-1 + x)*t**2 + (-4*x + 2*x**2)*t + x**2, t),
Poly(x*t**6 - 4*x**2*t**5 + 6*x**3*t**4 - 4*x**4*t**3 + x**5*t**2, t), DE) == \
((Poly(3*t**2 + t + 3*x, t), Poly(3*t**4 - 9*x*t**3 + 9*x**2*t**2 - 3*x**3*t, t)),
(Poly(0, t), Poly(1, t)),
(Poly(0, t), Poly(1, t)))
def test_polynomial_reduce():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1 + t**2, t)]})
assert polynomial_reduce(Poly(1 + x*t + t**2, t), DE) == \
(Poly(t, t), Poly(x*t, t))
assert polynomial_reduce(Poly(0, t), DE) == \
(Poly(0, t), Poly(0, t))
def test_laurent_series():
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(1, t)]})
a = Poly(36, t)
d = Poly((t - 2)*(t**2 - 1)**2, t)
F = Poly(t**2 - 1, t)
n = 2
assert laurent_series(a, d, F, n, DE) == \
(Poly(-3*t**3 + 3*t**2 - 6*t - 8, t), Poly(t**5 + t**4 - 2*t**3 - 2*t**2 + t + 1, t),
|
calvinchengx/fabriccolors
|
fabriccolors/main.py
|
Python
|
bsd-2-clause
| 1,828
| 0.001094
|
"""
This module contains fabriccolor's `main` method plus related subroutines.
"""
import fnmatch
import os
import sys
def find_fabsettings():
"
|
""
Look for fabsettings.py, which will contain all information about
    target servers and distros on each server.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, 'fabsettings.py'):
matches.append(os.path.join(root, filename))
number_of_matches = len(matches)
if number_of_matches == 1:
path_to_fabsettings = matches[0]
load_fabsettings(path_to_fabsettings)
return True
return False
def load_fabsettings(path_to_fabsettings):
directory, fabsettings = os.path.split(path_to_fabsettings)
if directory not in sys.path:
sys.path.insert(0, directory)
def main():
"""
Main command-line execution loop.
Usage
fabc
"""
if find_fabsettings():
import fabsettings
project_sites = fabsettings.PROJECT_SITES.keys()
print "You have specified the follow server targets:"
print project_sites
# or organized according to distros
# TODO: we can now do things to the target server
# e.g. `fabc server_setup:root,dev` should fire off all the server setup
# scripts using root user, at the 'dev' server
# `fabc server_setup:vagrant` should fire off all the server setup
# scripts using the vagrant user, at the 'vagrant' vm
# and all these scripts are stored in fabfile.py
else:
print "fabric colors is a wrapper around python fabric."
print "Begin using fabric colors by defining your servers in fabsettings.py"
print "using the included fabsettings.py.sample as an example"
|
xuru/pyvisdk
|
pyvisdk/do/vim_esx_cl_inetworkfirewallrulesetallowediplist_firewall_ruleset_allowedip.py
|
Python
|
mit
| 1,110
| 0.008108
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
# This module is NOT auto-generated
# Inspired by decompiled Java classes from vCenter's internalvim25stubs.jar
# Unless stated otherwise, the methods and attributes were not used by esxcli,
# and thus not tested
log = logging.getLogger(__name__)
def VimEsxCLInetworkfirewallrulesetallowediplistFirewallRulesetAllowedip(vim, *args, **kwargs):
obj = vim.client.factory.create('ns0:VimEsxCLInetworkfirewallrulesetallowediplistFirewallRulesetAllowedip')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'AllowedIPAddresses', 'Ruleset' ]
for name, arg in zip(required + optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
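# Hypothetical call shape (keyword names taken from the 'optional' list above;
# the values are placeholders):
#     spec = VimEsxCLInetworkfirewallrulesetallowediplistFirewallRulesetAllowedip(
#         vim, AllowedIPAddresses=['192.0.2.0/24'], Ruleset='sshServer')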
|
davedash/mysql-anonymous
|
anonymize.py
|
Python
|
mit
| 3,818
| 0.003405
|
#!/usr/bin/env python
# This assumes an id on each field.
import logging
import hashlib
import random
log = logging.getLogger('anonymize')
common_hash_secret = "%016x" % (random.getrandbits(128))
def get_truncates(config):
database = config.get('database', {})
truncates = database.get('truncate', [])
sql = []
for truncate in truncates:
sql.append('TRUNCATE `%s`' % truncate)
return sql
def get_deletes(config):
database = config.get('database', {})
tables = database.get('tables', [])
sql = []
for table, data in tables.iteritems():
if 'delete' in data:
fields = []
for f, v in data['delete'].iteritems():
fields.append('`%s` = "%s"' % (f, v))
statement = 'DELETE FROM `%s` WHERE ' % table + ' AND '.join(fields)
sql.append(statement)
return sql
listify = lambda x: x if isinstance(x, list) else [x]
def get_updates(config):
global common_hash_secret
database = config.get('database', {})
tables = database.get('tables', [])
sql = []
for table, data in tables.iteritems():
updates = []
for operation, details in data.iteritems():
if operation == 'nullify':
for field in listify(details):
updates.append("`%s` = NULL" % field)
elif operation == 'random_int':
for field in listify(details):
updates.append("`%s` = ROUND(RAND()*1000000)" % field)
elif operation == 'random_ip':
for field in listify(details):
updates.append("`%s` = INET_NTOA(RAND()*1000000000)" % field)
elif operation == 'random_email':
for field in listify(details):
updates.append("`%s` = CONCAT(id, '@mozilla.com')"
% field)
elif operation == 'random_username':
for field in listify(details):
updates.append("`%s` = CONCAT('_user_', id)" % field)
elif operation == 'hash_value':
for field in listify(details):
updates.append("`%(field)s` = MD5(CONCAT(@common_hash_secret, `%(field)s`))"
% dict(field=field))
elif operation == 'hash_email':
for field in listify(details):
updates.append("`%(field)s` = CONCAT(MD5(CONCAT(@common_hash_secret, `%(field)s`)), '@mozilla.com')"
% dict(field=field))
elif operation == 'delete':
continue
else:
log.warning('Unknown operation.')
if updates:
sql.append('UPDATE `%s` SET %s' % (table, ', '.join(updates)))
return sql
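# A hypothetical anonymize.yml shape, inferred from the keys consumed by
# get_truncates/get_deletes/get_updates above (table, column and value names
# are placeholders):
#     database:
#       name: mydb
#       truncate: [sessions, request_log]
#       tables:
#         users:
#           nullify: last_login_ip
#           random_email: email
#           hash_value: [password_hash, secret_answer]
#         orders:
#           delete:
#             status: test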
def anonymize(config):
database = config.get('database', {})
if 'name' in database:
print "USE `%s`;" % database['name']
print "SET FOREIGN_KEY_CHECKS=0;"
sql = []
sql.extend(get_truncates(config))
sql.extend(get_deletes(config))
sql.extend(get_updates(config))
for stmt in sql:
print stmt + ';'
print "SET FOREIGN_KEY_CHECKS=1;"
print
if __name__ == '__main__':
import yaml
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
files = [ 'anonymize.yml' ]
for f in files:
print "--"
print "-- %s" %f
print "--"
print "SET @common_hash_secret=rand();"
print ""
cfg = yaml.load(open(f))
if 'databases' not in cfg:
            anonymize(cfg)
else:
databases = cfg.get('databases')
for name, sub_cfg in databases.items():
print "USE `%s`;" % name
anonymize({'database': sub_cfg})
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_application_building_blocks/argparse_fromfile_prefix_chars.py
|
Python
|
apache-2.0
| 435
| 0.002299
|
import argparse
import shlex
parser = argparse.ArgumentParser(description='Short sample app',
fromfile_prefix_chars='@',
)
parser.add_argument('-a', action="store_true", default=False)
parser.add_argument('-b', action="store", dest="b")
parser.add_argument('-c', action="store", dest="c", type=int)
print(parser.parse_args(['@argparse_fromfile_prefix_chars.txt']))
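# The @-file read above holds one argument per line; a hypothetical
# argparse_fromfile_prefix_chars.txt could contain:
#     -a
#     -b
#     2
#     -c
#     3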
|
bandwidthcom/python-bandwidth-iris
|
iris_sdk/models/account.py
|
Python
|
mit
| 4,216
| 0.000474
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.account_users import AccountUsers
from iris_sdk.models.available_npa_nxx import AvailableNpaNxx
from iris_sdk.models.available_numbers import AvailableNumbers
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.account import AccountData
from iris_sdk.models.disc_numbers import DiscNumbers
from iris_sdk.models.disconnects import Disconnects
from iris_sdk.models.in_service_numbers import InServiceNumbers
from iris_sdk.models.line_option_orders import LineOptionOrder
from iris_sdk.models.import_tn_checker import ImportTnChecker
from iris_sdk.models.lnpchecker import LnpChecker
from iris_sdk.models.orders import Orders
from iris_sdk.models.lidbs import Lidbs
from iris_sdk.models.dldas import Dldas
from iris_sdk.models.subscriptions import Subscriptions
from iris_sdk.models.portins import PortIns
from iris_sdk.models.portouts import PortOuts
from iris_sdk.models.reservation import Reservation
from iris_sdk.models.site_hosts import SiteHosts
from iris_sdk.models.sites import Sites
from iris_sdk.models.tn_option_orders import TnOptionOrders
XPATH_ACCOUNT = "/accounts/{}"
class Account(BaseResource, AccountData):
"""Iris account"""
_xpath = XPATH_ACCOUNT
@property
def available_npa_nxx(self):
return self._available_npa_nxx
@property
def available_numbers(self):
return self._available_numbers
@property
def disconnected_numbers(self):
return self._disconnected_numbers
@property
def disconnects(self):
return self._disconnects
@property
def dldas(self):
return self._dldas
@property
def hosts(self):
return self._hosts
@property
def id(self):
return self.account_id
@id.setter
def id(self, id):
self.account_id = id
@property
def import_tn_checker(self):
return self._import_tn_checker
@property
def in_service_numbers(self):
return self._in_service_numbers
@property
def lidbs(self):
return self._lidbs
@property
def line_option_orders(self):
return self._line_option_orders
@property
def lnpchecker(self):
return self._lnpchecker
@property
def orders(self):
return self._orders
@property
def portins(self):
return self._portins
@property
def portouts(self):
return self._portouts
@property
def sites(self):
return self._sites
@property
def subscriptions(self):
return self._subscriptions
@property
def tnreservation(self):
return self._tnreservation
@property
def users(self):
return self._users
@property
def tn_option_orders(self):
return self._tn_option_orders
def __init__(self, parent=None, client=None):
if client is not None:
self.id = client.config.account_id
super().__init__(parent, client)
AccountData.__init__(self)
self._available_npa_nxx = AvailableNpaNxx(self, client)
self._available_numbers = AvailableNumbers(self, client)
self._disconnected_numbers = DiscNumbers(self, client)
self._disconnects = Disconnects(self, client)
self._hosts = SiteHosts(self, client)
self._import_tn_checker = ImportTnChecker(self, client)
self._in_service_numbers = InServiceNumbers(self, client)
self._line_option_orders = LineOptionOrder(self, client)
self._lnpchecker = LnpChecker(self, client)
self._orders = Orders(self, client)
self._portins = PortIns(self, client)
self._portouts = PortOuts(self, client)
self._lidbs = Lidbs(self, client)
self._dldas = Dldas(self, client)
self._subscriptions = Subscriptions(self, client)
self._sites = Sites(self, client)
self._tnreservation = Reservation(self, client)
self._users = AccountUsers(self, client)
self._tn_option_orders = TnOptionOrders(self, client)
def get(self, id=None):
return self._get_data(id)
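    # Rough, hypothetical usage sketch (client construction happens outside this
    # module; only names defined above are referenced):
    #     account = Account(client=client)   # account_id taken from client.config
    #     account.get()                      # fetches /accounts/{account_id}
    #     account.sites                      # sub-resources hang off the properties above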
|
jawilson/home-assistant
|
homeassistant/components/hive/light.py
|
Python
|
apache-2.0
| 5,000
| 0.0002
|
"""Support for Hive light devices."""
from datetime import timedelta
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.helpers.entity import DeviceInfo
import homeassistant.util.color as color_util
from . import HiveEntity, refresh_system
from .const import ATTR_MODE, DOMAIN
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Hive thermostat based on a config entry."""
hive = hass.data[DOMAIN][entry.entry_id]
devices = hive.session.deviceList.get("light")
entities = []
if devices:
for dev in devices:
entities.append(HiveDeviceLight(hive, dev))
async_add_entities(entities, True)
class HiveDeviceLight(HiveEntity, LightEntity):
"""Hive Active Light Device."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return device information."""
return DeviceInfo(
identifiers={(DOMAIN, self.device["device_id"])},
manufacturer=self.device["deviceData"]["manufacturer"],
model=self.device["deviceData"]["model"],
name=self.device["device_name"],
sw_version=self.device["deviceData"]["version"],
via_device=(DOMAIN, self.device["parentDevice"]),
)
@property
def name(self):
"""Return the display name of this light."""
return self.device["haName"]
@property
def available(self):
"""Return if the device is available."""
return self.device["deviceData"]["online"]
@property
def extra_state_attributes(self):
"""Show Device Attributes."""
return {
ATTR_MODE: self.attributes.get(ATTR_MODE),
}
@property
def brightness(self):
"""Brightness of the light (an integer in the range 1-255)."""
return self.device["status"]["brightness"]
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self.device.get("min_mireds")
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self.device.get("max_mireds")
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self.device["status"].get("color_temp")
@property
def hs_color(self):
"""Return the hs color value."""
if self.device["status"]["mode"] == "COLOUR":
rgb = self.device["status"].get("hs_color")
return color_util.color_RGB_to_hs(*rgb)
return None
@property
def is_on(self):
"""Return true if light is on."""
return self.device["status"]["state"]
@refresh_system
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
new_brightness = None
new_color_temp = None
new_color = None
if ATTR_BRIGHTNESS in kwargs:
tmp_new_brightness = kwargs.get(ATTR_BRIGHTNESS)
percentage_brightness = (tmp_new_brightness / 255) * 100
new_brightness = int(round(percentage_brightness / 5.0) * 5.0)
if new_brightness == 0:
new_brightness = 5
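            # e.g. an HA brightness of 128 maps to 50.2%, which rounds to 50;
            # values that would round down to 0 are clamped to 5 so the bulb stays lit.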
if ATTR_COLOR_TEMP in kwargs:
tmp_new_color_temp = kwargs.get(ATTR_COLOR_TEMP)
new_color_temp = round(1000000 / tmp_new_color_temp)
if ATTR_HS_COLOR in kwargs:
get_new_color = kwargs.get(ATTR_HS_COLOR)
hue = int(get_new_color[0])
saturation = int(get_new_color[1])
new_color = (hue, saturation, 100)
await self.hive.light.turnOn(
self.device, new_brightness, new_color_temp, new_color
)
@refresh_system
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self.hive.light.turnOff(self.device)
@property
def supported_features(self):
"""Flag supported features."""
supported_features = None
if self.device["hiveType"] == "warmwhitelight":
supported_features = SUPPORT_BRIGHTNESS
elif self.device["hiveType"] == "tuneablelight":
supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
elif self.device["hiveType"] == "colourtuneablelight":
supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR
return supported_features
async def async_update(self):
"""Update all Node data from Hive."""
await self.hive.session.updateData(self.device)
self.device = await self.hive.light.getLight(self.device)
self.attributes.update(self.device.get("attributes", {}))
|
Neomania/BeeSimulation
|
fonts.py
|
Python
|
mit
| 808
| 0.012376
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Timothy
#
# Created: 02/02/2015
# Copyright: (c) Timothy 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import pygame
import pygame.freetype
pygame.freetype.init()
infoFontSize = 18
infoFont = pygame.freetype.SysFont('Consolas',infoFontSize)
selectionFontSize = 12
selectionFont = pygame.freetype.SysFont('Consolas',selectionFontSize)
speedFontSize = 20
speedFont = pygame.freetype.SysFont('Consolas',speedFontSize)
detailedFontSize = 20
detailedFont = pygame.freetype.SysFont('Consolas',detailedFontSize)
pollenRateFontSize = 16
pollenRateFont = pygame.freetype.SysFont('Consolas',pollenRateFontSize)
|
PainNarrativesLab/IOMNarratives
|
IOMDataService.py
|
Python
|
mit
| 5,350
| 0.002991
|
import shelve
"""
Currently unused. All mysql queries are now done via IomDataModels.
May be resurrected to help with shelve and pickles
"""
from USCProjectDAOs import IOMProjectDAO
class IOMService(IOMProjectDAO):
"""
This handles interactions with the IOM data database and storage files.
All user applications should work off of this
"""
def __init__(self):
"""
Will hold the identifiers for records
"""
self.names = []
"""
Will hold the positive sentiment scores
"""
self.posSent = []
"""
Will hold the negative sentiment scores
"""
self.negSent = []
"""
Will hold the net sentiment scores
"""
self.netSent = []
"""
Will hold the sums of the absolute values of the sentiment scores
"""
self.absumSent = []
def connect_to_mysql(self, test):
"""
Test should be boolean
"""
IOMProjectDAO.__init__(self, test, 'true')
def get_sentiment_data_from_file(self, datafile):
"""
This is the generic file data loader.
        datafile should be a path to file
"""
# Open data file and push into lists
db = shelve.open(datafile)
self.keys = list(db.keys())
for k in self.keys:
s = db[k]
self.names.append(s['quote_id'])
self.posSent.append(s['avgPos'])
self.negSent.append(s['avgNeg'])
self.netSent.append(s['netSent'])
self.absumSent.append(abs(s['avgPos']) + abs(s['avgNeg']))
db.close()
def save_sentiment_data_to_file(self, datafile, label):
"""
This is a generic file data saver.
        datafile should be a path to file
@param datafile: The path to the datafile
@type datafile: C{string}
"""
# try:
db = shelve.open(datafile)
db[label] = self.to_save
db.close()
print(self.to_save)
return self.to_save
        # Check whether the problem was there not being a dictionary available to save
#except:
# try:
# self.to_save
# print ('Problem saving')
# except:
# print ('No variable self.to_save set')
# def get_data_from_database(self, query, val):
# """
# This executes a parameterized query of the mysql database, stores the results in a list of dictionaries called self.dbdata.
#
# @return Also returns dbdata
#
# @param query A mysql query with %s in place of all substitution variables
# @type query string
# @param val A list containing all substition parameters or empty if no substitutions are needed
# @type val list
#
# TODO Should have something to check whether a connection exists
# """
# self.connect_to_mysql('false')
# self.query = query
# self.val = val
# self.returnAll()
# self.dbdata = list(self.results)
#
#
# class QueryShell(IOMService):
# """
# This is just a shell to easily run queries on the database and get the results as a list of dictionaries
#
# @return Returns list of dictionaries
# """
#
# def __init__(self):
# IOMService.__init__(self)
#
# def query(self, query, val):
# self.get_data_from_database(query, val)
# return self.dbdata
#
#
# class DHShell(IOMService):
# """
# This is a shell for use in public events to avoid cluttering up the page with each step of the query
# It resets all its values after returning an array of dictionaries and thus need not be reinvoked.
# Note that These queries are not parameterized
#
# @return Returns list of dictionaries
# """
#
# def __init__(self, query_string):
# """
# @param query_string The query string
# @type string
# """
# IOMService.__init__(self)
# self.q(query_string)
#
# def q(self, query_string):
# # Get rid of previous queries
# # self.results = []
# # self.dbdata = None
# #These queries are not parameterized
# val = []
# self.get_data_from_database(query_string, val)
# return self.dbdata
class ShelveDataHandler(IOMService):
def __init__(self):
import shelve
self.datafolder = 'storedData/'
def openData(self, file_name):
"""
Opens shelve file and returns the list
"""
db = shelve.open(self.datafolder + file_name)
list_to_populate = list(db.values())
db.close()
return list_to_populate[0]
def bagSaver(self, list_to_save, file_name):
"""
Saves a list of raw data into a shelve file.
@param list_to_save A list of items to be saved into shelf file
@type list_to_save list
@param file_name The name of the file into which the items should be saved
@type string
"""
try:
label = file_name
to_save = list_to_save
db = shelve.open(self.datafolder + file_name)
db[label] = to_save
db.close()
except:
print('Error saving to shelve file %s' % file_name)
else:
print('Successfully saved to shelve file %s ' % file_name)
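# Rough usage sketch of ShelveDataHandler (file names are hypothetical):
#     handler = ShelveDataHandler()
#     handler.bagSaver(records, 'records_2015')   # writes storedData/records_2015
#     records = handler.openData('records_2015')  # reads the stored list back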
|