repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Arabidopsis-Information-Portal/hrgrn_webservices | services/hrgrn_list_network/main.py | Python | gpl-2.0 | 2,479 | 0.008471 | # HRGRN WebServices
# Copyright (C) 2016 Xinbin Dai, Irina Belyaeva
# This file is part of HRGRN WebServices API.
#
# HRGRN API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# HRGRN API is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with HRGRN API. If not, see <http://www.gnu.org/licenses/>.
"""
Main Module
"""
import json
import requests
import logging
import timer as timer
from requests.exceptions import ConnectionError
from requests import Session
import service as svc
import request_handler as rh
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# This function acts as a list endpoint
def list(args):
session = Session()
# get service url
svc_url = svc.get_svc_base_url()
params = {'listall': 'T', 'format':'json'}
try:
with timer.Timer() as t:
log.info("Service URL:" + svc_url)
# execute request
response = rh.build_payload(svc_url, params, session)
log.debug(response)
if (response):
for item in response:
print json.dumps(item, indent=3)
print '---'
else:
raise Exception("Response cannot be null!")
except ValueError as e:
error_msg = "ValueError Exception:" + e.message
log.error(error_msg, exc_info=True)
raise Exception(error_msg)
except requests.exceptions.HTTPError as e:
error_msg = "HTTPError Exception:" + e.message
log.error(error_msg, exc_info=True)
raise Exception(error_msg)
except ConnectionError as e:
error_msg = "ConnectionError Exception:" + e.message
log.error(error_msg, exc_info=True)
raise Exception(error_msg)
except Exception as e:
error_msg = "GenericError Exception:" + e.message
log.error(error_msg, exc_info=True)
raise Exception(error_msg)
finally:
log.info('Request took %.03f sec.' % t.interval)
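# Hedged usage sketch for the endpoint above (doctest-style; assumes this
# module is importable as `main` and the HRGRN service is reachable):
# >>> import main
# >>> main.list(None)   # `args` is accepted but never read by this endpoint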
|
Shatki/PyIMU | calibration/pyIMUCalibrationServer.py | Python | gpl-3.0 | 2,005 | 0.00164 | from socket import *
from pytroykaimu import TroykaIMU
import time
import datetime
# Address
HOST = ''
PORT = 21567
BUFSIZ = 128
ADDR = (HOST, PORT)
imu = TroykaIMU()
tcpSerSock = socket(AF_INET, SOCK_STREAM)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(5)
# Disable blocking
# tcpSerSock.setblocking(False)
def print_log(text):
print('{} {}'.format(datetime.datetime.now(), text))
while True:
try:
        print_log('waiting for connection...')
        # Wait for a client connection
        tcpCliSock, addr = tcpSerSock.accept()
        # Timeout for data from the client
        tcpCliSock.settimeout(0.02)
        print_log('connection from: ' + str(addr))
        # Connected; send the data
while True:
m_x, m_y, m_z = imu.magnetometer.read_calibrate_xyz()
a_x, a_y, a_z = imu.accelerometer.read_gxyz()
g_x, g_y, g_z = imu.gyroscope.read_radians_per_second_xyz()
data = "{:f}; {:f}; {:f}; " \
"{:f}; {:f}; {:f}; " \
"{:f}; {:f}; {:f}; ".format(m_x, m_y, m_z,
a_x, a_y, a_z,
g_x, g_y, g_z)
dataencode = data.encode('utf-8').ljust(128, b' ')
if dataencode:
try:
                    # send the data
tcpCliSock.send(dataencode)
time.sleep(0.05)
except:
                    # drop the connection; problem with the client
print_log('Client terminated the connection')
tcpCliSock.close()
break
        # loop back and wait for the next connection
except KeyboardInterrupt:
        # Shut down the server
print_log('Server was closed')
tcpSerSock.close()
break
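# Minimal client sketch for the server above (illustrative; in practice it
# would live in a separate script). It relies on the fixed BUFSIZ-byte,
# space-padded frames written by ljust() above and ignores TCP short reads
# for brevity.
def read_imu_frames(host='localhost', port=PORT):
    """Yield [m_x, m_y, m_z, a_x, a_y, a_z, g_x, g_y, g_z] per frame."""
    client = socket(AF_INET, SOCK_STREAM)
    client.connect((host, port))
    try:
        while True:
            frame = client.recv(BUFSIZ)
            if not frame:
                break
            # nine ';'-separated floats; the trailing padding is filtered out
            yield [float(v) for v in frame.decode('utf-8').split(';') if v.strip()]
    finally:
        client.close()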
|
ericholscher/django | django/contrib/admin/filters.py | Python | bsd-3-clause | 16,609 | 0.001204 | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.db import models
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import smart_text, force_text
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.admin.utils import (get_model_from_relation,
reverse_field_path, get_limit_choices_to_from_path, prepare_lookup_value)
from django.contrib.admin.options import IncorrectLookupParameters
class ListFilter(object):
    title = None # Human-readable title to appear in the right sidebar.
template = 'admin/filter.html'
def __init__(self, request, params, model, model_admin):
# This dictionary will eventually contain the request's query string
# parameters actually used by this filter.
self.used_parameters = {}
if self.title is None:
            raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'title'." % self.__class__.__name__)
def has_output(self):
"""
Returns True if some choices would be output for this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')
def choices(self, cl):
"""
Returns choices ready to be output in the template.
"""
raise NotImplementedError('subclasses of ListFilter must provide a choices() method')
def queryset(self, request, queryset):
"""
Returns the filtered queryset.
"""
raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')
def expected_parameters(self):
"""
Returns the list of parameter names that are expected from the
request's query string and that will be used by this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
# The parameter that should be used in the query string for that filter.
parameter_name = None
def __init__(self, request, params, model, model_admin):
super(SimpleListFilter, self).__init__(
request, params, model, model_admin)
if self.parameter_name is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'parameter_name'." % self.__class__.__name__)
lookup_choices = self.lookups(request, model_admin)
if lookup_choices is None:
lookup_choices = ()
self.lookup_choices = list(lookup_choices)
if self.parameter_name in params:
value = params.pop(self.parameter_name)
self.used_parameters[self.parameter_name] = value
def has_output(self):
return len(self.lookup_choices) > 0
def value(self):
"""
Returns the value (in string format) provided in the request's
query string for this filter, if any. If the value wasn't provided then
returns None.
"""
return self.used_parameters.get(self.parameter_name, None)
def lookups(self, request, model_admin):
"""
Must be overridden to return a list of tuples (value, verbose value)
"""
raise NotImplementedError(
'The SimpleListFilter.lookups() method must be overridden to '
'return a list of tuples (value, verbose value)')
def expected_parameters(self):
return [self.parameter_name]
def choices(self, cl):
yield {
'selected': self.value() is None,
'query_string': cl.get_query_string({}, [self.parameter_name]),
'display': _('All'),
}
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == force_text(lookup),
'query_string': cl.get_query_string({
self.parameter_name: lookup,
}, []),
'display': title,
}
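# Illustrative subclass of the SimpleListFilter API above, kept as a comment
# (the `year` field and the choices are hypothetical, not part of Django):
#
#     class DecadeListFilter(SimpleListFilter):
#         title = _('decade')
#         parameter_name = 'decade'
#
#         def lookups(self, request, model_admin):
#             return (('80s', _('the 1980s')), ('90s', _('the 1990s')))
#
#         def queryset(self, request, queryset):
#             if self.value() == '80s':
#                 return queryset.filter(year__gte=1980, year__lte=1989)
#             if self.value() == '90s':
#                 return queryset.filter(year__gte=1990, year__lte=1999)
#             return queryset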
class FieldListFilter(ListFilter):
_field_list_filters = []
_take_priority_index = 0
def __init__(self, field, request, params, model, model_admin, field_path):
self.field = field
self.field_path = field_path
self.title = getattr(field, 'verbose_name', field_path)
super(FieldListFilter, self).__init__(
request, params, model, model_admin)
for p in self.expected_parameters():
if p in params:
value = params.pop(p)
self.used_parameters[p] = prepare_lookup_value(p, value)
def has_output(self):
return True
def queryset(self, request, queryset):
try:
return queryset.filter(**self.used_parameters)
except ValidationError as e:
raise IncorrectLookupParameters(e)
@classmethod
def register(cls, test, list_filter_class, take_priority=False):
if take_priority:
# This is to allow overriding the default filters for certain types
# of fields with some custom filters. The first found in the list
# is used in priority.
cls._field_list_filters.insert(
cls._take_priority_index, (test, list_filter_class))
cls._take_priority_index += 1
else:
cls._field_list_filters.append((test, list_filter_class))
@classmethod
def create(cls, field, request, params, model, model_admin, field_path):
for test, list_filter_class in cls._field_list_filters:
if not test(field):
continue
return list_filter_class(field, request, params,
model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
other_model = get_model_from_relation(field)
if hasattr(field, 'rel'):
rel_name = field.rel.get_related_field().name
else:
rel_name = other_model._meta.pk.name
self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
self.lookup_choices = field.get_choices(include_blank=False)
super(RelatedFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
if hasattr(field, 'verbose_name'):
self.lookup_title = field.verbose_name
else:
self.lookup_title = other_model._meta.verbose_name
self.title = self.lookup_title
def has_output(self):
if (isinstance(self.field, models.related.RelatedObject)
and self.field.field.null or hasattr(self.field, 'rel')
and self.field.null):
extra = 1
else:
extra = 0
return len(self.lookup_choices) + extra > 1
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': self.lookup_val is None and not self.lookup_val_isnull,
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
for pk_val, val in self.lookup_choices:
yield {
'selected': self.lookup_val == smart_text(pk_val),
'query_string': cl.get_query_string({
self.lookup_kwarg: pk_val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if (isinstance(self.field |
Matoking/NVIDIA-vBIOS-VFIO-Patcher | nvidia_vbios_vfio_patcher.py | Python | cc0-1.0 | 6,176 | 0.000648 | #!/usr/bin/env python
from __future__ import print_function
import sys
import binascii
import argparse
import re
# raw_input doesn't exist in Python 3
try:
raw_input
except NameError:
raw_input = input
PROMPT_TEXT = "I agree to be careful"
WARNING_TEXT = """
USE THIS SOFTWARE AT YOUR OWN DISCRETION. THIS SOFTWARE HAS *NOT* BEEN
EXTENSIVELY TESTED AND MAY NOT WORK WITH YOUR GRAPHICS CARD.
If you want to save the created vBIOS file, type the following phrase
EXACTLY as it is written below:
%s
""" % PROMPT_TEXT
class CheckException(Exception):
pass
class VBIOSROM(object):
def __init__(self, f):
"""
        Load a VBIOS and convert it into a hex-ascii format
for easier editing
"""
content = f.read()
self.offsets = {
"header": None,
"footer": None
}
self.content = binascii.hexlify(content)
def detect_offsets(self, disable_footer=False):
"""
Search the ROM for known sections of data and raise an AssertionError
if any of the checks fails
"""
# Search for the header that starts the file
# Examples of this header:
#
# U.y.K7400.L.w.VIDEO
# U.x.K7400.L.w.VIDEO
#
HEADER_REGEX = (
b'55aa(([a-z]|[0-9]){2})(eb)(([a-z]|[0-9]){20})(564944454f)'
)
result = re.compile(HEADER_REGEX).search(self.content)
if not result or len(result.groups()) != 6:
raise CheckException("Couldn't find the ROM header!")
self.offsets["header"] = result.start(0)
if not disable_footer:
            # Search for the footer, which is shortly followed by
# 'NPDS' and 'NPDE' strings. 'NPDS' and 'NPDE' markers are separated by
# 28 ASCII characters
FOOTER_REGEX = (
b'564e(([a-z]|[0-9]){348})(4e504453)(([a-z]|[0-9]){56})(4e504445)'
)
result = re.compile(FOOTER_REGEX).search(self.content)
if not result or len(result.groups()) != 6:
raise CheckException("Couldn't find the ROM footer!")
self.offsets["footer"] = result.start(0)
def run_sanity_tests(self, ignore_check=False):
"""
Run a few sanity tests on the ROM to be a little more sure we are
working with a valid ROM
"""
try:
# There should be one 'NPDS' marker and three 'NPDE' markers
# before the footer we've already found
#
# The 'NPDS' marker should be followed by two 'NPDE' markers
npds_count = self.content.count(
b"4e504453", self.offsets["header"], self.offsets["footer"])
if npds_count != 1:
raise CheckException(
"Expected only one 'NPDS' marker between header and "
"footer, found %d" % npds_count)
npde_count = self.content.count(
b"4e504445", self.offsets["header"], self.offsets["footer"])
if npde_count != 3:
raise CheckException(
"Expected only three 'NPDE' markers between header and "
"footer, found %d" % npde_count)
npde_after_npds_count = self.content.count(
b"4e504445", self.content.find(b"4e504453"),
self.offsets["footer"])
if npde_after_npds_count != 2:
raise CheckException(
"Expected two 'NPDE' markers after the 'NPDS' marker")
except CheckException as e:
if ignore_check:
print("Encountered error during sanity check: %s" % str(e))
print("Ignoring...")
return
else:
raise
print("No problems found.")
def get_spliced_rom(self, disable_footer=False):
"""
Convert the internal hex-ascii representation of the ROM
into binary data for saving
"""
start = self.offsets["header"]
if not disable_footer:
end = self.offsets["footer"]
spliced = self.content[start:end]
else:
spliced = self.content[start:]
return binascii.unhexlify(spliced)
def main():
parser = argparse.ArgumentParser(
description=(
"Convert a full NVIDIA vBIOS ROM into a form compatible "
"for PCI passthrough."
)
)
parser.add_argument(
"-i", type=str, required=True,
help="The full ROM to read")
parser.add_argument(
"-o", type=str, required=True,
help="Path for saving the newly generated ROM")
parser.add_argument(
"--ignore-sanity-check", default=False, action="store_true",
help="Don't halt the script if any of the sanity checks fails"
)
parser.add_argument(
"--disable-footer-strip", default=False, action="store_true",
help="Don't strip the footer from the vBIOS (Allows you to convert older gen GPUs)"
)
parser.add_argument(
"--skip-the-very-important-warning",
default=False, action="store_true",
help=(
"Skip the very important warning and save the ROM without asking "
"for any input."
)
)
args = parser.parse_args()
print("Opening the ROM file...")
with open(args.i, "rb") as f:
rom = VBIOSROM(f)
print("Scanning for ROM offsets...")
rom.detect_offsets(args.disable_footer_strip)
print("Offsets found!")
if not args.disable_footer_strip:
print("Running sanity checks...")
rom.run_sanity_tests(args.ignore_sanity_check)
spliced_rom = rom.get_spliced_rom(args.disable_footer_strip)
if not args.skip_the_very_important_warning:
print(WARNING_TEXT)
print("Type here: ", end="")
answer = raw_input()
if answer != PROMPT_TEXT:
print("Wrong answer, halting...")
sys.exit(1)
print("Writing the edited ROM...")
with open(args.o, "wb") as f:
f.write(spliced_rom)
print("Done!")
if __name__ == "__main__":
main()
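# Hedged CLI sketch (ROM file names are hypothetical; the flags match the
# argparse definitions above):
#   python nvidia_vbios_vfio_patcher.py -i full_dump.rom -o patched.rom
#   python nvidia_vbios_vfio_patcher.py -i full_dump.rom -o patched.rom --disable-footer-strip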
|
zstackorg/zstack-utility | zstacklib/zstacklib/test/test_table.py | Python | apache-2.0 | 818 | 0.003667 | '''
@author: Frank
'''
import unittest
from zstacklib.iptables import iptables
class Test(unittest.TestCase):
def testName(self):
iptc = iptables.from_iptables_xml()
tbl = iptc.get_filter_table()
c = iptables.Chain()
c.name = 'testchain'
        r = iptables.Rule()
m = iptables.TcpMatch()
m.dport = 10
m.sport = 1000
r.add_match(m)
t = iptables.AcceptTarget()
r.set_target(t)
c.add_rule(r)
r = iptables.Rule()
m = iptables.IcmpMatch()
m.icmp_type = 8
r.add_match(m)
t = iptables.ReturnTarget()
        r.set_target(t)
c.add_rule(r)
tbl.add_chain(c)
print tbl
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() |
cldrn/rainmap-lite | rainmap-lite/nmaper/migrations/0009_auto_20160108_0613.py | Python | gpl-3.0 | 538 | 0.001859 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-08 06:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nmaper', '0008_auto_20160108_0558'),
]
operations = [
migrations.AlterField(
model_name='nmapscan',
name='email_text',
            field=models.CharField(choices=[('waiting', 'Waiting'), ('running', 'Running'), ('finished', 'Finished')], max_length=8),
),
]
|
stevewoolley/IoT | static_host_pub.py | Python | apache-2.0 | 2,025 | 0.002963 | #!/usr/bin/env python
import json
import awsiot
import logging
import platform
import psutil
import datetime
from gpiozero import *
NET_INTERFACES = ['en0', 'en1', 'en2', 'en3', 'wlan0', 'wlan1', 'eth0', 'eth1']
def get_ip(i):
if i in psutil.net_if_addrs():
try:
for k in psutil.net_if_addrs()[i]:
family, address, netmask, broadcast, ptp = k
if family == 2:
return address
return None
except Exception as ex:
logging.info("get_ip {} {}".format(i, ex.message))
return None
else:
return None
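# Hedged example (interface names and addresses vary by host):
# >>> get_ip('eth0')
# '192.168.1.10'   # or None when the interface is absent or has no IPv4 address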
if __name__ == "__main__":
parser = awsiot.iot_arg_parser()
args = parser.parse_args()
logging.basicConfig(filename=awsiot.LOG_FILE, level=args.log_level, format=awsiot.LOG_FORMAT)
publisher = awsiot.MQTT(args.endpoint, args.rootCA, args.cert, args.key)
properties = {}
mem = psutil.virtual_memory()
disk = psutil.disk_usage('/')
properties["bootTime"] = datetime.datetime.fromtimestamp | (psutil.boot_time()).strftime(awsiot.DATE_FORMAT).strip()
if platform.system() == 'Darwin': # mac
properties["release"] = platform.mac_ver()[0]
elif platform.machine().startswith('arm') and platform.system() == 'Linux': # raspberry pi
properties["distribution"] = "{} {}".format(platform.dist()[0], platform.dist()[1])
properties["hardware"] = "Pi Model {} V{}".format(pi_info().model, pi_info().pcb_revision)
properties["hostname"] = platf | orm.node()
properties["machine"] = platform.machine()
properties["system"] = platform.system()
properties["totalDiskSpaceRoot"] = int(disk.total / (1024 * 1024))
properties["cpuProcessorCount"] = psutil.cpu_count()
properties["ramTotal"] = int(mem.total / (1024 * 1024))
for iface in NET_INTERFACES:
properties["{}IpAddress".format(iface)] = get_ip(iface)
publisher.publish(awsiot.iot_thing_topic(args.thing), awsiot.iot_payload(awsiot.REPORTED, properties))
|
jjscarafia/l10n-canada | res_partner_attributes_add_SIN/__openerp__.py | Python | agpl-3.0 | 1,378 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Canada Social Insurance Number (SIN/NAS)',
'version': '1.0',
'author': "Savoir-faire Linux,Odoo Community Association (OCA)",
'website': 'http://www.savoirfairelinux.com',
'license': 'AGPL-3',
'category': 'Localisation/Canada',
'depends': ['base'],
'data': [
'views/res_partner.xml',
],
'installable': True,
}
|
pm5/python-moretext | setup.py | Python | mit | 466 | 0.045064 | from distutils.core import setup
setup(
name = 'moretext',
packages = ['moretext'],
version = '0.1',
    description = 'Get dummy Chinese text (lorem ipsum) with Handlino service.',
author = 'Pomin Wu',
author_email = 'pomin5@gmail.com',
    url = 'https://github.com/pm5/python-moretext',
download_url = 'https://github.com/pm5/python-moretext/tarball/v0.1',
keywords = ['test', 'lorem', 'ipsum', 'placeholder'],
classifiers = [],
)
|
jeremiahsavage/cwltool | cwltool/expression.py | Python | apache-2.0 | 5,948 | 0.002522 | import subprocess
import json
import logging
import os
import re
from typing import Any, AnyStr, Union, Text, Dict, List
import schema_salad.validate as validate
import schema_salad.ref_resolver
from .utils import aslist, get_feature
from .errors import WorkflowException
from . import sandboxjs
from . import docker
_logger = logging.getLogger("cwltool")
def jshead(engineConfig, rootvars):
# type: (List[Text], Dict[Text, Any]) -> Text
return u"\n".join(engineConfig + [u"var %s = %s;" % (k, json.dumps(v, indent=4)) for k, v in rootvars.items()])
seg_symbol = r"""\w+"""
seg_single = r"""\['([^']|\\')+'\]"""
seg_double = r"""\["([^"]|\\")+"\]"""
seg_index = r"""\[[0-9]+\]"""
segments = r"(\.%s|%s|%s|%s)" % (seg_symbol, seg_single, seg_double, seg_index)
segment_re = re.compile(segments, flags=re.UNICODE)
param_re = re.compile(r"\((%s)%s*\)$" % (seg_symbol, segments), flags=re.UNICODE)
JSON = Union[Dict[Any,Any], List[Any], Text, int, long, float, bool, None]
class SubstitutionError(Exception):
pass
def scanner(scan): # type: (Text) -> List[int]
DEFAULT = 0
DOLLAR = 1
PAREN = 2
BRACE = 3
SINGLE_QUOTE = 4
DOUBLE_QUOTE = 5
BACKSLASH = 6
i = 0
stack = [DEFAULT]
start = 0
while i < len(scan):
state = stack[-1]
c = scan[i]
if state == DEFAULT:
if c == '$':
stack.append(DOLLAR)
elif c == '\\':
stack.append(BACKSLASH)
elif state == BACKSLASH:
stack.pop()
if stack[-1] == DEFAULT:
return [i-1, i+1]
elif state == DOLLAR:
if c == '(':
start = i-1
stack.append(PAREN)
elif c == '{':
start = i-1
stack.append(BRACE)
else:
stack.pop()
elif state == PAREN:
if c == '(':
stack.append(PAREN)
elif c == ')':
stack.pop()
if stack[-1] == DOLLAR:
return [start, i+1]
elif c == "'":
stack.append(SINGLE_QUOTE)
elif c == '"':
stack.append(DOUBLE_QUOTE)
elif state == BRACE:
if c == '{':
stack.append(BRACE)
elif c == '}':
stack.pop()
if stack[-1] == DOLLAR:
return [start, i+1]
elif c == "'":
stack.append(SINGLE_QUOTE)
elif c == '"':
stack.append(DOUBLE_QUOTE)
elif state == SINGLE_QUOTE:
if c == "'":
stack.pop()
elif c == '\\':
stack.append(BACKSLASH)
elif state == DOUBLE_QUOTE:
if c == '"':
stack.pop()
elif c == '\\':
stack.append(BACKSLASH)
i += 1
    if len(stack) > 1:
raise SubstitutionError("Substitution error, unfinished block starting at position {}: {}".format(start, scan[start:]))
else:
return None
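# Hedged doctest-style examples for scanner(); it returns the [start, end]
# offsets of the first $() / ${} block, or None when there is none:
# >>> scanner("a $(b) c")
# [2, 6]
# >>> scanner("plain text") is None
# True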
def next_seg(remain, obj): # type: (Text, Any)->Text
if remain:
m = segment_re.match(remain)
if m.group(0)[0] == '.':
return next_seg(remain[m.end(0):], obj[m.group(0)[1:]])
elif m.group(0)[1] in ("'", '"'):
            key = m.group(0)[2:-2].replace("\\'", "'").replace('\\"', '"')
return next_seg(remain[m.end(0):], obj[key])
else:
key = m.group(0)[1:-1]
return next_seg(remain[m.end(0):], obj[int(key)])
else:
return obj
def evaluator(ex, jslib, obj, fullJS=False, timeout=None):
# type: (Text, Text, Dict[Text, Any], bool, int) -> JSON
m = param_re.match(ex)
if m:
return next_seg(m.group(0)[m.end(1) - m.start(0):-1], obj[m.group(1)])
elif fullJS:
return sandboxjs.execjs(ex, jslib, timeout=timeout)
else:
raise sandboxjs.JavascriptException("Syntax error in parameter reference '%s' or used Javascript code without specifying InlineJavascriptRequirement.", ex)
def interpolate(scan, rootvars,
timeout=None, fullJS=None, jslib=""):
# type: (Text, Dict[Text, Any], int, bool, Union[str, Text]) -> JSON
scan = scan.strip()
parts = []
w = scanner(scan)
while w:
parts.append(scan[0:w[0]])
if scan[w[0]] == '$':
e = evaluator(scan[w[0]+1:w[1]], jslib, rootvars, fullJS=fullJS,
timeout=timeout)
if w[0] == 0 and w[1] == len(scan):
return e
leaf = json.dumps(e, sort_keys=True)
if leaf[0] == '"':
leaf = leaf[1:-1]
parts.append(leaf)
elif scan[w[0]] == '\\':
e = scan[w[1]-1]
parts.append(e)
scan = scan[w[1]:]
w = scanner(scan)
parts.append(scan)
return ''.join(parts)
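# Hedged doctest-style examples for interpolate(); plain parameter
# references like these need no JavaScript engine:
# >>> interpolate("$(inputs.threads)", {"inputs": {"threads": 4}})
# 4
# >>> interpolate("-j $(inputs.threads)", {"inputs": {"threads": 4}})
# '-j 4'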
def do_eval(ex, jobinput, requirements, outdir, tmpdir, resources,
context=None, pull_image=True, timeout=None):
# type: (Union[dict, AnyStr], Dict[Text, Union[Dict, List, Text]], List[Dict[Text, Any]], Text, Text, Dict[Text, Union[int, Text]], Any, bool, int) -> Any
runtime = resources.copy()
runtime["tmpdir"] = tmpdir
runtime["outdir"] = outdir
rootvars = {
u"inputs": jobinput,
u"self": context,
u"runtime": runtime }
if isinstance(ex, (str, Text)):
fullJS = False
jslib = u""
for r in reversed(requirements):
if r["class"] == "InlineJavascriptRequirement":
fullJS = True
jslib = jshead(r.get("expressionLib", []), rootvars)
break
return interpolate(ex,
rootvars,
timeout=timeout,
fullJS=fullJS,
jslib=jslib)
else:
return ex
|
LLNL/spack | var/spack/repos/builtin/packages/guidance/package.py | Python | lgpl-2.1 | 1,752 | 0.001142 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
from spack import *
class Guidance(MakefilePackage):
"""Guidance: Accurate detection of unreliable alignment regions accounting
for the uncertainty of multiple parameters."""
homepage = "http://guidance.tau.ac.il/ver2/"
url = "http://guidance.tau.ac.il/ver2/guidance.v2.02.tar.gz"
version('2.02', sha256='825e105dde526759fb5bda1cd539b24db0b90b8b586f26b1df74d9c5abaa7844')
depends_on('perl', type=('build', 'run'))
depends_on('perl-bioperl', type=('build', 'run'))
depends_on('ruby')
depends_on('prank')
depends_on('clustalw')
depends_on('mafft')
depends_on('muscle')
conflicts('%gcc@6.2.0:')
def edit(self, spec, prefix):
for dir in 'Guidance', 'Selecton', 'bioSequence_scripts_and_constants':
with working_dir(join_path('www', dir)):
files = glob.iglob('*.pl')
for file in files:
perl = FileFilter(file)
perl.filter('#!/usr/bin/perl -w', '#!/usr/bin/env perl')
    def install(self, spec, prefix):
mkdir(prefix.bin)
install_tree('libs', prefix.bin.libs)
install_tree('programs', prefix.bin.programs)
install_tree('www', prefix.bin.www)
with working_dir(join_path('www', 'Guidance')): # copy without suffix
install('guidance.pl', join_path(prefix.bin.www.Guidance,
'guidance'))
def setup_run_environment(self, env):
env.prepend_path('PATH', prefix.bin.www.Guidance)
|
bmwiedemann/linuxcnc-mirror | configs/sim/axis/remap/iocontrol-removed/python/tooltable.py | Python | lgpl-2.1 | 6,047 | 0.017529 | # This is a component of LinuxCNC
# Copyright 2011, 2013 Dewey Garrett <dgarrett@panix.com>, Michael
# Haberler <git@mah.priv.at>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import re
class EmcToolTable(object):
''' intended as bug-compatible Python replacement for the
tooltable io used in iocontrol
NB: old file formats not supported.
'''
ttype = { 'T' : int, 'P': int, 'Q':int,
'X' : float, 'Y' : float, 'Z' : float,
'A' : float, 'B' : float, 'C' : float,
'U' : float, 'V' : float, 'W' : float,
'I' : float, 'J' : float, 'D' : float }
def __init__(self,filename,random_toolchanger):
self.filename = filename
self.random_toolchanger = random_toolchanger
def load_table(self, tooltable,comments,fms):
self.fakepocket = 0
fp = open(self.filename)
lno = 0
for line in fp.readlines():
lno += 1
if not line.startswith(';') and line.strip():
entry = self.parseline(lno,line.strip())
if entry:
self.assign(tooltable,entry,comments,fms)
fp.close()
def save_table(self, tooltable, comments,fms):
os.rename(self.filename,self.filename + '.bak')
fp = open(self.filename, 'w')
start = 0 if self.random_toolchanger else 1
for p in range(start,len(tooltable)):
t = tooltable[p]
if t.toolno != -1:
print >> fp, "T%d P%d" % (t.toolno, p if self.random_toolchanger else fms[p]),
if t.diameter: print >> fp, "D%f" % (t.diameter),
if t.offset.x: print >> fp, "X%+f" % (t.offset.x),
if t.offset.y: print >> fp, "Y%+f" % (t.offset.y),
if t.offset.z: print >> fp, "Z%+f" % (t.offset.z),
if t.offset.a: print >> fp, "A%+f" % (t.offset.a),
if t.offset.b: print >> fp, "B%+f" % (t.offset.b),
if t.offset.c: print >> fp, "C%+f" % (t.offset.c),
if t.offset.u: print >> fp, "U%+f" % (t.offset.u),
if t.offset.v: print >> fp, "V%+f" % (t.offset.v),
                if t.offset.w: print >> fp, "W%+f" % (t.offset.w),
if t.frontangle: print >> fp, "I%+f" % (t.frontangle),
if t.backangle: print >> fp, "J%+f" % (t.backangle),
if t.orientation: print >> fp, "Q%+d" % (t.orientation),
if comments.has_key(p) and comments[p]:
                    print >> fp, ";%s" % (comments[p])
else:
print >> fp
fp.close()
def assign(self,tooltable,entry,comments,fms):
pocket = entry['P']
if not self.random_toolchanger:
self.fakepocket += 1
if self.fakepocket >= len(tooltable):
print "too many tools. skipping tool %d" % (toolno)
return
if not fms is None:
fms[self.fakepocket] = pocket
pocket = self.fakepocket
if pocket < 0 or pocket > len(tooltable):
print "max pocket number is %d. skipping tool %d" % (len(tooltable) - 1, toolno)
return
tooltable[pocket].zero()
for (key,value) in entry.items():
if key == 'T' : tooltable[pocket].toolno = value
if key == 'Q' : tooltable[pocket].orientation = value
if key == 'D' : tooltable[pocket].diameter = value
if key == 'I' : tooltable[pocket].frontangle = value
if key == 'J' : tooltable[pocket].backangle = value
if key == 'X' : tooltable[pocket].offset.x = value
if key == 'Y' : tooltable[pocket].offset.y = value
if key == 'Z' : tooltable[pocket].offset.z = value
if key == 'A' : tooltable[pocket].offset.a = value
if key == 'B' : tooltable[pocket].offset.b = value
if key == 'C' : tooltable[pocket].offset.c = value
if key == 'U' : tooltable[pocket].offset.u = value
if key == 'V' : tooltable[pocket].offset.v = value
if key == 'W' : tooltable[pocket].offset.w = value
if key == 'comment' : comments[pocket] = value # aaargh
def parseline(self,lineno,line):
"""
read a tooltable line
if an entry was parsed successfully, return a Tool() instance
"""
        line = line.rstrip("\n")
if re.match('\A\s*T\d+',line):
semi = line.find(";")
if semi != -1:
comment = line[semi+1:]
else:
comment = None
entry = line.split(';')[0]
result = dict()
for field in entry.split():
(name,value) = re.search('([a-zA-Z])([+-]?\d*\.?\d*)',field).groups()
if name:
key = name.upper()
result[key] = EmcToolTable.ttype[key](value)
else:
print "%s:%d bad line: '%s' " % (self.filename, lineno, entry)
result['comment'] = comment
return result
print "%s:%d: unrecognized tool table entry '%s'" % (self.filename,lineno,line)
def restore_state(self,e):
pass
def save_state(self,e):
pass
|
georgistanev/django-dash | src/dash/contrib/plugins/dummy/dash_widgets.py | Python | gpl-2.0 | 1,869 | 0.00214 | __author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = (
    'BaseDummyWidget', 'Dummy1x1Widget', 'Dummy1x2Widget', 'Dummy2x1Widget',
'Dummy2x2Widget', 'Dummy3x3Widget'
)
from django.template.loader import render_to_string
from dash.base import BaseDashboardPluginWidget
# ************************************************************************
# ************************* Base widget for Dummy plugin *****************
# ************************************************************************
class BaseDummyWidget(BaseDashboardPluginWidget):
"""
Base dummy plugin widget.
"""
media_js = [
#'js/dash_plugin_dummy.js',
]
media_css = [
#'css/dash_plugin_dummy.css',
]
def render(self, request=None):
context = {'plugin': self.plugin}
return render_to_string('dummy/render.html', context)
# ************************************************************************
# ************************* Specific widgets for Dummy plugin ************
# ************************************************************************
class Dummy1x1Widget(BaseDummyWidget):
"""
1x1 dummy plugin widget.
"""
plugin_uid = 'dummy_1x1'
class Dummy1x2Widget(BaseDummyWidget):
"""
1x2 dummy plugin widget.
"""
plugin_uid = 'dummy_1x2'
cols = 1
rows = 2
class Dummy2x1Widget(BaseDummyWidget):
"""
2x1 dummy plugin widget.
"""
plugin_uid = 'dummy_2x1'
cols = 2
rows = 1
class Dummy2x2Widget(BaseDummyWidget):
"""
2x2 dummy plugin widget.
"""
plugin_uid = 'dummy_2x2'
cols = 2
rows = 2
class Dummy3x3Widget(BaseDummyWidget):
"""
3x3 dummy plugin widget.
"""
plugin_uid = 'dummy_3x3'
cols = 3
rows = 3
|
rbuffat/pyidf | tests/test_curvedoubleexponentialdecay.py | Python | apache-2.0 | 3,329 | 0.003905 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.performance_curves import CurveDoubleExponentialDecay
log = logging.getLogger(__name__)
class TestCurveDoubleExponentialDecay(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_curvedoubleexponentialdecay(self):
pyidf.validation_level = ValidationLevel.error
obj = CurveDoubleExponentialDecay()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_coefficient1_c1 = 2.2
obj.coefficient1_c1 = var_coefficient1_c1
# real
var_coefficient2_c2 = 3.3
obj.coefficient2_c2 = var_coefficient2_c2
# real
var_coefficient3_c3 = 4.4
obj.coefficient3_c3 = var_coefficient3_c3
# real
var_coefficient3_c4 = 5.5
obj.coefficient3_c4 = var_coefficient3_c4
# real
var_coefficient3_c5 = 6.6
obj.coefficient3_c5 = var_coefficient3_c5
# real
var_minimum_value_of_x = 7.7
obj.minimum_value_of_x = var_minimum_value_of_x
# real
var_maximum_value_of_x = 8.8
obj.maximum_value_of_x = var_maximum_value_of_x
# real
var_minimum_curve_output = 9.9
obj.minimum_curve_output = var_minimum_curve_output
# real
var_maximum_curve_output = 10.1
obj.maximum_curve_output = var_maximum_curve_output
# alpha
var_input_unit_type_for_x = "Dimensionless"
obj.input_unit_type_for_x = var_input_unit_type_for_x
# alpha
var_output_unit_type = "Dimensionless"
obj.output_unit_type = var_output_unit_type
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.curvedoubleexponentialdecays[0].name, var_name)
self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].coefficient1_c1, var_coefficient1_c1)
self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].coefficient2_c2, var_coefficient2_c2)
self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].coefficient3_c3, var_coefficient3_c3)
self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].coefficient3_c4, var_coefficient3_c4)
self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].coefficient3_c5, var_coefficient3_c5)
self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].minimum_value_of_x, var_minimum_value_of_x)
        self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].maximum_value_of_x, var_maximum_value_of_x)
self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].minimum_curve_output, var_minimum_curve_output)
self.assertAlmostEqual(idf2.curvedoubleexponentialdecays[0].maximum_curve_output, var_maximum_curve_output)
        self.assertEqual(idf2.curvedoubleexponentialdecays[0].input_unit_type_for_x, var_input_unit_type_for_x)
self.assertEqual(idf2.curvedoubleexponentialdecays[0].output_unit_type, var_output_unit_type) |
g-goessel/mathdoku_solve | InterfaceMathDoku.py | Python | mpl-2.0 | 1,799 | 0.021861 | # Interface MathDoku
from fonctions import *
from bruteforce import *
from numpy import array
donnees = {}
taille = int(input('Grid size? '))
n = int(input('Number of blocks in the grid\n'))
grille=array([[0 for i in range(taille)] for j in range(taille)])
try:
for i in range(1,n+1):
        print('Your grid:\n', grille)
        valeur = int(input('Value of block ' + str(i) + '\n'))
        nombre_elements = int(input('Number of elements in the block\n'))
        # with more than 8 elements in a block, computing the possibilities uses too much RAM
        if nombre_elements > 8 :
            print('Block too large to be solvable, sorry')
            raise ValueError('block too large')
liste_node = []
liste_generale = [valeur]
        x_y = input('Enter the coordinates as (x,y),(x,y),... ')
        x_y=x_y.split('),(')
        if len(x_y) != nombre_elements:
            print('Wrong number of elements entered')
            raise ValueError('wrong number of elements')
to_add=[ (int(x.replace('(','')),int(y.replace(')','')) ) for (x,y) in [i.split(',') for i in x_y] ]
        # fill in the display grid
for (x,y) in [i for i in to_add] :
grille[x,y]=valeur
liste_generale.append(to_add)
        # add the possible combinations to the dict entry
liste_generale.append(combi_possibles(valeur,nombre_elements,n))
donnees[i] = liste_generale
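        # hypothetical shape of one entry, for illustration:
        # donnees[1] == [12, [(0, 0), (0, 1)], <result of combi_possibles>]
        # i.e. [block value, cell coordinates, candidate combinations]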
    print('Your grid is:\n', grille)
    print(donnees)
    # a list of lists is more efficient than a matrix here;
    # donnees is a dict, so pass its values (a dict has no .tolist())
    resultat = bruteforce(list(donnees.values()), taille)
    print('Result obtained in', resultat[1], 'attempts and', resultat[2], 'seconds:\n', array(resultat[0]))
except:
    print('An error occurred, please try again')
|
tensorflow/tensorflow | tensorflow/python/kernel_tests/random/stateless_random_ops_test.py | Python | apache-2.0 | 23,279 | 0.009236 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random ops."""
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.platform import test
# Note that in theory each test will reset the eager context and may choose to
# hide some devices, so we shouldn't cache this transient info. Tests in this
# file don't make those config changes, so caching is fine. It provides a good
# speed-up.
_cached_device = None
def get_device():
global _cached_device
if _cached_device is not None:
return _cached_device
# Precedence from high to low
for device_type in ('XLA_GPU', 'GPU', 'XLA_CPU', 'CPU'):
devices = config.list_logical_devices(device_type)
if devices:
_cached_device = devices[0]
return _cached_device
raise ValueError('Cannot find any suitable device. Available devices: %s' %
config.list_logical_devices())
BEFORE_EXPIRE = (2020, 10, 24)
AFTER_EXPIRE = (2020, 10, 26)
def invert_philox(key, value):
"""Invert the Philox bijection."""
key = np.array(key, dtype=np.uint32)
value = np.array(value, dtype=np.uint32)
step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
for n in range(10)[::-1]:
key0, key1 = key + n * step
v0 = value[3] * 0x991a7cdb & 0xffffffff
v2 = value[1] * 0x6d7cae67 & 0xffffffff
hi0 = v0 * 0xD2511F53 >> 32
hi1 = v2 * 0xCD9E8D57 >> 32
v1 = hi1 ^ value[0] ^ key0
v3 = hi0 ^ value[2] ^ key1
value = v0, v1, v2, v3
return np.array(value)
SEEDS = ((7, 17), (11, 5), (2, 3))
SEED_TYPES = [dtypes.int32, dtypes.int64]
def float_cases(shape_dtypes=(None,)):
cases = (
# Uniform distribution, with and without range
('uniform', stateless.stateless_random_uniform, random_ops.random_uniform,
{}),
('uniform2', stateless.stateless_random_uniform,
random_ops.random_uniform, dict(minval=2.2, maxval=7.1)),
# Normal distribution, with and without mean+stddev
('normal', stateless.stateless_random_normal, random_ops.random_normal,
{}),
('normal2', stateless.stateless_random_normal, random_ops.random_normal,
dict(mean=2, stddev=3)),
# Truncated normal distribution, with and without mean+stddev
('trnorm', stateless.stateless_truncated_normal,
random_ops.truncated_normal, {}),
('trnorm2', stateless.stateless_truncated_normal,
random_ops.truncated_normal, dict(mean=3, stddev=4)),
)
# Explicitly passing in params because capturing cell variable from loop is
# problematic in Python
def wrap(op, dtype, shape, shape_dtype, seed, **kwargs):
device_type = get_device().device_type
# Some dtypes are not supported on some devices
if (dtype == dtypes.float16 and device_type in ('XLA_GPU', 'XLA_CPU') or
dtype == dtypes.bfloat16 and device_type == 'GPU'):
dtype = dtypes.float32
shape_ = (constant_op.constant(shape, dtype=shape_dtype)
if shape_dtype is not None else shape)
return op(seed=seed, shape=shape_, dtype=dtype, **kwargs)
def _name(a):
if hasattr(a, 'name'):
return a.name
else:
return a
for dtype in dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64:
for shape_dtype in shape_dtypes:
for shape in (), (3,), (2, 5):
for name, stateless_op, stateful_op, kwargs in cases:
yield (('%s_%s_%s_%s' %
(name, _name(dtype), shape, _name(shape_dtype))).replace(
' ', ''),
functools.partial(wrap, stateless_op, dtype, shape,
shape_dtype, **kwargs),
functools.partial(wrap, stateful_op, dtype, shape, shape_dtype,
**kwargs))
def int_cases(shape_dtypes=(None,), minval_maxval=None):
def wrap(op, minval, maxval, shape, shape_dtype, dtype, seed, **kwargs):
shape_ = (constant_op.constant(shape, dtype=shape_dtype)
if shape_dtype is not None else shape)
return op(
seed=seed, shape=shape_, minval=minval, maxval=maxval, dtype=dtype,
**kwargs)
if minval_maxval is None:
minval_maxval = ((2, 11111),)
for minval, maxval in minval_maxval:
for shape_dtype in shape_dtypes:
for shape in (), (3,), (2, 5):
for dtype in dtypes.int32, dtypes.int64:
yield ('uniform_%s_%s' % (minval, maxval),
functools.partial(wrap, stateless.stateless_random_uniform,
minval, maxval, shape, shape_dtype, dtype),
functools.partial(wrap, random_ops.random_uniform, minval,
maxval, shape, shape_dtype, dtype))
def multinomial_cases():
num_samples = 10
def wrap(op, logits, logits_dtype, output_dtype, seed):
return op(seed=seed,
logits=constant_op.constant(logits, dtype=logits_dtype),
num_samples=num_samples, output_dtype=output_dtype)
for logits_dtype in np.float16, np.float32, np.float64:
for output_dtype in dtypes.int32, dtypes.int64:
for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
[0.25, 0.75]]):
yield ('multinomial',
functools.partial(wrap, stateless.stateless_multinomial, logits,
logits_dtype, output_dtype),
functools.partial(wrap, random_ops.multinomial, logits,
logits_dtype, output_dtype))
def gamma_cases():
def wrap(op, alpha, dtype, shape, seed):
return op(seed=seed, shape=shape,
alpha=constant_op.constant(alpha, dtype=dtype), dtype=dtype)
for dtype in np.float16, np.float32, np.float64:
for alpha in ([[.5, 1., 2.]], [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]):
yield ('gamma',
functools.partial(wrap, stateless.stateless_random_gamma, alpha,
dtype, (10,) + tuple(np.shape(alpha))),
functools.partial(wrap, random_ops.random_gamma, alpha, dtype,
(10,)))
def poisson_cases():
def wrap(op, lam, lam_dtype, out_dtype, shape, seed):
return op(seed=seed, shape=shape,
lam=constant_op.constant(lam_dtype(lam), dtype=lam_dtype),
dtype=out_dtype)
for lam_dtype in np.float16, np.float32, np.float64, np.int32, np.int64:
for out_dtype in np.float16, np.float32, np.float64, np.int32, np.int64:
for lam in ([[5.5, 1., 2.]], [[7.5, 10.5], [3.8, 8.2], [1.25, 9.75]]):
yield ('poisson',
functools.partial(wrap, stateless.stateless_random_poisson, lam,
lam_dtype, out_dtype,
(10,) + tuple(np.shape(lam))),
functools.partial(wrap, random_o |
idlesign/uwsgiconf | tests/presets/test_nice.py | Python | bsd-3-clause | 4,893 | 0.002452 | from os import environ
from uwsgiconf.presets.nice import Section, PythonSection
def test_nice_section(assert_lines):
assert_lines([
'env = LANG=en_US.UTF-8',
'workers = %k',
'die-on-term = true',
'vacuum = true',
'threads = 4',
], Section(threads=4))
assert_lines([
'logto',
], Section(), assert_in=False)
assert_lines([
'enable-threads = true',
'uid = www-data',
'gid = www-data',
'logto = /a/b.log',
], Section(threads=True, log_into='/a/b.log').configure_owner())
assert_lines([
'workers = 13',
'touch-reload', 'test_nice.py',
], Section(workers=13, touch_reload=__file__))
assert_lines([
'disable-write-exception = true',
'ignore-write-errors = true',
'ignore-sigpipe = true',
'log-master = true',
'threaded-logger = true',
], Section(log_dedicated=True, ignore_write_errors=True))
assert '%(headers) headers in %(hsize) bytes' in Section().get_log_format_default()
def test_get_bundled_static_path(assert_lines):
path = Section.get_bundled_static_path('503.html')
assert path.endswith('uwsgiconf/contrib/django/uwsgify/static/uwsgify/503.html')
def test_configure_https_redirect(assert_lines):
section = Section()
section.configure_https_redirect()
assert_lines(
'route-if-not = eq:${HTTPS};on redirect-301:https://${HTTP_HOST}${REQUEST_URI}',
section
)
def test_configure_maintenance_mode(assert_lines, tmpdir):
section = Section()
section.configure_maintenance_mode('/watch/that/file', '/serve/this/file')
section.configure_maintenance_mode('/watch/that/file/also', 'http://pythonz.net')
assert_lines([
'route-if = exists:/watch/that/file static:/serve/this/file',
'route-if = exists:/watch/that/file/also redirect-302:http://pythonz.net',
], section)
afile = tmpdir.join('maintenance_file')
section = Section()
section.configure_maintenance_mode(f'{afile}', 'app')
assert_lines([
f'env = UWSGICONF_MAINTENANCE={afile}',
f'touch-reload = {afile}',
], section)
assert_lines([
'wsgi = uwsgiconf.maintenance:app_maintenance',
], section, assert_in=False)
# Create file
afile.write('')
section = Section()
section.configure_maintenance_mode(f'{afile}', 'app')
assert_lines([
f'env = UWSGICONF_MAINTENANCE={afile}',
f'touch-reload = {afile}',
'env = UWSGICONF_MAINTENANCE_INPLACE=1',
'wsgi = uwsgiconf.maintenance:app_maintenance',
], section)
assert environ['UWSGICONF_MAINTENANCE'] == f'{afile}'
assert environ['UWSGICONF_MAINTENANCE_INPLACE'] == '1'
section.configure_maintenance_mode(f'{afile}', 'app::mypack.here.there:myfunc')
assert_lines([
'wsgi = mypack.here.there:myfunc',
], section)
def test_configure_logging_json(assert_lines):
section = Section()
section.configure_logging_json()
assert_lines([
'logger-req = stdio:',
        'log-format = %(method) %(uri) -> %(status)',
'log-req-encoder = json {"dt": "${strftime:%%Y-%%m-%%dT%%H:%%M:%%S%%z}", "src": "uwsgi.req"',
'log-req-encoder = nl',
'"src": "uwsgi.out"',
], section)
def test_configure_certbot_https(assert_lines, monkeypatch):
monkeypatch.setattr('pathlib.Path.exists', lambda self: True)
section = Section()
    section.configure_certbot_https('mydomain.org', '/var/www/', address=':4443')
assert_lines([
'static-map2 = /.well-known/=/var/www/',
'https-socket = :4443,/etc/letsencrypt/live/mydomain.org/fullchain.pem,'
'/etc/letsencrypt/live/mydomain.org/privkey.pem',
], section)
section = Section.bootstrap(['http://:80'])
section.configure_certbot_https('mydomain.org', '/var/www/', http_redirect=True)
assert_lines([
'shared-socket = :80',
'shared-socket = :443',
'http-socket = =0',
'https-socket = =1,/etc/letsencrypt/live/mydomain.org/fullchain.pem,'
'/etc/letsencrypt/live/mydomain.org/privkey.pem',
'route-if-not = eq:${HTTPS};on redirect-301:https://${HTTP_HOST}${REQUEST_URI}',
], section)
def test_nice_python(assert_lines):
assert_lines([
'plugin = python',
'pyhome = /home/idle/venv/\npythonpath = /home/idle/apps/',
'wsgi = somepackage.module',
'need-app = true',
], PythonSection(
params_python=dict(
# We'll run our app using virtualenv.
python_home='/home/idle/venv/',
search_path='/home/idle/apps/',
),
wsgi_module='somepackage.module',
embedded_plugins=None
))
# Embedded plugins = True
assert_lines('plugin = python', PythonSection(wsgi_module='somepackage.module'), assert_in=False)
|
sigmunau/nav | python/nav/web/business/urls.py | Python | gpl-2.0 | 472 | 0.002119 | """URL config for business tool"""
from django.conf.urls import url, patterns
from nav.web.business import views
urlpatterns = patterns('',
url(r'^$', views.BusinessView.as_view(),
name='business-index'),
url('^device_availability/$', views.DeviceAvailabilityReport.as_view(),
name='business-report-device-availability'),
url('^link_availability/$', views.LinkAvailabilityReport.as_view(),
        name='business-report-link-availability')
)
|
inspirehep/invenio-records | invenio_records/upgrades/records_2014_04_14_json_type_fix.py | Python | gpl-2.0 | 1,655 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Change JSON data type from TEXT to LONGTEXT."""
from invenio_upgrader.api import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.exc import OperationalError
depends_on = []
def info():
"""Return information about the upgrade recipe."""
return __doc__
def do_upgrade():
"""Perform the update recipe."""
try:
op.alter_column(
u'bibrec', 'additional_info',
existing_type=mysql.TEXT(),
type_=mysql.LONGTEXT(),
nullable=True
)
except OperationalError:
op.add_column('bibrec',
sa.Column('additional_info',
mysql.LONGTEXT(),
                                nullable=True)
                      )
def estimate():
"""Estimate running time of upgrade in seconds (optional)."""
return 1
|
Eljee/symfit | symfit/tests/tests.py | Python | gpl-2.0 | 23,874 | 0.004356 | from __future__ import division, print_function
import unittest
import inspect
import sympy
from sympy import symbols
import numpy as np
from symfit.api import Variable, Parameter, Fit, FitResults, Maximize, Minimize, exp, Likelihood, ln, log, variables, parameters
from symfit.functions import Gaussian, Exp
import scipy.stats
from scipy.optimize import curve_fit
from symfit.core.support import sympy_to_scipy, sympy_to_py
import matplotlib.pyplot as plt
import seaborn
class TddInPythonExample(unittest.TestCase):
def test_gaussian(self):
x0, sig = parameters('x0, sig')
x = Variable()
new = sympy.exp(-(x - x0)**2/(2*sig**2))
self.assertIsInstance(new, sympy.exp)
g = Gaussian(x, x0, sig)
self.assertTrue(issubclass(g.__class__, sympy.exp))
def test_callable(self):
a, b = parameters('a, b')
x, y = variables('x, y')
func = a*x**2 + b*y**2
result = func(x=2, y=3, a=3, b=9)
self.assertEqual(result, 3*2**2 + 9*3**2)
xdata = np.arange(1,10)
ydata = np.arange(1,10)
result = func(x=ydata, y=ydata, a=3, b=9)
self.assertTrue(np.array_equal(result, 3*xdata**2 + 9*ydata**2))
def test_read_only_results(self):
"""
Fit results should be read-only. Let's try to break this!
"""
xdata = np.linspace(1,10,10)
ydata = 3*xdata**2
a = Parameter(3.0, min=2.75)
b = Parameter(2.0, max=2.75)
x = Variable('x')
new = a*x**b
fit = Fit(new, xdata, ydata)
fit_result = fit.execute()
# Break it!
try:
fit_result.params = 'hello'
except AttributeError:
self.assertTrue(True) # desired result
else:
self.assertNotEqual(fit_result.params, 'hello')
try:
            # Bypass the property getter. This will work, as it sets the instance value of __params.
fit_result.__params = 'hello'
except AttributeError as foo:
self.assertTrue(False) # undesired result
else:
self.assertNotEqual(fit_result.params, 'hello')
        # The assignment will have succeeded on the instance because we set it from the outside.
# I must admit I don't fully understand why this is allowed and I don't like it.
# However, the tests below show that it did not influence the class method itself so
# fitting still works fine.
self.assertEqual(fit_result.__params, 'hello')
        # Do a second fit and double check that we do not overwrite something crucial.
xdata = np.arange(-5, 5, 1)
ydata = np.arange(-5, 5, 1)
xx, yy = np.meshgrid(xdata, ydata, sparse=False)
xdata_coor = np.dstack((xx, yy))
zdata = (2.5*xx**2 + 3.0*yy**2)
a = Parameter(2.5, max=2.75)
b = Parameter(3.0, min=2.75)
x = Variable()
y = Variable()
new = (a*x**2 + b*y**2)
fit_2 = Fit(new, xdata_coor, zdata)
fit_result_2 = fit_2.execute()
self.assertNotAlmostEqual(fit_result.params.a, fit_result_2.params.a)
self.assertAlmostEqual(fit_result.params.a, 3.0)
self.assertAlmostEqual(fit_result_2.params.a, 2.5)
self.assertNotAlmostEqual(fit_result.params.b, fit_result_2.params.b)
self.assertAlmostEqual(fit_result.params.b, 2.0)
self.assertAlmostEqual(fit_result_2.params.b, 3.0)
def test_fitting(self):
xdata = np.linspace(1,10,10)
ydata = 3*xdata**2
a = Parameter(3.0)
b = Parameter(2.0)
x = Variable('x')
new = a*x**b
fit = Fit(new, xdata, ydata)
func = sympy_to_py(new, [x], [a, b])
result = func(xdata, 3, 2)
self.assertTrue(np.array_equal(result, ydata))
result = fit.scipy_func(fit.xdata, [3, 2])
self.assertTrue(np.array_equal(result, ydata))
args, varargs, keywords, defaults = inspect.getargspec(func)
# self.assertEqual(args, ['x', 'a', 'b'])
fit_result = fit.execute()
self.assertIsInstance(fit_result, FitResults)
self.assertAlmostEqual(fit_result.params.a, 3.0)
self.assertAlmostEqual(fit_result.params.b, 2.0)
self.assertIsInstance(fit_result.params.a_stdev, float)
self.assertIsInstance(fit_result.params.b_stdev, float)
self.assertIsInstance(fit_result.r_squared, float)
# Test several false ways to access the data.
self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_fdska'])
self.assertRaises(AttributeError, getattr, *[fit_result.params, 'c'])
self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_stdev_stdev'])
self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a_stdev_'])
self.assertRaises(AttributeError, getattr, *[fit_result.params, 'a__stdev'])
def test_numpy_functions(self):
xdata = np.linspace(1,10,10)
ydata = 45*np.log(xdata*2)
a = Parameter()
b = Parameter(value=2.1, fixed=True)
x = Variable()
new = a*sympy.log(x*b)
def test_grid_fitting(self):
xdata = np.arange(-5, 5, 1)
ydata = np.arange(-5, 5, 1)
xx, yy = np.meshgrid(xdata, ydata, sparse=False)
xdata_coor = np.dstack((xx, yy))
zdata = (2.5*xx**2 + 3.0*yy**2)
a = Parameter(2.5, max=2.75)
b = Parameter(3.0, min=2.75)
x = Variable()
y = Variable()
new = (a*x**2 + b*y**2)
fit = Fit(new, xdata_coor, zdata)
# Test the flatten function for consistency.
xdata_coor_flat, zdata_flat = fit._flatten(xdata_coor, zdata)
# _flatten transposes such arrays because the variables are in the deepest dimension instead of the first.
# This is normally not a problem because all we want from the fit is the correct parameters.
self.assertFalse(np.array_equal(zdata, zdata_flat.reshape((10,10))))
self.assertTrue(np.array_equal(zdata, zdata_flat.reshape((10,10)).T))
self.assertFalse(np.array_equal(xdata_coor, xdata_coor_flat.reshape((10,10,2))))
new_xdata = xdata_coor_flat.reshape((2,10,10)).T
self.assertTrue(np.array_equal(xdata_coor, new_xdata))
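        # For intuition: np.dstack puts the two variables in the last axis,
        # giving xdata_coor shape (10, 10, 2); _flatten reorders the data to
        # variables-first (conceptually shape (2, 100)), which is why the
        # round trip above needs reshape((2, 10, 10)).T.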
results = fit.execute()
self.assertAlmostEqual(results.params.a, 2.5)
self.assertAlmostEqual(results.params.b, 3.)
def test_2D_fitting(self):
xdata = np.random.randint(-10, 11, size=(2, 400))
zdata = 2.5*xdata[0]**2 + 7.0*xdata[1]**2
a = Parameter()
b = Parameter()
x = Variable()
y = Variable()
new = a*x**2 + b*y**2
fit = Fit(new, xdata, zdata)
result = fit.scipy_func(fit.xdata, [2, 3])
import inspect
args, varargs, keywords, defaults = inspect. | getargspec(fit.scipy_func)
self.assertEqual(args, ['x', 'p'])
fit_result = fit.execute()
self.assertIsInstance(fit_result, FitResults)
def test_gaussian_fitting(self):
        xdata = 2*np.random.rand(10000) - 1  # random between [-1, 1]
ydata = scipy.stats.norm.pdf(xdata, loc=0.0, scale=1.0)
x0 = Parameter()
sig = Parameter()
A = Paramet | er()
x = Variable()
g = A * Gaussian(x, x0, sig)
fit = Fit(g, xdata, ydata)
fit_result = fit.execute()
self.assertAlmostEqual(fit_result.params.A, 0.3989423)
self.assertAlmostEqual(np.abs(fit_result.params.sig), 1.0)
self.assertAlmostEqual(fit_result.params.x0, 0.0)
# raise Exception([i for i in fit_result.params])
sexy = g(x=2.0, **fit_result.params)
ugly = g(
x=2.0,
x0=fit_result.params.x0,
A=fit_result.params.A,
sig=fit_result.params.sig,
)
self.assertEqual(sexy, ugly)
def test_2_gaussian_2d_fitting(self):
np.random.seed(4242)
        mean = (0.3, 0.3)  # x, y mean of the first Gaussian
cov = [[0.01**2,0],[0,0.01**2]]
data = np.random.multivariate_normal(mean, cov, 1000000)
        mean = (0.7, 0.7)  # x, y mean of the second Gaussian
|
kylon/pacman-fakeroot | test/pacman/tests/sync020.py | Python | gpl-2.0 | 357 | 0.008403 | self.description = "Install a group f | rom a sync db"
sp1 = pmpkg("pkg1")
sp1.groups = ["grp"]
sp2 = pmpkg("pkg2")
sp2.groups = ["grp"]
sp3 = pmpkg("pkg3")
sp3.groups = ["grp"]
for p in sp1, sp2, sp3:
self.addpkg2db("sync", p);
self.args = "-S %s" % "grp"
self.addrule("PACMAN_RETCODE=0")
for p in sp1, sp2, sp3:
self.addrule("PKG_EXIST=%s" % | p.name)
|
xpansa/purchase-workflow | purchase_requisition_multicurrency/model/purchase_order.py | Python | agpl-3.0 | 2,905 | 0 | # -*- coding: utf-8 -*-
#
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import m | odels, fields, api
import openerp.addons.decimal_precision as dp
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.one
@api.depends('price_unit',
'price_subtotal',
'order_id.pricelist_id.currency_i | d',
'order_id.requisition_id.date_exchange_rate',
'order_id.requisition_id.currency_id')
def _compute_prices_in_company_currency(self):
""" """
requisition = self.order_id.requisition_id
date = requisition.date_exchange_rate or fields.Date.today()
from_curr = self.order_id.currency_id.with_context(date=date)
if requisition and requisition.currency_id:
to_curr = requisition.currency_id
else:
to_curr = self.order_id.company_id.currency_id
self.price_unit_co = from_curr.compute(self.price_unit,
to_curr, round=False)
self.price_subtotal_co = from_curr.compute(self.price_subtotal,
to_curr, round=False)
@api.multi
def _requisition_currency(self):
for rec in self:
requisition = rec.order_id.requisition_id
if requisition:
rec.requisition_currency = requisition.currency_id
price_unit_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Unit Price",
digits=dp.get_precision('Account'),
store=True,
help="Unit Price in company currency."
)
price_subtotal_co = fields.Float(
compute='_compute_prices_in_company_currency',
string="Subtotal",
digits=dp.get_precision('Account'),
store=True,
help="Subtotal in company currency."
)
order_currency = fields.Many2one(string="Currency", readonly=True,
related="order_id.currency_id")
requisition_currency = fields.Many2one(
"res.currency", string="Requisition Currency", readonly=True,
compute="_requisition_currency")
|
ufal/neuralmonkey | scripts/preprocess_bert.py | Python | bsd-3-clause | 3,940 | 0.000254 | #!/usr/bin/env python3
"""Creates training data for the BERT network training
(noisified + masked gold predictions) using the input corpus.
The masked Gold predictions use Neural Monkey's PAD_TOKEN to indicate
tokens that should not be classified during training.
We only leave `coverage` percent of symbols for classification. These
symbols are left unchanged on input with a probability of `1 - mask_prob`.
If they are being changed, they are replaced by the `mask_token` with a
probability of `1 - replace_prob` and by a random vocabulary token otherwise.
"""
import argparse
import os
import numpy as np
from neuralmonkey.logging import log as _log
from neuralmonkey.vocabulary import (
Vocabulary, PAD_TOKEN, UNK_TOKEN, from_wordlist)
def log(message: str, color: str = "blue") -> None:
_log(message, color)
def main() -> None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--input_file", type=str, default="/dev/stdin")
parser.add_argument("--vocabulary", type=str, required=True)
parser.add_argument("--output_prefix", type=str, default=None)
parser.add_argument("--mask_token", type=str, default=UNK_TOKEN,
help="token used to mask the tokens")
parser.add_argument("--coverage", type=float, default=0.15,
help=("percentage of tokens that should be left "
"for classification during training"))
parser.add_argument("--mask_prob", type=float, default=0.8,
help=("probability of the classified token being "
"replaced by a different token on input"))
parser.add_argument("--replace_prob", type=float, default=0.1,
help=("probability of the classified token being "
"replaced by a random token instead of "
"mask_token"))
parser.add_argument("--vocab_contains_header", type=bool, default=True)
parser.add_argument("--vocab_contains_frequencies",
type=bool, default=True)
args = parser.parse_args()
assert (args.coverage <= 1 and args.coverage >= 0)
assert (args.mask_prob <= 1 and args.mask_prob >= 0)
assert (args.replace_prob <= 1 and args.replace_prob > | = 0)
log("Loading vocabulary.")
vocabulary = from_wordlist(
args.vocabulary,
contains_header=args.vocab_contains_header,
contains_frequencies=args.vocab_contains_frequencies)
mask_prob = args.mask_prob
replace_prob = args.replace_prob
keep_prob = 1 - mask_prob - replace_prob
sam | ple_probs = (keep_prob, mask_prob, replace_prob)
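    # Note: the order of sample_probs must match the candidate order used
    # below in np.random.choice: [original token, mask_token, random token].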
output_prefix = args.output_prefix
if output_prefix is None:
output_prefix = args.input_file
out_f_noise = "{}.noisy".format(output_prefix)
out_f_mask = "{}.mask".format(output_prefix)
out_noise_h = open(out_f_noise, "w", encoding="utf-8")
out_mask_h = open(out_f_mask, "w", encoding="utf-8")
log("Processing data.")
with open(args.input_file, "r", encoding="utf-8") as input_h:
# TODO: performance optimizations
for line in input_h:
line = line.strip().split(" ")
num_samples = int(args.coverage * len(line))
sampled_indices = np.random.choice(len(line), num_samples, False)
output_noisy = list(line)
output_masked = [PAD_TOKEN] * len(line)
for i in sampled_indices:
random_token = np.random.choice(vocabulary.index_to_word[4:])
new_token = np.random.choice(
[line[i], args.mask_token, random_token], p=sample_probs)
output_noisy[i] = new_token
output_masked[i] = line[i]
out_noise_h.write(str(" ".join(output_noisy)) + "\n")
out_mask_h.write(str(" ".join(output_masked)) + "\n")
if __name__ == "__main__":
main()
|
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam | unittests/Applications/test_CompareDictionary.py | Python | gpl-2.0 | 116 | 0.008621 | import unittest
from PyFoam.Applica | tions.CompareDictionary import CompareDictionary
theSuite=unittest.TestSuite()
| |
bkolli/swift | test/__init__.py | Python | apache-2.0 | 2,485 | 0.000805 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this fil | e except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitat | ions under the License.
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
from __future__ import print_function
import sys
import os
try:
from unittest.util import safe_repr
except ImportError:
# Probably py26
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
# make unittests pass on all locale
import swift
setattr(swift, 'gettext_', lambda x: x)
from swift.common.utils import readconf
# Work around what seems to be a Python bug.
# c.f. https://bugs.launchpad.net/swift/+bug/820185.
import logging
logging.raiseExceptions = False
def get_config(section_name=None, defaults=None):
"""
Attempt to get a test config dictionary.
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
config = {}
if defaults is not None:
config.update(defaults)
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
try:
config = readconf(config_file, section_name)
except SystemExit:
if not os.path.exists(config_file):
print('Unable to read test config %s - file not found'
% config_file, file=sys.stderr)
elif not os.access(config_file, os.R_OK):
print('Unable to read test config %s - permission denied'
% config_file, file=sys.stderr)
else:
print('Unable to read test config %s - section %s not found'
% (config_file, section_name), file=sys.stderr)
return config
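# Hedged usage sketch (section name and defaults are illustrative):
#
#   conf = get_config('func_test', defaults={'auth_version': '1'})
#   auth_host = conf.get('auth_host', '127.0.0.1')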
|
zalmanu/Enjoy-City-Admin | admin/models.py | Python | apache-2.0 | 2,126 | 0.005644 | from django.db import models
# Create your models here.
from djangotoolbox.fields import ListField, EmbeddedModelField
from django_mongodb_engine.storage import GridFSStorage
gridfs_storage = GridFSStorage()
class Coordinates(models.Model):
lat = models.FloatField(null=False, blank=False)
lng = models.FloatField(null=False, blank=False)
class Meta:
verbose_name_plural = 'Coordinates'
class City(models.Model):
name = models.CharField(max_length=255)
#coordinates = EmbeddedModelField('Coordinates')
class Meta:
verbose_name_plural = 'Cities'
class Tag(models.Model):
value = models.CharField(max_length=255)
class Meta:
verbose_name_plural = 'Tags'
class Category(models.Model):
name = models.CharField(max_length=255)
photo = models.FileField(storage=gridfs_storage, upload_to='/media/categories/')
class Meta:
verbose_name_plural = 'Categories'
class Location(models.Model):
user = models.CharField(max_length=255)
name = models.CharField(max_length=255)
description = models.TextField()
category = models.IntegerField(null=True, blank=True)
tags = ListField(models.CharField(max_length=15), null=True, blank=True)
#coordinates = EmbeddedModelField('Coordinates')
email = models.EmailField()
phone_number = models.CharField(max_length=15, null=True, blank=True)
rating = models.PositiveSmallIntegerField(blank=True, null=True)
photo = models.FileField(st | orage=gridfs_storage, upload_to='/media/locations/',
null=True, blank=True)
class Meta:
verbose_name_plural = 'Locations'
class Content(models.Model):
location_id = models.CharField(max_length=15)
description = models.TextField(null=True, blank=True)
tags = ListF | ield(models.PositiveIntegerField(), null=True, blank=True)
photo = models.FileField(storage=gridfs_storage, upload_to='/media/locations/',
blank=True, null=True)
expiration_date = models.DateTimeField(null=True, blank=True)
class Meta:
verbose_name_plural = 'Content Items'
|
neurotechuoft/Wall-EEG | Code/OpenBCIPy/src/machine-learning/other/exercise_01_multichannel.py | Python | mit | 4,069 | 0.019169 | # -*- coding: utf-8 -*-
"""
BCI workshop 2015
Exercise 1b: A neurofeedback interface (multi-channel)
Description:
In this exercise, we'll try and play around with a simple interface that
receives EEG from mulitple electrodes, computes standard frequency band powers
and displays both the raw biosignals and the features.
"""
import mules # The signal acquisition toolbox we'll use (MuLES)
import numpy as np # Module that simplifies computations on matrices
import matplotlib.pyplot as plt # Module used for plotting
import bci_workshop_tools as BCIw # Bunch of useful functions for the workshop
if __name__ == "__main__":
# MuLES connection parameters
mules_ip = '127.0.0.1'
muse_port = 30000
# Creates a mules_client
mules_client = mules.MulesClient(mules_ip, muse_port)
params = mules_client.getparams() # Get the device | parameters
#%% Set the experiment parameters
eeg_buffer_secs = 15 # Size of the EEG data buffer used for plotting the
# signal (in seconds)
win_test_secs = 1 # Length of the window used for computing the features
# (in seconds)
overlap_secs = 0.5 # Overlap between two conse | cutive windows (in seconds)
shift_secs = win_test_secs - overlap_secs
    index_channel = 1 # Index of the channel to be used (with the Muse, we
# can choose from 0 to 3)
# Get name of features
names_of_features = BCIw.feature_names(params['names of channels'])
#%% Initialize the buffers for storing raw EEG and features
# Initialize raw EEG data buffer (for plotting)
eeg_buffer = np.zeros((params['sampling frequency']*eeg_buffer_secs, len(params['names of channels'])))
# Compute the number of windows in "eeg_buffer_secs" (used for plotting)
n_win_test = int(np.floor((eeg_buffer_secs - win_test_secs) / float(shift_secs) + 1))
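    # Worked example: with eeg_buffer_secs=15, win_test_secs=1 and
    # shift_secs=0.5, n_win_test = floor((15 - 1) / 0.5 + 1) = 29 windows.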
# Initialize the feature data buffer (for plotting)
feat_buffer = np.zeros((n_win_test, len(names_of_features)))
# Initialize the plots
plotter_eeg = BCIw.dataPlotter(params['sampling frequency']*eeg_buffer_secs,
params['names of channels'],
params['sampling frequency'])
plotter_feat = BCIw.dataPlotter(n_win_test,
names_of_features,
1 / float(shift_secs))
#%% Start pulling data
mules_client.flushdata() # Flush old data from MuLES
BCIw.beep() # Beep sound
# The try/except structure allows to quit the while loop by aborting the
# script with <Ctrl-C>
print(' Press Ctrl-C in the console to break the While Loop')
try:
# The following loop does what we see in the diagram of Exercise 1:
# acquire data, compute features, visualize the raw EEG and the features
while True:
""" 1- ACQUIRE DATA """
eeg_data = mules_client.getdata(shift_secs, False) # Obtain EEG data from MuLES
eeg_buffer = BCIw.updatebuffer(eeg_buffer, eeg_data) # Update EEG buffer
""" 2- COMPUTE FEATURES """
# Get newest samples from the buffer
data_window = BCIw.getlastdata(eeg_buffer, win_test_secs * params['sampling frequency'])
# Compute features on "data_window"
feat_vector = BCIw.compute_feature_vector(data_window, params['sampling frequency'])
feat_buffer = BCIw.updatebuffer(feat_buffer, np.asarray([feat_vector])) # Update the feature buffer
""" 3- VISUALIZE THE RAW EEG AND THE FEATURES """
plotter_eeg.updatePlot(eeg_buffer) # Plot EEG buffer
plotter_feat.updatePlot((feat_buffer)) # Plot the feature buffer
plt.pause(0.001)
except KeyboardInterrupt:
mules_client.disconnect() # Close connection |
izberg-marketplace/django-izberg | django_iceberg/models/user_models.py | Python | mit | 3,201 | 0.017182 | # -*- coding: utf-8 -*-
import json
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from .base_models import IcebergBaseModel
from icebergsdk.api import IcebergAPI
from django_iceberg.conf import ConfigurationDebug, ConfigurationDebugSandbox,\
ConfigurationSandbox, ConfigurationSandboxStage,\
ConfigurationStage, ConfigurationProd
class UserIcebergModel(IcebergBaseModel):
user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
iceberg_username = models.CharField(max_length = 255, null = True, blank = True)
access_token = models.CharField(max_length = 255, null = True, blank = True)
sso_data = models.TextField(_('Single Sign On Data'),
null = True, blank = True, help_text = _('Will keep SSO data for fast access'))
application_namespace = models.CharField(_('Application Namespace'),
max_length = 255, null = True, blank = True,
help_text = _('Allow Connection with several Applications'))
language = models.CharField(default="en", max_length = 10) # ISO
# Shopping Preference
shipping_country = models.CharField(default="FR", max_length = 10) # ISO
currency = models.CharField(default="EUR", max_length = 3) # ISO
def switch_env(self, new_env):
self.environment = new_env
## clearing sso data that wont be valid anymore on new_env
self.sso_data = None
self.access_token = None
self.save()
def get_conf(self):
enviro = self.environment
if getattr(settings, 'ICEBERG_USE_LOCAL', False):
if not enviro:
enviro = getattr(settings, 'ICEBERG_DEFAULT_ENVIRO', None)
if enviro == UserIcebergModel.ICEBERG_SANDBOX:
conf = ConfigurationDebugSandbox
else:
conf = ConfigurationDebug
else:
if not enviro:
enviro = getattr(settings, 'ICEBERG_DEFAULT_ENVIRO', None)
if enviro == UserIcebergModel.ICEBERG_SANDBOX:
conf = ConfigurationSandbox
elif enviro == UserIcebergModel.ICEBERG_SANDBOX_STAGE:
conf = ConfigurationSandboxStage
elif enviro == UserIcebergModel.ICEBERG_STAGE:
conf = ConfigurationStage
else: # None or UserIcebergModel.ICEBERG_PROD
conf = ConfigurationProd
return conf
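        # Summary of the mapping implemented above: ICEBERG_USE_LOCAL selects
        # ConfigurationDebug (or ConfigurationDebugSandbox when the environment
        # is the sandbox); otherwise sandbox, sandbox stage and stage map to
        # their matching Configuration classes, and anything else falls back
        # to ConfigurationProd.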
def get_api_handler(self):
conf = self.get_conf()
api_handler = IcebergAPI(conf = conf, username = self.iceberg_u | sername, access_token = self.access_token)
return api_handler
def sync_shopping_preferences(self):
"""
Will | forward shopping preference to Iceberg
"""
api_handler = self.get_api_handler()
api_handler._sso_response = json.loads(self.sso_data)
shopping_preference = api_handler.me().shopping_preference
shopping_preference.country = api_handler.Country.search({"code": self.shipping_country})[0][0]
shopping_preference.currency = self.currency
shopping_preference.save()
|
ad-m/sledzenie_listow | sledzenie_listow/settings.py | Python | bsd-3-clause | 1,203 | 0.000831 | # -*- coding: utf-8 -*-
import os
os_env = os.environ
class Config(object):
SECRET_KEY = os_env.get('SLEDZENIE_LISTOW_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
# Pu | t the db file in project root
DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True # Don't bundle/minify static assets
CACHE_TYPE = 'simple' # Can be "mem | cached", "redis", etc.
class TestConfig(Config):
TESTING = True
DEBUG = True
BCRYPT_LOG_ROUNDS = 1 # For faster tests
WTF_CSRF_ENABLED = False # Allows form testing
|
stoq/stoqdrivers | stoqdrivers/printers/base.py | Python | lgpl-2.1 | 6,610 | 0.000605 | # -*- Mode: Python; coding: iso-8859-1 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Stoqdrivers
# Copyright (C) 2005 Async Open Source <http://www.async.com.br>
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
# USA.
#
# Author(s): Henrique Romano <henrique@async.com.br>
#
"""
Generic base class implementation for all printers
"""
from zope.interface import providedBy, implementer
from stoqdrivers.base import BaseDevice
from stoqdrivers.enum import DeviceType
from stoqdrivers.interfaces import (ICouponPrinter,
IDriverConstants,
IChequePrinter,
INonFiscalPrinter)
from stoqdrivers.serialbase import SerialBase
from stoqdrivers.translation import stoqdrivers_gettext
from stoqdrivers.usbbase import UsbBase
from stoqdrivers.utils import get_obj_from_module
_ = stoqdrivers_gettext
_NoDefault = object()
@implementer(IDriverConstants)
class BaseDriverConstants:
# Must be defined on subclasses
_constants = None
@classmethod
def get_items(cls):
return list(cls._constants.keys())
@classmethod
def get_value(cls, identifier, default=_NoDefault):
try:
return cls._constants[identifier]
except KeyError:
if default is not _NoDefault:
return default
raise ValueError("The constant identifier %r "
"isn't valid", identifier)
class BasePrinter(BaseDevice):
device_dirname = "printers"
device_type = DeviceType.PRINTER
def check_interfaces(self):
driver_interfaces = providedBy(self._driver)
if (ICouponPrinter not in driver_interfaces
and IChequePrinter not in driver_interfaces
and INonFiscalPrinter not in driver_interfaces):
raise TypeError("The driver `%r' doesn't implements a valid "
"interface" % self._driver)
def get_constants(self):
return self._driver.get_constants()
def get_tax_constant(self, item):
for enum, constant, value in self.get_tax_constants():
if enum == item:
return constant
def get_model_name(self):
return self._driver.model_name
def get_virtual_printer():
from stoqdrivers.printers.fiscal import FiscalPrinter
return FiscalPrinter(brand='virtual', model='Simple')
def get_supported_printers(include_virtual=False):
result = {}
config = [
('bematech', ['DP20C', 'MP20', 'MP2100', 'MP2100TH', 'MP4200TH', 'MP25']),
('daruma', ['DR700', 'FS2100', 'FS345', 'FS600MFD']),
('dataregis', ['EP375', 'Quick']),
('elgin', ['I9', 'KFiscal']),
('tanca', ['TP650']),
('epson', ['FBII', 'FBIII', 'TMT20', 'TMT70']),
('fiscnet', ['FiscNetECF']),
('perto', ['Pay2023']),
('snbc', ['BKC310']),
('sweda', ['SI150']),
]
if include_virtual:
config.append(('virtual', ['Simple']))
for brand, module_names in config:
result[brand] = []
for module_name in module_names:
full_module_name = "stoqdrivers.printers.%s.%s" % (brand, module_name)
obj = get_obj_from_module(full_module_name, obj_name=module_name)
if not hasattr(obj, 'supported'):
continue
result[brand].append(obj)
return result
def get_supported_printers_by_iface(interface, protocol=None,
include_virtual=False):
""" Returns all the printers that supports the interface. The result
format is the same for get_supported_printers.
@param interface: The interface the printer implements
(ICouponPrinter, IChequePrinter or INonFiscalPrinter)
@param protocol: The protocol in which the printer is connected
(None (all protocols), u | sb, serial or ethernet)
@param include_virtual: If the virtual printer (for development) should be
| included in the results
"""
# Select the base class depending on which interface has been chosen
# TODO: Implement Ethernet interface support
base_class = {
'usb': UsbBase,
'serial': SerialBase,
'ethernet': None,
None: object,
}[protocol]
if interface not in (ICouponPrinter, IChequePrinter, INonFiscalPrinter):
raise TypeError("Interface specified (`%r') is not a valid "
"printer interface" % interface)
all_printers_supported = get_supported_printers(include_virtual=include_virtual)
result = {}
for model, driver_list in all_printers_supported.items():
drivers = []
for driver in driver_list:
if interface.implementedBy(driver) and issubclass(driver, base_class):
drivers.append(driver)
if drivers:
result[model] = drivers
return result
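# Hedged usage sketch (the chosen interface and protocol are illustrative):
#
#   from stoqdrivers.interfaces import ICouponPrinter
#   drivers = get_supported_printers_by_iface(ICouponPrinter, protocol='serial')
#   for brand, classes in drivers.items():
#       print(brand, [cls.__name__ for cls in classes])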
def get_usb_printer_devices():
""" List all printers connected via USB """
try:
import usb.core
except ImportError:
# No pyusb > 1.0.0 support, return an empty list
return []
def is_printer(device):
""" Tests whether a device is a printer or not """
# Devices with either bDeviceClass == 7 or bInterfaceClass == 7 are
# printers
if device.bDeviceClass == 7:
return True
try:
for configuration in device:
for interface in configuration:
if interface.bInterfaceClass == 7:
return True
return False
except Exception:
pass
return list(usb.core.find(find_all=True, custom_match=is_printer))
def get_baudrate_values():
""" Returns baudrate values to configure the communication speed with
serial port.
"""
return ['4800', '9600', '19200', '38400', '57600', '115200']
|
ahmedaljazzar/edx-platform | openedx/core/djangoapps/credit/tests/test_signals.py | Python | agpl-3.0 | 5,259 | 0.002282 | """
Tests for minimum grade requirement status
"""
import ddt
import pytz
from datetime import timedelta, datetime
from mock import MagicMock
from django.test.client import RequestFactory
from course_modes.models import CourseMode
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from openedx.core.djangoapps.credit.api import (
set_credit_requirements, get_credit_requirement_status
)
from openedx.core.djangoapps.credit.models import CreditCourse, CreditProvider
from openedx.core.djangoapps.credit.signals import listen_for_grade_calculation
from openedx.core.djangolib.testing.utils import skip_unless_lms
@skip_unless_lms
@ddt.ddt
class TestMinGradedRequirementStatus(ModuleStoreTestCase):
"""Test cases to check the minimum grade requirement status updated.
If user grade is above or equal to min-grade then status will be
satisfied. But if student grade is less than and deadline is passed then
user will be marked as failed.
"""
VALID_DUE_DATE = datetime.now(pytz.UTC) + timedelta(days=20)
EXPIRED_DUE_DATE = datetime.now(pytz.UTC) - timedelta(days=20)
shard = 2
DATES = {
'valid': VALID_DUE_DATE,
'expired': EXPIRED_DUE_DATE,
None: None,
}
def setUp(self):
super(TestMinGradedRequirementStatus, self).setUp()
self.course = CourseFactory.create(
org='Robot', number='999', display_name='Test Course'
)
self.user = UserFactory()
self.request = RequestFactory().get('/')
self.request.user = self.user
self.client.login(username=self.user.username, password=self.user.password)
# Enable the course for credit
CreditCourse.objects.create(
course_key=self.course.id,
enabled=True,
)
# Configure a credit provider for the course
CreditProvider.objects.create(
provider_id="ASU",
enable_integration=True,
provider_url="https://credit.example.com/request",
)
requirements = [{
"namespace": "grade",
"name": "grade",
"display_name": "Grade",
"criteria": {"min_grade": 0.52},
}]
# Add a single credit requirement (final grade)
set_credit_requirements(self.course.id, requirements)
# Enroll user in verified mode.
self.enrollment = CourseEnrollment.enroll(self.user, self.course.id, mode=CourseMode.VERIFIED)
def assert_requirement_status(self, grade, due_date, expected_status):
""" Verify the user's credit requirement status is as expected after simulating a grading calculation. """
course_grade = MagicMock()
course_grade.percent = grade
listen_for_grade_calculation(None, self.user, course_grade, self.course.id, due_date)
req_status = get_credit_requirement_status(self.course.id, self.request.user.username, 'grade', 'grade')
self.assertEqual(req_status[0]['status'], expected_status)
if expected_status == 'satisfied':
expected_reason = {'final_grade': grade}
self.assertEqual(req_status[0]['reason'], expected_reason)
@ddt.data(
(0.6, 'valid'),
(0.52, None),
)
@ddt.unpack
def test_min_grade_requirement_with_valid_grade(self, grade, due_date_name):
"""Test with valid grades submitted before deadline"""
self.assert_requirement_status(grade, self.DATES[due_date_name], 'satisfied')
def test_grade_changed(self):
""" Verify successive calls to update a satisfied grade requirement are recorded. """
self.assert_requirement_status(0.6, self.VALID_DUE_DATE, 'satisfied')
self.assert_requirement_status(0.75, self.VALID_DUE_DATE, 'satisfied')
self.assert_requirement_status(0.70, self.VALID_DUE_DATE, 'satisfied')
def test_min_grade_requirement_with_valid_grade_and_expired_deadline(self):
""" Verify the status is set to failure if a passing grade is received past the submission deadline. """
self.assert_requirement_status(0.70, self.EXPIRED_DUE_DATE, 'failed')
@ddt.data(
(0.50, None),
(0.51, None),
(0.40, 'valid'),
)
@ddt.unpack
def test_min_grade_requirement_failed_grade_valid_deadline(self, grade, due_date_name):
"""Test with failed grades and deadline is still open or not defined."""
self.assert_requirement_status(grade, self.DATES[due_date_name], None)
def test_min_grade_requirement_failed_grade_expired_deadline(self) | :
"""Test with failed grades and deadline expire"""
self.assert_requirement_status(0.22, self.EXPIRED_DUE_DATE, 'failed')
@ddt.data(
CourseMode.AUDIT,
CourseMode.HONOR,
CourseMode.CREDIT_MODE
)
def test_requirement_failed_for_non_verified_enrollment(self, mode):
"""Test with valid grades submitted before dead | line with non-verified enrollment."""
self.enrollment.update_enrollment(mode, True)
self.assert_requirement_status(0.8, self.VALID_DUE_DATE, None)
|
idea4bsd/idea4bsd | python/testData/refactoring/move/function/before/src/a.py | Python | apache-2.0 | 353 | 0.002833 | from lib1 import urlopen
def f(url):
'''Retur | n the representation available at the URL.
'''
return urlopen(url).read()
def f_usage():
return f(14)
class C(object):
def g(self, x):
return x
class D(C):
def g(self, x, | y):
return super(D, self).f(x) + y
class E(object):
def g(self):
return -1 |
TeamBasedLearning/Service | pgtbl/accounts/tests/test_read_user.py | Python | gpl-3.0 | 2,537 | 0 | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from accounts.models import User
from accounts.serializers import UserSerializer, UserRegisterSerializer
class ReadUserTestCase(APITestCase):
"""
Test to show all or a single user of the system.
"""
def setUp(self):
"""
This method will run before any test.
"""
self.superuser = User.objects.create_superuser(
name='Victor Arnaud',
email='victorhad@gmail.com',
password='victorhad123456'
)
self.user = User.objects.create(
name='Pedro Calile',
email='pedro@gmail.com',
password='pedro123456'
)
self.client.force_authenticate(self.user)
def tearDown(self):
"""
This method will run after any test.
"""
self.superuser.delete()
self.client.logout()
self.user.delete()
def test_valid_user_list(self):
"""
Test found the user list.
"""
users = User.objects.all()
serializer = UserRegisterSerializer(users, many=True)
url = reverse('accounts:list-create')
response = self.client.get(url)
self.assertEquals(User.objects.count(), 2)
self.assertEquals(response.data, serializer.data)
self.assertEquals(response.status_code, status.HTTP_200_OK)
def test_valid_own_user_detail(self):
"""
Test found the own user.
"""
url = reverse('accounts:details', kwargs={'pk': self.user.pk})
serializer = UserSerializer(self.user)
response = self.client.get(url)
self.assertEquals(response.data, serializer.data)
self.assertEquals(response.status_code, status.HTTP_200_OK)
def test_valid_another_user_detail(self):
"""
Test found the specific user.
"""
url = reverse('accounts:details', kwargs={'pk': self.superuser.pk})
serializer = UserSerializer(self.superuser)
response = self.client | .get(url)
self.assertEquals(response.data, serializer.data)
self.assertEquals(response.status_code, status.HTTP_200_OK)
def test_invalid_url_user_detail(self):
"""
Test to not found the specific user.
"""
| url_invalid = reverse('accounts:details', kwargs={'pk': 30})
response = self.client.get(url_invalid)
self.assertEquals(response.status_code, status.HTTP_404_NOT_FOUND)
|
pedropva/AIprojects | sources/Data.py | Python | gpl-3.0 | 5,965 | 0.053646 | from Node import Node
from Utils import Utils
class Data:
"""description of class"""
def __init__(self):
pass
@staticmethod
def cities():
neighbors = {'Arad':('Sibiu','Timisoara','Zerind'),'Zerind':('Arad','Oradea'),'Oradea':('Sibiu','Zerind'),'Timisoara':('Arad','Lugoj'),'Lugoj':('Mehadia','Timisoara'),'Mehadia':('Drobeta','Lugoj'),'Drobeta':('Craiova','Mehadia'),'Craiova':('Drobeta','Pitesti','Rimnicu'),'Sibiu':('Arad','Fagaras','Oradea','Rimnicu'),'Fagaras':('Bucareste','Sibiu'),'Rimnicu':('Craiova','Pitesti','Sibiu'),'Pitesti':('Bucareste','Craiova','Rimnicu'),'Neamt':('Iasi',),'Iasi':('Neamt','Vaslui'),'Vaslui':('Iasi','Urziceni'),'Bucareste':('Fagaras','Giurgiu','Pitesti','Urziceni'),'Giurgiu':('Bucareste',),'Urziceni':('Bucareste','Hirsova','Vaslui'),'Hirsova':('Eforie','Urziceni'),'Eforie':('Hirsova',)}
distances = {'Arad':(140,118,75),'Zerind':(75,71),'Oradea':(151,71),'Timisoara':(118,111),'Lugoj':(70,111),'Mehadia':(75,70),'Drobeta':(120,75),'Craiova':(120,138,146),'Sibiu':(140,99,151,80),'Fagaras':(211,99),'Rimnicu':(146,97,80),'Pitesti':(101,138,97),'Neamt':(87,),'Iasi':(87,92),'Vaslui':(92,142),'Bucareste':(211,90,101,85),'Giurgiu':(90,),'Urziceni':(85,98,142),'Hirsova':(86,98),'Eforie':(86,)}
expectations ={"Arad" : 366,"Bucareste" : 0,"Craiova" : 160,"Drobeta" : 242,"Eforie" : 161,"Fagaras" : 176,"Giurgiu" : 77,"Hirsova" : 151,"Iasi" : 226,"Lugoj" : 244,"Mehadia" : 241,"Neamt" : 234,"Oradea" : 380,"Pitesti" : 100,"Rimnicu" : 193,"Sibiu" : 253,"Timisoara" : 329,"Urziceni" : 80,"Vaslui" : 199,"Zerind" : 374}
cities= {}
for i in neighbors.keys():
cities[i] = Node(i,0)
for city in cities.keys():
cities[city].setExpectation(expectations[city])
for neighbor in neighbors[city]:
cities[city].addNeighbor(cities[neighbor],distances[city][neighbors[city].index(neighbor)])
return cities
@staticmethod
def cfg():
states ={"bcfg|":("fg|bc","cfg|b"),"cfg|b":("bcfg|",),"fg|bc":("bfg|c","bcfg|"),"bfg|c":("g|bcf","f|bcg","fg|bc"),"g|bcf":("bfg|c","bcg|f"),"bcg|f":("c|bfg","g|bcf"),"f|bcg":("bcf|g","bfg|c"),"bcf|g":("c|bfg","f|bcg"),"c|bfg":("bc|fg","bcf|g","bcg|f"),"bc|fg":("|bcfg","c|bfg"),"|bcfg":("bc|fg","b|cfg"),"b|cfg":("|bcfg",)}
situations = {}
for i in states.keys():
situations[i] = Node(i,0)
for s in states.keys():
for neighbor in states[s]:
situations[s].addNeighbor(situations[neighbor],1)
return situations
@staticmethod
def puzzle(matrix,objective):
puzzle = Node(matrix,1)
if(puzzle.getObjective() == None and objective != None):##if objective not set then set
puzzle.setObjective(objective)
puzzle.setExpectation(Utils.getExpectationOfMatrix(matrix,puzzle.getObjective()))
return puzzle
@staticmethod
def cfgDecode(path):
##separate left and right
count = 0
current = ""
while (count < len(path)):
current = path[count]
if(current == 'b') : print("Boatman",end = ' ')
elif(current == 'g') : print("Grains",end = ' ')
elif(current == 'c') : print("Chicken",end = ' ')
elif(current == 'f') : print("Fox",end = ' ')
elif(current == ',') : print()
else: print(current,end = '')
count += 1
@staticmethod
def cfgCode(state):
if(len(state) != 8):return False
for i in state:
if(not((i == 'b') or (i == 'c') or (i == 'f') or (i == 'g') or (i == '|') or (i == ','))):return False
state = state.split('|')
l = state[0][:]
r = state[1][:]
l = sorted(l.split(','))
r = sorted(r.split(','))
return ''.join(l) +'|'+ ''.join(r)
@staticmethod
def puzzleDecode(path,cost):
path = path.split('],[')
for l in path:
l = l.split('], [')
for r in l:
r = r.replace("]", "")
r = r.replace("[", "")
print(r)
print()
print('With cost: ',cost)
@staticmethod
    def insertPriorityHeuristics(queue,node):##inserting in a priority queue comparing their weights and their heuristics, used for aStar search
        x = 0
        for q in queue:
            if(node[2]+node[0].getExpectation() < q[2]+ q[0].getExpectation()):
                queue.insert(x,node)
                return
            x += 1
        # Reached when the queue is empty or the node belongs at the end.
        queue.app | end(node)
        return
@staticmethod
def isInList(list, obj):
for item in list:
if item[0].getName() == obj.getName():##testing two nodes, i use [0] because every traveled node is a list with the node itself and the string with the path to him
return True
return False
@staticmethod
    def isAValidMatrix(matrix):
maxN = len(matrix) * len(matrix[0])
minN = 0
numbers= []
for l in matri | x:
for r in l:
if r > maxN or r < minN:
return False
if r not in numbers:
numbers.append(r)
else:
return False
return True
@staticmethod
    def isSolvable(matrix,finalMatrixIsPair):
        # Count inversions over the flattened board, skipping the blank (0):
        # a pair is inverted when an earlier tile holds a larger value.
        flat = [n for line in matrix for n in line if n != 0]
        invertions = 0
        for i in range(0, len(flat)):
            for j in range(i + 1, len(flat)):
                if flat[i] > flat[j]:
                    invertions = invertions + 1
        # Solvable when the parity of the inversion count matches the goal's.
        if((invertions%2==0) == (finalMatrixIsPair)):
            return True
        else:
            return False
|
ihaywood3/easygp | db/proda.py | Python | gpl-3.0 | 882 | 0.011338 | #!/usr/bin/python
"""
An interface to the web-based PROvider Direct Access (PRODA) system
of Medicare Australia
"""
import mechanize # available via PIP
import re
m = mechanize.Browser()
m.open("https://proda.humanservices.gov.au/prodalogin/pages/public/login.jsf?TAM_OP=l | ogin&USER")
m.select_form(name="loginFormAndStuff")
m['loginFormAndStuff:inputPassword'] = "Drc232crq838"
m['loginFormAndStuff:username'] = 'ihaywood'
m.submit()
m.select_form(nr=0)
m['otp.user.otp'] = raw_input("Emailed code")
m.submit()
print m.response().read()
#m.open("https://www2.medicareaustralia.gov.au:5447/pcert/hpos/home.do")
#m.select_form(name="termsAndCond | itionsForm")
#m['action'] = "I agree"
#m.submit()
#m.follow_link(text_regex=re.compile("Claims"))
#m.follow_link(text_regex=re.compile("Make a new claim"))
#m.follow_link(text_regex=re.compile("Medicare Bulk Bill Webclaim"))
print m.read() |
devilry/devilry-django | devilry/project/develop/testhelpers/soupselect.py | Python | bsd-3-clause | 5,265 | 0.003229 | """
soupselect.py - https://code.google.com/p/soupselect/
CSS selector support for BeautifulSoup.
soup = BeautifulSoup('<html>...')
select(soup, 'div')
- returns a list of div elements
select(soup, 'div#main ul a')
- returns a list of links inside a ul inside div#main
"""
import re
from bs4 import BeautifulSoup
tag_re = re.compile('^[a-z0-9]+$')
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
def attribute_checker(operator, attribute, value=''):
"""
Takes an operator, attribute and optional value; returns a function that
will return True for elements that match that combination.
"""
return {
'=': lambda el: el.get(attribute) == value,
# attribute includes value as one of a set of space separated tokens
'~': lambda el: value in el.get(attribute, '').split(),
# attribute starts with value
'^': lambda el: el.get(attribute, '').startswith(value),
# attribute ends with value
'$': lambda el: el.get(attribute, '').endswith(value),
# attribute contains value
'*': lambda el: value in el.get(attribute, ''),
# attribute is either exactly value or starts with value-
'|': lambda el: el.get(attribute, '') == value \
or el.get(attribute, '').startswith('%s-' % value),
}.get(operator, lambda el: attribute in el)
def select(soup, selector):
"""
soup should be a BeautifulSoup instance; selector is a C | SS selector
specifying the elements you want to retrieve.
"""
tokens = selector.split()
current_context = [soup]
for | token in tokens:
m = attribselect_re.match(token)
if m:
# Attribute selector
tag, attribute, operator, value = m.groups()
if not tag:
tag = True
checker = attribute_checker(operator, attribute, value)
found = []
for context in current_context:
found.extend([el for el in context.findAll(tag) if checker(el)])
current_context = found
continue
if '#' in token:
# ID selector
tag, id = token.split('#', 1)
if not tag:
tag = True
el = current_context[0].find(tag, {'id': id})
if not el:
return [] # No match
current_context = [el]
continue
if '.' in token:
# Class selector
tag, klass = token.split('.', 1)
if not tag:
tag = True
found = []
for context in current_context:
found.extend(
context.findAll(tag,
{'class': lambda attr: attr and klass in attr.split()}
)
)
current_context = found
continue
if token == '*':
# Star selector
found = []
for context in current_context:
found.extend(context.findAll(True))
current_context = found
continue
# Here we should just have a regular tag
if not tag_re.match(token):
return []
found = []
for context in current_context:
found.extend(context.findAll(token))
current_context = found
return current_context
def monkeypatch(BeautifulSoupClass=None):
"""
If you don't explicitly state the class to patch, defaults to the most
common import location for BeautifulSoup.
"""
if not BeautifulSoupClass:
from BeautifulSoup import BeautifulSoup as BeautifulSoupClass
BeautifulSoupClass.findSelect = select
def unmonkeypatch(BeautifulSoupClass=None):
if not BeautifulSoupClass:
from BeautifulSoup import BeautifulSoup as BeautifulSoupClass
delattr(BeautifulSoupClass, 'findSelect')
def cssFind(html, selector):
"""
Parse ``html`` with class:`BeautifulSoup.BeautifulSoup` and use
:func:`.select` on the result.
Added by Espen A. Kristiansen to make it even easier to use for testing.
"""
soup = BeautifulSoup(html)
return select(soup, selector)
def cssGet(html, selector):
"""
Same as :func:`.cssFind`, but returns the first match.
Added by Espen A. Kristiansen to make it even easier to use for testing.
"""
try:
return cssFind(html, selector)[0]
    except IndexError:
        raise IndexError('Could not find {}.'.format(selector))
def cssExists(html, selector):
"""
Same as :func:`.cssFind`, but returns ``True`` if the selector matches at least one item.
Added by Espen A. Kristiansen to make it even easier to use for testing.
"""
matches = cssFind(html, selector)
return bool(len(matches))
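# Hedged usage sketch of the convenience helpers (markup is illustrative):
#
#   html = '<div id="main"><ul><li class="item">hi</li></ul></div>'
#   cssFind(html, 'div#main li.item')  # -> [<li class="item">hi</li>]
#   cssExists(html, 'li.missing')      # -> False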
def prettyhtml(html):
return BeautifulSoup(html).prettify()
def normalize_whitespace(html):
return re.sub('(\s|\\xa0)+', ' ', html).strip()
|
tombstone/models | research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py | Python | apache-2.0 | 12,618 | 0.004755 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bayesian NN using factorized VI (Bayes By Backprop. Blundell et al. 2014).
See https://arxiv.org/abs/1505.05424 for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from absl import flags
from bandits.core.bayesian_nn import BayesianNN
FLAGS = flags.FLAGS
def log_gaussian(x, mu, sigma, reduce_sum=True):
"""Returns log Gaussian pdf."""
res = (-0.5 * np.log(2 * np.pi) - tf.log(sigma) - tf.square(x - mu) /
(2 * tf.square(sigma)))
if reduce_sum:
return tf.reduce_sum(res)
else:
return res
def analytic_kl(mu_1, sigma_1, mu_2, sigma_2):
"""KL for two Gaussian distributions with diagonal covariance matrix."""
sigma_1_sq = tf.square(sigma_1)
sigma_2_sq = tf.square(sigma_2)
t1 = tf.square(mu_1 - mu_2) / (2. * sigma_2_sq)
t2 = (sigma_1_sq/sigma_2_sq - 1. - tf.log(sigma_1_sq) + tf.log(sigma_2_sq))/2.
return tf.reduce_sum(t1 + t2)
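# Closed form implemented above, per (diagonal) dimension:
#   KL(N(mu_1, sigma_1^2) || N(mu_2, sigma_2^2))
#     = (mu_1 - mu_2)^2 / (2 sigma_2^2)
#       + (sigma_1^2 / sigma_2^2 - 1 - log sigma_1^2 + log sigma_2^2) / 2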
class VariationalNeuralBanditModel(BayesianNN):
"""Implements an approximate Bayesian NN using Variational Inference."""
def __init__(self, hparams, name="BBBNN"):
self.name = name
self.hparams = hparams
self.n_in = self.hparams.context_dim
self.n_out = self.hparams.num_actions
self.layers = self.hparams.layer_sizes
self.init_scale = self.hparams.init_scale
self.f_num_points = None
if "f_num_points" in hparams:
self.f_num_points = self.hparams.f_num_points
self.cleared_times_trained = self.hparams.cleared_times_trained
self.initial_training_steps = self.hparams.initial_training_steps
self.training_schedule = np.linspace(self.initial_training_steps,
self.hparams.training_epochs,
self.cleared_times_trained)
self.verbose = getattr(self.hparams, "verbose", True)
self.weights_m = {}
self.weights_std = {}
self.biases_m = {}
self.biases_std = {}
self.times_trained = 0
if self.hparams.use_sigma_exp_transform:
self.sigma_transform = tf.exp
self.inverse_sigma_transform = np.log
else:
self.sigma_transform = tf.nn.softplus
self.inverse_sigma_transform = lambda y: y + np.log(1. - np.exp(-y))
# Whether to use the local reparameterization trick to compute the loss.
# See details in https://arxiv.org/abs/1506.02557
self.use_local_reparameterization = True
self.build_graph()
def build_mu_variable(self, shape):
"""Returns a mean variable initialized as N(0, 0.05)."""
return tf.Variable(tf.random_normal(shape, 0.0, 0.05))
def build_sigma_variable(self, shape, init=-5.):
"""Returns a sigma variable initialized as N(init, 0.05)."""
# Initialize sigma to be very small initially to encourage MAP opt first
return tf.Variable(tf.random_normal(shape, init, 0.05))
def build_layer(self, input_x, input_x_local, shape,
layer_id, activation_fn=tf.nn.relu):
"""Builds a variational layer, and computes KL term.
Args:
input_x: Input to the variational layer.
input_x_local: Input when the local reparameterization trick was applied.
shape: [number_inputs, number_outputs] for the layer.
layer_id: Number of layer in the architecture.
activation_fn: Activation function to apply.
Returns:
output_h: Output of the variational layer.
output_h_local: Output when local reparameterization trick was applied.
neg_kl: Negative KL term for the layer.
"""
w_mu = self.build_mu_variable(shape)
w_sigma = self.sigma_transform(self.build_sigma_variable(shape))
w_noise = tf.rand | om_normal(shape)
w = w_mu + w_sigma * w_noise
b_mu = self.build_mu_variable([1, shape[1]])
b_sigma = self.sigma_transform(self.build_sigma_variable([1, shape[1]]))
| b = b_mu
# Store means and stds
self.weights_m[layer_id] = w_mu
self.weights_std[layer_id] = w_sigma
self.biases_m[layer_id] = b_mu
self.biases_std[layer_id] = b_sigma
# Create outputs
output_h = activation_fn(tf.matmul(input_x, w) + b)
if self.use_local_reparameterization:
# Use analytic KL divergence wrt the prior
neg_kl = -analytic_kl(w_mu, w_sigma,
0., tf.to_float(np.sqrt(2./shape[0])))
else:
# Create empirical KL loss terms
log_p = log_gaussian(w, 0., tf.to_float(np.sqrt(2./shape[0])))
log_q = log_gaussian(w, tf.stop_gradient(w_mu), tf.stop_gradient(w_sigma))
neg_kl = log_p - log_q
# Apply local reparameterization trick: sample activations pre nonlinearity
m_h = tf.matmul(input_x_local, w_mu) + b
v_h = tf.matmul(tf.square(input_x_local), tf.square(w_sigma))
output_h_local = m_h + tf.sqrt(v_h + 1e-6) * tf.random_normal(tf.shape(v_h))
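    # Sampling the pre-activation directly is equivalent in distribution to
    # sampling w: for Gaussian weights, x.w is Gaussian with mean x.w_mu and
    # variance (x^2).(w_sigma^2), and typically gives lower-variance gradients.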
output_h_local = activation_fn(output_h_local)
return output_h, output_h_local, neg_kl
def build_action_noise(self):
"""Defines a model for additive noise per action, and its KL term."""
# Define mean and std variables (log-normal dist) for each action.
noise_sigma_mu = (self.build_mu_variable([1, self.n_out])
+ self.inverse_sigma_transform(self.hparams.noise_sigma))
noise_sigma_sigma = self.sigma_transform(
self.build_sigma_variable([1, self.n_out]))
pre_noise_sigma = (noise_sigma_mu
+ tf.random_normal([1, self.n_out]) * noise_sigma_sigma)
self.noise_sigma = self.sigma_transform(pre_noise_sigma)
# Compute KL for additive noise sigma terms.
if getattr(self.hparams, "infer_noise_sigma", False):
neg_kl_term = log_gaussian(
pre_noise_sigma,
self.inverse_sigma_transform(self.hparams.noise_sigma),
self.hparams.prior_sigma
)
neg_kl_term -= log_gaussian(pre_noise_sigma,
noise_sigma_mu,
noise_sigma_sigma)
else:
neg_kl_term = 0.
return neg_kl_term
def build_model(self, activation_fn=tf.nn.relu):
"""Defines the actual NN model with fully connected layers.
The loss is computed for partial feedback settings (bandits), so only
the observed outcome is backpropagated (see weighted loss).
Selects the optimizer and, finally, it also initializes the graph.
Args:
activation_fn: the activation function used in the nn layers.
"""
if self.verbose:
print("Initializing model {}.".format(self.name))
neg_kl_term, l_number = 0, 0
use_local_reparameterization = self.use_local_reparameterization
# Compute model additive noise for each action with log-normal distribution
neg_kl_term += self.build_action_noise()
# Build network.
input_x = self.x
input_local = self.x
n_in = self.n_in
for l_number, n_nodes in enumerate(self.layers):
if n_nodes > 0:
h, h_local, neg_kl = self.build_layer(input_x, input_local,
[n_in, n_nodes], l_number)
neg_kl_term += neg_kl
input_x, input_local = h, h_local
n_in = n_nodes
# Create last linear layer
h, h_local, neg_kl = self.build_layer(input_x, input_local,
[n_in, self.n_out],
l_number + 1,
activation_fn=lambda x: x)
neg_kl_term += neg_kl
self.y_pred = h
|
srwalter/yap | yap/plugin.py | Python | gpl-2.0 | 34 | 0 | class YapPlu | gin(ob | ject):
pass
|
VictorLoren/Crak-card-game-python | Deck.py | Python | mit | 803 | 0.048568 | from Card import Card
from random import shuffle as rShuffle

#Deck
class Deck(Card):
    '''Definition of a card deck.'''
def __init__(self,hasJoker=False):
#Assemble deck
self.cards = [Card(v,s) for v in self.values for s in self.suits]
#Add Joker cards (2) as 'WW' if needed
#if(hasJoker):
# self.cards.extend([('W','W'),('W','W')])
#Return how many cards are in deck
def __len__(self):
return len(self.cards | )
#Draw a card from th | e deck and return a card
def draw(self,fromTop=True):
#Remove from the front/top of deck
if fromTop:
return self.cards.pop(0)
#Remove from the back/bottom of deck
else:
return self.cards.pop()
#Shuffle deck and return the newly shuffled deck
def shuffle(self):
#Use random.shuffle() method
rShuffle(self.cards)
return self.cards
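# Hedged usage sketch (assumes the Card base class defines `values` and
# `suits`, which the deck comprehension in __init__ relies on):
#
#   deck = Deck()
#   deck.shuffle()
#   top_card = deck.draw()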
|
broadinstitute/cfn-pyplates | docs/source/examples/advanced/altered_template.py | Python | mit | 899 | 0.005562 | import sys
# If our base template isn't on the PYTHONPATH already, we need to do this:
sys.path.append('../path/to/base/templates')
import basetemplate
class AlteredTemplate(basetemplate.BaseTemplate):
"""This project only needs an S3 bucket, but no EC2 server."""
def add_resources(self):
self | .add_bucket()
def add_bucket(self):
"""This will add a bucket using the base template, and then add a custom CORS
configuration to it."""
super(AlteredTemplate, self).add_bucket()
self.resources['StaticFiles']['Properties']['CorsConfiguration'] = {
'CorsRules': [
{
'AllowedHeaders': ['*' | ],
'AllowedMethods': ['GET'],
'AllowedOrigins': ['*'],
}
]
}
cft = AlteredTemplate("S3 Bucket Project", options)
cft.add_resources()
|
dokterbob/slf-programming-workshops | examples/snowball/main.py | Python | mit | 1,795 | 0 | from snowball.utils import SnowMachine
from snowball.climate import WeatherProbe
# Note: multiline import limits line length
from snowball.water.phases import (
WaterVapor, IceCrystal, SnowFlake
)
def let_it_snow():
"""
Makes it snow, using a SnowMachine when weather doesn't allow it.
Returns a list of SnowFlakes.
Example::
>>> let_it_snow()
The snow machine is broken. No snow today. :/
[]
>>> let_it_snow()
[<snowball.water.phases.SnowFlake object at 0x101dbc210>,
<snowball.water.phases.SnowFlake object at 0x101dbc350>,
<snowball.water.phases.SnowFlake object at 0x101dbc1d0>,
<snowball.water | .phases.SnowFlake object at 0x101dbc190>,
<snowball.water.phases.SnowFlake object at 0x101dbc3d0>,
<snowball.water.phases.SnowFlake object at 0x101dbc410>,
<snowball.water.phases.SnowFlake object at 0x101dbc450>,
<snowball.water.phases.SnowFlake object at 0x101 | dbc390>,
<snowball.water.phases.SnowFlake object at 0x101dbc310>]
"""
# Create a WeatherProbe
weather_probe = WeatherProbe()
if weather_probe.temperature < 0 and weather_probe.clouds:
# There's clouds and it's cold enough
# Create necessary components
vapor = WaterVapor()
ice = IceCrystal()
# Start with empty list of flakes
snow_flakes = []
        # Now create 9 snowflakes (xrange(1, 10) yields nine values)
for counter in xrange(1, 10):
flake = SnowFlake(vapor, ice)
# Add flake to list
snow_flakes.append(flake)
return snow_flakes
else:
# The weather's not right, use the SnowMachine
snow_machine = SnowMachine()
snow_flakes = snow_machine.let_it_snow()
return snow_flakes
|
j-carl/ansible | test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py | Python | gpl-3.0 | 56 | 0 | thing = "hello | from formerly_testcoll_pkg.subm | od.thing"
|
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Tasking/PyScripts/Lib/tasking/netmap.py | Python | unlicense | 259 | 0.011583 |
import dsz
import os
import re
from task impo | rt *
class Netmap(Task, ):
| def __init__(self, file):
Task.__init__(self, file, 'Netmap')
def CreateCommandLine(self):
return ['netmap -minimal']
TaskingOptions['_netmapTasking'] = Netmap |
alu042/edx-platform | common/test/acceptance/tests/lms/test_learner_profile.py | Python | agpl-3.0 | 34,219 | 0.002835 | # -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from contextlib import contextmanager
from datetime import datetime
from bok_choy.web_app_test import WebAppTest
from nose.plugins.attrib import attr
from ...pages.common.logout import LogoutPage
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.dashboard import DashboardPage
from ..helpers import EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username | ']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_ | unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
self.assertEqual(profile_page.privacy, 'all_users')
else:
self.assertEqual(profile_page.privacy, 'private')
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now() # pylint: disable=attribute-defined-outside-init
# Load the page
profile_page.visit()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year)),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
start_time=self.start_time,
event_filter={'event_type': 'edx.user.settings.viewed', 'username': requesting_username},
number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'username': username,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
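    # A hypothetical call site for the context manager above: wrap the UI
    # action that changes a preference, and the expected
    # edx.user.settings.changed event is asserted around it. The 'old'/'new'
    # keys are illustrative kwargs merged into the expected event payload:
    #
    #   with self.verify_pref_change_event_during(
    #       username, user_id, 'account_privacy',
    #       old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC,
    #   ):
    #       profile_page.privacy = self.PRIVACY_PUBLIC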
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
@attr('shard_4')
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
if birth_year is None:
birth_year = ""
self.set_birth_year(birth_year=birth_year)
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
        if message:
            self.assertTrue(profile_page.age_limit_message_present)
            self.assertIn(message, profile_page.profile_forced_private_message)
        else:
            # assertIn would raise a TypeError on a None message, so only
            # check the banner text when a message is expected.
            self.assertFalse(profile_page.age_limit_message_present)
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
|
marbiouk/dsmtools | Tools/GenericToolsBatchConvertRastersASCIIMXE.py | Python | mit | 4,814 | 0.003324 | #!/usr/bin/env python
import sys, os, arcpy
class GenericToolsBatchConvertRastersASCIIMXE(object):
"""This class has the methods you need to define
to use your code as an ArcGIS Python Tool."""
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Batch convert directory of rasters to ASCII and MXE formats"
self.description = """Description"""
self.canRunInBackground = False
self.category = "Generic Tools" # Use your own category here, or an existing one.
def getParameterInfo(self):
params = []
input_directory = arcpy.Parameter(name="input_directory",
displayName="Directory of rasters",
datatype="DEWorkspace",
parameterType="Required",
direction="Input",
)
input_directory.value = r"D:\0_GEBCO\Geographic_Final"
params.append(input_directory)
output_directory = arcpy.Parameter(name="output_directory",
displayName="Output directory",
datatype="DEWorkspace",
parameterType="Required", # Required|Optional|Derived
direction="Output", # Input|Output
)
output_directory.value = r"G:\Model_Data\GlobEnv\GEBCO14-Based"
params.append(output_directory)
out_mxe = arcpy.Parameter(name="out_mxe",
displayName="Do you want to create MXE for Maxent?",
datatype="GPBoolean",
parameterType="Required",
direction="Input",
)
out_mxe.ValueList = ["True", "False"]
out_mxe.value = "True"
params.append(out_mxe)
path_mxe = arcpy.Parameter(name="path_mxe",
displayName="Path to Maxent jar",
datatype="DEFolder",
parameterType="Required",
direction="Input",
)
path_mxe.value = "D:\Example"
params.append(path_mxe)
return params
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
#def updateParameters(self, parameters):
#return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
try:
path_mxe = parameters[3].valueAsText
            if not os.path.exists(os.path.join(path_mxe, "maxent.jar")):
parameters[3].setErrorMessage("Maxent Jar not found in directory ")
except Exception:
pass
return
def execute(self, parameters, messages):
arcpy.AddMessage("Batch convert rasters to ASCII and MXE formats")
for param in parameters:
arcpy.AddMessage("Parameter: %s = %s" % (param.name, param.valueAsText) )
input_directory = parameters[0].valueAsText
output_directory = parameters[1].valueAsText
out_mxe = parameters[2].valueAsText
path_mxe = parameters[3].valueAsText
if not os.path.exists(output_directory):
os.makedirs(output_directory)
arcpy.env.workspace = input_directory
rasterlist = arcpy.ListRasters("*")
arcpy.AddMessage("There are " + str(len(rasterlist)) + " rasters to process.")
| for raster in rasterlist:
arcpy.AddMessage("Converting " + str(raster) + ".")
if not arcpy.Exists(os.path.join(output_directory, raster + ".asc")):
arcpy.RasterToASCII_conversion(raster, os.path.join(output_directory, raster + ".asc"))
            if out_mxe.lower() == "true":  # valueAsText yields a string, which is always truthy
command = 'java -cp " | ' + os.path.join(path_mxe, "maxent.jar") + '" density.Convert ' + \
output_directory + " asc " + output_directory + " mxe"
arcpy.AddMessage("Calling Maxent to convert ASCII to MXE: " + str(command))
os.system(str(command))
return
def main():
tool = GenericToolsBatchConvertRastersASCIIMXE()
tool.execute(tool.getParameterInfo(), None)
if __name__ == '__main__':
main()
|
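A note on the GPBoolean pitfall patched in execute() above: valueAsText always returns a string, so truth-testing it can never be False. A minimal helper (hypothetical, not part of the original tool) makes the intent explicit:
def parameter_to_bool(value_as_text):
    # arcpy GPBoolean parameters surface as 'true'/'false' strings via
    # valueAsText; compare explicitly instead of relying on truthiness.
    return str(value_as_text).strip().lower() in ('true', '1', 'yes')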
SINGROUP/pycp2k | pycp2k/classes/_dirichlet_bc2.py | Python | lgpl-3.0 | 2,187 | 0.001372 | from pycp2k.inputsection import InputSection
from ._aa_planar2 import _aa_planar2
from ._planar2 import _planar2
from ._aa_cylindrical2 import _aa_cylindrical2
from ._aa_cuboidal2 import _aa_cuboidal2
class _dirichlet_bc2(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Verbose_output = None
self.AA_PLANAR_list = []
self.PLANAR_list = []
self.AA_CYLINDRICAL_list = []
self.AA_CUBOIDAL_list = []
self._name = "DIRICHLET_BC"
self._keywords = {'Verbose_output': 'VERBOSE_OUTPUT'}
self._repeated_subsections = {'AA_CYLINDRICAL': '_aa_cylindrical2', 'PLANAR': '_planar2', 'AA_PLANAR': '_aa_planar2', 'AA_CUBOIDAL': '_aa_cuboidal2'}
self._attributes = ['AA_PLANAR_list', 'PLANAR_list', 'AA_CYLINDRICAL_list', 'AA_CUBOIDAL_list']
def AA_CYLINDRICAL_add(self, section_parameters=None):
new_section = _aa_cylindrical2()
if section_parameters is not None:
if hasattr(new_section, 'Section_parameters'):
new_section.Section_parameters = sec | tion_parameters
self.AA_CYLINDRICAL_list.append(new_section)
return new_section
def PLANA | R_add(self, section_parameters=None):
new_section = _planar2()
if section_parameters is not None:
if hasattr(new_section, 'Section_parameters'):
new_section.Section_parameters = section_parameters
self.PLANAR_list.append(new_section)
return new_section
def AA_PLANAR_add(self, section_parameters=None):
new_section = _aa_planar2()
if section_parameters is not None:
if hasattr(new_section, 'Section_parameters'):
new_section.Section_parameters = section_parameters
self.AA_PLANAR_list.append(new_section)
return new_section
def AA_CUBOIDAL_add(self, section_parameters=None):
new_section = _aa_cuboidal2()
if section_parameters is not None:
if hasattr(new_section, 'Section_parameters'):
new_section.Section_parameters = section_parameters
self.AA_CUBOIDAL_list.append(new_section)
return new_section
|
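A usage sketch for the repeated-subsection helpers above (class and method names are from the code itself; the 'ON' section parameter is a hypothetical value):
bc = _dirichlet_bc2()
bc.Verbose_output = True
# PLANAR_add() instantiates _planar2, optionally sets its Section_parameters,
# appends it to PLANAR_list, and returns it for further configuration.
planar = bc.PLANAR_add(section_parameters='ON')
assert planar in bc.PLANAR_list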
jules185/IoT_Hackathon | .homeassistant/deps/fuzzywuzzy/__init__.py | Python | mit | 47 | 0 | # -*- coding: utf-8 -*-
__version | __ = | '0.15.0'
|
ClearingHouse/clearblockd | lib/components/assets.py | Python | mit | 14,545 | 0.008869 | import os
import logging
import decimal
import base64
import json
from datetime import datetime
from lib import config, util, util_bitcoin
ASSET_MAX_RETRY = 3
D = decimal.Decimal
def parse_issuance(db, message, cur_block_index, cur_block):
if message['status'] != 'valid':
return
def modify_extended_asset_info(asset, description):
"""adds an asset to asset_extended_info collection if the description is a valid json link. or, if the link
is not a valid json link, will remove the asset entry from the table if it exists"""
if util.is_valid_url(description, suffix='.json', allow_no_protocol=True):
db.asset_extended_info.update({'asset': asset},
{'$set': {
'info_url': description,
'info_status': 'needfetch',
'fetch_info_retry': 0, # retry ASSET_MAX_RETRY times to fetch info from info_url
'info_data': {},
'errors': []
}}, upsert=True)
#^ valid info_status settings: needfetch, valid, invalid, error
#additional fields will be added later in events, once the asset info is pulled
else:
db.asset_extended_info.remove({ 'asset': asset })
#remove any saved asset image data
imagePath = os.path.join(config.DATA_DIR, config.SUBDIR_ASSET_IMAGES, asset + '.png')
if os.path.exists(imagePath):
| os.remove(imagePath)
tracked_asset = db.tracked_assets.find_one(
{'asset': message['asset']}, {'_id': 0, '_histor | y': 0})
#^ pulls the tracked asset without the _id and history fields. This may be None
if message['locked']: #lock asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'locked',
'locked': True,
},
"$push": {'_history': tracked_asset } }, upsert=False)
logging.info("Locking asset %s" % (message['asset'],))
elif message['transfer']: #transfer asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'transferred',
'owner': message['issuer'],
},
"$push": {'_history': tracked_asset } }, upsert=False)
logging.info("Transferring asset %s to address %s" % (message['asset'], message['issuer']))
elif message['quantity'] == 0 and tracked_asset is not None: #change description
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'changed_description',
'description': message['description'],
},
"$push": {'_history': tracked_asset } }, upsert=False)
modify_extended_asset_info(message['asset'], message['description'])
logging.info("Changing description for asset %s to '%s'" % (message['asset'], message['description']))
else: #issue new asset or issue addition qty of an asset
if not tracked_asset: #new issuance
tracked_asset = {
'_change_type': 'created',
'_at_block': cur_block_index, #the block ID this asset is current for
'_at_block_time': cur_block['block_time_obj'],
                #^ NOTE: if there are multiple tracked-asset change updates in a single block for the
                # same asset, the last one with _at_block == that block id in the history array is
                # the final version for that asset at that block.
'asset': message['asset'],
'owner': message['issuer'],
'description': message['description'],
'divisible': message['divisible'],
'locked': False,
'total_issued': message['quantity'],
'total_issued_normalized': util_bitcoin.normalize_quantity(message['quantity'], message['divisible']),
'_history': [] #to allow for block rollbacks
}
db.tracked_assets.insert(tracked_asset)
logging.info("Tracking new asset: %s" % message['asset'])
modify_extended_asset_info(message['asset'], message['description'])
else: #issuing additional of existing asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'issued_more',
},
"$inc": {
'total_issued': message['quantity'],
'total_issued_normalized': util_bitcoin.normalize_quantity(message['quantity'], message['divisible'])
},
"$push": {'_history': tracked_asset} }, upsert=False)
logging.info("Adding additional %s quantity for asset %s" % (
util_bitcoin.normalize_quantity(message['quantity'], message['divisible']), message['asset']))
return True
def inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, new_status='error', errors=None):
    errors = errors if errors is not None else []  # avoid a shared mutable default argument
asset['fetch_info_retry'] += 1
asset['errors'] = errors
if asset['fetch_info_retry'] == max_retry:
asset['info_status'] = new_status
db.asset_extended_info.save(asset)
def sanitize_json_data(data):
data['asset'] = util.sanitize_eliteness(data['asset'])
if 'description' in data: data['description'] = util.sanitize_eliteness(data['description'])
if 'website' in data: data['website'] = util.sanitize_eliteness(data['website'])
if 'pgpsig' in data: data['pgpsig'] = util.sanitize_eliteness(data['pgpsig'])
return data
def process_asset_info(db, asset, info_data):
# sanity check
assert asset['info_status'] == 'needfetch'
assert 'info_url' in asset
assert util.is_valid_url(asset['info_url'], allow_no_protocol=True) #already validated in the fetch
errors = util.is_valid_json(info_data, config.ASSET_SCHEMA)
if not isinstance(info_data, dict) or 'asset' not in info_data:
errors.append('Invalid data format')
elif asset['asset'] != info_data['asset']:
errors.append('asset field does not match asset name')
if len(errors) > 0:
inc_fetch_retry(db, asset, new_status='invalid', errors=errors)
return (False, errors)
asset['info_status'] = 'valid'
#fetch any associated images...
#TODO: parallelize this 2nd level asset image fetching ... (e.g. just compose a list here, and process it in later on)
if 'image' in info_data:
info_data['valid_image'] = util.fetch_image(info_data['image'],
config.SUBDIR_ASSET_IMAGES, asset['asset'], fetch_timeout=5)
asset['info_data'] = sanitize_json_data(info_data)
db.asset_extended_info.save(asset)
return (True, None)
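# Lifecycle sketch for the info_status field used above (statuses as defined
# in modify_extended_asset_info): a record starts at 'needfetch'; a fetch that
# validates against config.ASSET_SCHEMA moves it to 'valid'; a fetch that
# fails validation goes through inc_fetch_retry() with new_status='invalid';
# and after ASSET_MAX_RETRY failed attempts the status sticks at 'invalid'
# (or 'error' for transport failures), so the asset is not fetched again.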
def fetch_all_asset_info(db):
assets = list(db.asset_extended_info.find({'info_status': 'needfetch'}))
asset_info_urls = []
def asset_fetch_complete_hook(urls_data):
logging.info("Enhanced asset info fetching complete. %s unique URLs fetched. Processing..." % len(urls_data))
for asset in assets:
logging.debug("Looking at asset %s: %s" % (asset, asset['info_url']))
if asset['info_url']:
info_url = ('http://' + asset['info_url']) \
if not asset['info_url'].startswith('http://') and not asset['info_url'].startswith('https://') else asset['info_url']
assert info_url in urls_data
if not urls_data[info_url][0]: #re |
stoivo/GitSavvy | core/interfaces/status.py | Python | mit | 26,371 | 0.001213 | import os
import sublime
from sublime_plugin import WindowCommand, TextCommand
from ..commands import *
from ...common import ui
from ..git_command import GitCommand
f | rom ...common import util
class GsShowStatusCommand(WindowCommand, GitCommand):
"""
Open a status view for the active git repository.
"""
def run(self):
StatusInterface(repo_path=self.repo_path)
class StatusInterface(ui.Interface, GitCommand):
"""
Status dashboard.
"""
interface_type = "status"
read_only = True
syntax_ | file = "Packages/GitSavvy/syntax/status.sublime-syntax"
word_wrap = False
tab_size = 2
template = """\
BRANCH: {branch_status}
ROOT: {git_root}
HEAD: {head}
{< unstaged_files}
{< untracked_files}
{< staged_files}
{< merge_conflicts}
{< no_status_message}
{< stashes}
{< help}
"""
template_help = """
################### ###############
## SELECTED FILE ## ## ALL FILES ##
################### ###############
[o] open file [a] stage all unstaged files
[s] stage file [A] stage all unstaged and untracked files
[u] unstage file [U] unstage all staged files
[d] discard changes to file [D] discard all unstaged changes
[h] open file on remote
[M] launch external merge tool for conflict
[l] diff file inline [f] diff all files
[e] diff file [F] diff all cached files
############# #############
## ACTIONS ## ## STASHES ##
############# #############
[c] commit [t][a] apply stash
[C] commit, including unstaged [t][p] pop stash
[m] amend previous commit [t][s] show stash
[p] push current branch [t][c] create stash
[t][u] create stash including untracked files
[i] ignore file [t][g] create stash of staged changes only
[I] ignore pattern [t][d] discard stash
###########
## OTHER ##
###########
[r] refresh status
[?] toggle this help menu
[tab] transition to next dashboard
[SHIFT-tab] transition to previous dashboard
[.] move cursor to next file
[,] move cursor to previous file
-
"""
template_staged = """
STAGED:
{}
"""
template_unstaged = """
UNSTAGED:
{}
"""
template_untracked = """
UNTRACKED:
{}
"""
template_merge_conflicts = """
MERGE CONFLICTS:
{}
"""
template_stashes = """
STASHES:
{}
"""
def title(self):
return "STATUS: {}".format(os.path.basename(self.repo_path))
def pre_render(self):
(self.staged_entries,
self.unstaged_entries,
self.untracked_entries,
self.conflict_entries) = self.sort_status_entries(self.get_status())
def on_new_dashboard(self):
self.view.run_command("gs_status_navigate_file")
@ui.partial("branch_status")
def render_branch_status(self):
return self.get_branch_status(delim="\n ")
@ui.partial("git_root")
def render_git_root(self):
return self.short_repo_path
@ui.partial("head")
def render_head(self):
return self.get_latest_commit_msg_for_head()
@ui.partial("staged_files")
def render_staged_files(self):
if not self.staged_entries:
return ""
def get_path(file_status):
""" Display full file_status path, including path_alt if exists """
if file_status.path_alt:
return '{} -> {}'.format(file_status.path_alt, file_status.path)
return file_status.path
return self.template_staged.format("\n".join(
" {} {}".format("-" if f.index_status == "D" else " ", get_path(f))
for f in self.staged_entries
))
@ui.partial("unstaged_files")
def render_unstaged_files(self):
if not self.unstaged_entries:
return ""
return self.template_unstaged.format("\n".join(
" {} {}".format("-" if f.working_status == "D" else " ", f.path)
for f in self.unstaged_entries
))
@ui.partial("untracked_files")
def render_untracked_files(self):
if not self.untracked_entries:
return ""
return self.template_untracked.format(
"\n".join(" " + f.path for f in self.untracked_entries))
@ui.partial("merge_conflicts")
def render_merge_conflicts(self):
if not self.conflict_entries:
return ""
return self.template_merge_conflicts.format(
"\n".join(" " + f.path for f in self.conflict_entries))
@ui.partial("no_status_message")
def render_no_status_message(self):
return ("\n Your working directory is clean.\n"
if not (self.staged_entries or
self.unstaged_entries or
self.untracked_entries or
self.conflict_entries)
else "")
@ui.partial("stashes")
def render_stashes(self):
stash_list = self.get_stashes()
if not stash_list:
return ""
return self.template_stashes.format("\n".join(
" ({}) {}".format(stash.id, stash.description) for stash in stash_list))
@ui.partial("help")
def render_help(self):
help_hidden = self.view.settings().get("git_savvy.help_hidden")
if help_hidden:
return ""
else:
return self.template_help
ui.register_listeners(StatusInterface)
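# How the template wiring above fits together (a reading aid, not new API):
# each "{< name}" tag in StatusInterface.template is filled by the method
# registered with @ui.partial("name") -- e.g. "{< staged_files}" maps to
# render_staged_files() -- and ui.register_listeners() hooks the interface
# into the editor's event system so those partials are re-rendered on refresh.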
class GsStatusOpenFileCommand(TextCommand, GitCommand):
"""
    For every file that is selected or under a cursor, open that
    file in a new view.
"""
def run(self, edit):
lines = util.view.get_lines_from_regions(self.view, self.view.sel())
file_paths = (line.strip() for line in lines if line[:4] == " ")
abs_paths = (os.path.join(self.repo_path, file_path) for file_path in file_paths)
for path in abs_paths:
self.view.window().open_file(path)
class GsStatusDiffInlineCommand(TextCommand, GitCommand):
"""
For every file selected or under a cursor, open a new inline-diff view for
that file. If the file is staged, open the inline-diff in cached mode.
"""
def run(self, edit):
interface = ui.get_interface(self.view.id())
non_cached_sections = (interface.get_view_regions("unstaged_files") +
interface.get_view_regions("merge_conflicts"))
non_cached_lines = util.view.get_lines_from_regions(
self.view,
self.view.sel(),
valid_ranges=non_cached_sections
)
non_cached_files = (
os.path.join(self.repo_path, line.strip())
for line in non_cached_lines
if line[:4] == " ")
cached_sections = interface.get_view_regions("staged_files")
cached_lines = util.view.get_lines_from_regions(
self.view,
self.view.sel(),
valid_ranges=cached_sections
)
cached_files = (
os.path.join(self.repo_path, line.strip())
for line in cached_lines
if line[:4] == " ")
sublime.set_timeout_async(
lambda: self.load_inline_diff_windows(non_cached_files, cached_files), 0)
def load_inline_diff_windows(self, non_cached_files, cached_files):
for fpath in non_cached_files:
syntax = util.file.get_syntax_for_file(fpath)
settings = {
"git_savvy.file_path": fpath,
"git_savvy.repo_path": self.repo_path,
"syntax": syntax
}
self.view.window().run_comman |
Kronuz/Xapiand | contrib/python/xapiand-py/xapiand/transport.py | Python | mit | 15,871 | 0.001764 | # Copyright (c) 2019 Dubalu LLC
# Copyright (c) 2017 Elasticsearch
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to you under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express | or implied.
# See the License for the specific language gover | ning permissions and
# limitations under the License.
import time
from itertools import chain
from .connection import Urllib3HttpConnection
from .connection_pool import ConnectionPool, DummyConnectionPool
from .serializer import Deserializer, DEFAULT_SERIALIZERS, DEFAULT_SERIALIZER
from .exceptions import ConnectionError, TransportError, SerializationError, ConnectionTimeout
def get_node_info(node_info, host):
"""
    Simple callback that takes the node info from `GET /` and the
    parsed connection information, and returns the connection information. If
`None` is returned this node will be skipped.
Useful for filtering nodes (by proximity for example) or if additional
information needs to be provided for the :class:`~xapiand.Connection`
class.
:arg node_info: node information from `GET /`
:arg host: connection information (host, port) extracted from the node info
"""
return host
class Transport(object):
"""
    Encapsulation of transport-related logic. Handles instantiation of the
individual connections as well as creating a connection pool to hold them.
Main interface is the `perform_request` method.
"""
def __init__(self, hosts, connection_class=Urllib3HttpConnection,
connection_pool_class=ConnectionPool, node_info_callback=get_node_info,
sniff_on_start=False, sniffer_timeout=None, sniff_timeout=.5,
sniff_on_connection_fail=False, serializer=DEFAULT_SERIALIZER, serializers=None,
default_mimetype=DEFAULT_SERIALIZER.mimetype, max_retries=3, retry_on_status=(502, 503, 504),
retry_on_timeout=False, http_method_override=False, **kwargs):
"""
:arg hosts: list of dictionaries, each containing keyword arguments to
create a `connection_class` instance
:arg connection_class: subclass of :class:`~xapiand.Connection` to use
:arg connection_pool_class: subclass of :class:`~xapiand.ConnectionPool` to use
:arg node_info_callback: callback responsible for taking the node information from
`GET /`, along with already extracted information, and
producing a list of arguments (same as `hosts` parameter)
:arg sniff_on_start: flag indicating whether to obtain a list of nodes
        from the cluster at startup time
:arg sniffer_timeout: number of seconds between automatic sniffs
:arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff
:arg sniff_timeout: timeout used for the sniff request - it should be a
fast api call and we are talking potentially to more nodes so we want
to fail quickly. Not used during initial sniffing (if
``sniff_on_start`` is on) when the connection still isn't
initialized.
:arg serializer: serializer instance
:arg serializers: optional dict of serializer instances that will be
used for deserializing data coming from the server. (key is the mimetype)
:arg default_mimetype: when no mimetype is specified by the server
response assume this mimetype, defaults to `'application/x-msgpack'`
:arg max_retries: maximum number of retries before an exception is propagated
:arg retry_on_status: set of HTTP status codes on which we should retry
on a different node. defaults to ``(502, 503, 504)``
:arg retry_on_timeout: should timeout trigger a retry on different
node? (default `False`)
:arg http_method_override: for environments that don't support passing
bodies with GET requests or non-standard methods such as `MERGE`,
this option allows you to specify using `POST` and the
`X-HTTP-Method-Override` header instead.
:arg kwargs: any additional arguments will be passed on to the
:class:`~xapiand.Connection` instances.
Any extra keyword arguments will be passed to the `connection_class`
        when creating an instance unless overridden by that connection's
options provided as part of the hosts parameter.
"""
# serialization config
_serializers = DEFAULT_SERIALIZERS.copy()
# if a serializer has been specified, use it for deserialization as well
_serializers[serializer.mimetype] = serializer
# if custom serializers map has been supplied, override the defaults with it
if serializers:
_serializers.update(serializers)
# create a deserializer with our config
self.deserializer = Deserializer(_serializers, default_mimetype)
self.max_retries = max_retries
self.retry_on_timeout = retry_on_timeout
self.retry_on_status = retry_on_status
self.http_method_override = http_method_override
# data serializer
self.serializer = serializer
# store all strategies...
self.connection_pool_class = connection_pool_class
self.connection_class = connection_class
# ...save kwargs to be passed to the connections
self.kwargs = kwargs
self.hosts = hosts
# ...and instantiate them
self.set_connections(hosts)
# retain the original connection instances for sniffing
self.seed_connections = self.connection_pool.connections[:]
# sniffing data
self.sniffer_timeout = sniffer_timeout
self.sniff_on_connection_fail = sniff_on_connection_fail
self.last_sniff = time.time()
self.sniff_timeout = sniff_timeout
# callback to construct host dict from data in `GET /`
self.node_info_callback = node_info_callback
if sniff_on_start:
self.sniff_hosts(True)
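    # A construction sketch (host values are hypothetical): a single host
    # dict yields a DummyConnectionPool, several yield a full ConnectionPool,
    # as selected in set_connections() below.
    #
    #   t = Transport([{'host': 'localhost', 'port': 8880}])
    #   t.perform_request('GET', '/')  # main entry point per the class docstring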
def add_connection(self, host):
"""
Create a new :class:`~xapiand.Connection` instance and add it to the pool.
:arg host: kwargs that will be used to create the instance
"""
self.hosts.append(host)
self.set_connections(self.hosts)
def set_connections(self, hosts):
"""
Instantiate all the connections and create new connection pool to hold them.
Tries to identify unchanged hosts and re-use existing
:class:`~xapiand.Connection` instances.
:arg hosts: same as `__init__`
"""
# construct the connections
def _create_connection(host):
# if this is not the initial setup look at the existing connection
# options and identify connections that haven't changed and can be
# kept around.
if hasattr(self, 'connection_pool'):
for (connection, old_host) in self.connection_pool.connection_opts:
if old_host == host:
return connection
# previously unseen params, create new connection
kwargs = self.kwargs.copy()
kwargs.update(host)
return self.connection_class(**kwargs)
connections = map(_create_connection, hosts)
connections = list(zip(connections, hosts))
if len(connections) == 1:
self.connection_pool = DummyConnectionPool(connections)
else:
# pass the hosts dicts to the connection pool to optionally extract parameters from
self.connection_ |
amwelch/a10sdk-python | a10sdk/core/cgnv6/cgnv6_nat64_fragmentation_df_bit_transparency.py | Python | apache-2.0 | 1,367 | 0.010241 | from a10sdk.common.A10BaseClass import A10BaseClass
class DfBitTransparency(A10BaseClass):
"""Class Description::
Add an empty IPv6 fragmentation header if IPv4 DF bit is zero (default:disabled).
Class df-bit-transparency supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param df_bit_value: {"optional": true, "enum": ["enable"], "type": "string", "description": "'enable': Add an empty IPv6 fragmentation header if IPv4 DF bit is zero; ", "format": "enum"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/nat64/fragmentation/df-bit-transparency`.
"""
def __init__(self, **kwargs):
sel | f.ERROR_MSG = ""
self.required=[]
self.b_key = "df-bit-transparency"
self.a10_url="/axapi/v3/cgnv6/nat64/fragmentation/df-bit-transparency"
self.DeviceProxy = ""
sel | f.df_bit_value = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
LLNL/spack | var/spack/repos/builtin/packages/amrvis/package.py | Python | lgpl-2.1 | 6,541 | 0.000153 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *  # provides MakefilePackage, variant, depends_on, resource, ...
class Amrvis(MakefilePackage):
"""Amrvis is a visualization package specifically designed to
read and display output and profiling data from codes built
on the AMReX framework.
"""
homepage = "https://github.com/AMReX-Codes/Amrvis"
git = "https://github.com/AMReX-Codes/Amrvis.git"
version('main', tag='main')
variant(
'dims',
default='3',
values=('1', '2', '3'),
multi=False,
description='Number of spatial dimen | sions'
)
variant(
'prec',
default='DOUBLE',
values=('FLOAT', 'DOUBLE'),
multi=False,
description='Floating point precision'
)
variant('mpi', default=True, description='Enable MPI parallel support')
variant('debug', default=False, description='Enable debugging features')
variant('profiling', default=False,
description='Enable AMReX | profiling features')
depends_on('gmake', type='build')
depends_on('mpi', when='+mpi')
depends_on('libsm')
depends_on('libice')
depends_on('libxpm')
depends_on('libx11')
depends_on('libxt')
depends_on('libxext')
depends_on('motif')
depends_on('flex')
depends_on('bison')
conflicts(
'+profiling', when='dims=1',
msg='Amrvis profiling support requires a 2D build'
)
conflicts(
'+profiling', when='dims=3',
msg='Amrvis profiling support requires a 2D build'
)
# Only doing gcc and clang at the moment.
# Intel currently fails searching for mpiicc, mpiicpc, etc.
for comp in ['%intel', '%cce', '%nag', '%pgi', '%xl', '%xl_r']:
conflicts(
comp,
msg='Amrvis currently only builds with gcc and clang'
)
# Need to clone AMReX into Amrvis because Amrvis uses AMReX's source
resource(name='amrex',
git='https://github.com/AMReX-Codes/amrex.git',
tag='development',
placement='amrex')
def edit(self, spec, prefix):
        # libquadmath is only available on x86_64 and ppc64le
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85440
if self.spec.target.family not in ['x86_64', 'ppc64le']:
comps = join_path('amrex', 'Tools', 'GNUMake', 'comps')
maks = [
join_path(comps, 'gnu.mak'),
join_path(comps, 'llvm.mak'),
]
for mak in maks:
filter_file('-lquadmath', '', mak)
# Set all available makefile options to values we want
makefile = FileFilter('GNUmakefile')
makefile.filter(
r'^AMREX_HOME\s*\?=.*',
'AMREX_HOME = {0}'.format('./amrex')
)
makefile.filter(
r'^PRECISION\s*=.*',
'PRECISION = {0}'.format(spec.variants['prec'].value)
)
makefile.filter(
r'^DIM\s*=.*',
'DIM = {0}'.format(spec.variants['dims'].value)
)
makefile.filter(
r'^PROFILE\s*=.*',
'PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^TRACE_PROFILE\s*=.*',
'TRACE_PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^COMM_PROFILE\s*=.*',
'COMM_PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^COMP\s*=.*',
'COMP = {0}'.format(self.compiler.name)
)
makefile.filter(
r'^DEBUG\s*=.*',
'DEBUG = {0}'.format(spec.variants['debug'].value).upper()
)
makefile.filter(
r'^USE_ARRAYVIEW\s*=.*',
'USE_ARRAY_VIEW = FALSE'
)
makefile.filter(
r'^USE_MPI\s*=.*',
'USE_MPI = {0}'.format(spec.variants['mpi'].value).upper()
)
makefile.filter(
r'^USE_CXX11\s*=.*',
'USE_CXX11 = TRUE'
)
makefile.filter(
r'^USE_VOLRENDER\s*=.*',
'USE_VOLRENDER = FALSE'
)
makefile.filter(
r'^USE_PARALLELVOLRENDER\s*=.*',
'USE_PARALLELVOLRENDER = FALSE'
)
makefile.filter(
r'^USE_PROFPARSER\s*=.*',
'USE_PROFPARSER = {0}'.format(
spec.variants['profiling'].value
).upper()
)
# A bit risky here deleting all /usr and /opt X
# library default search paths in makefile
makefile.filter(
r'^.*\b(usr|opt)\b.*$',
'# Spack removed INCLUDE_LOCATIONS and LIBRARY_LOCATIONS'
)
# Read GNUmakefile into array
with open('GNUmakefile', 'r') as file:
contents = file.readlines()
# Edit GNUmakefile includes and libraries to point to Spack
# dependencies.
# The safest bet is to put the LIBRARY_LOCATIONS and
# INCLUDE_LOCATIONS at the beginning of the makefile.
line_offset = 0
count = 0
for lib in ['libsm', 'libice', 'libxpm', 'libx11',
'libxt', 'libxext', 'motif']:
contents.insert(
line_offset + count,
'LIBRARY_LOCATIONS += {0}\n'.format(spec[lib].prefix.lib)
)
contents.insert(
line_offset + count + 1,
'INCLUDE_LOCATIONS += {0}\n'.format(spec[lib].prefix.include)
)
count += 1
# Write GNUmakefile
with open('GNUmakefile', 'w') as file:
file.writelines(contents)
def setup_build_environment(self, env):
# We don't want an AMREX_HOME the user may have set already
env.unset('AMREX_HOME')
# Help force Amrvis to not pick up random system compilers
if '+mpi' in self.spec:
env.set('MPI_HOME', self.spec['mpi'].prefix)
env.set('CC', self.spec['mpi'].mpicc)
env.set('CXX', self.spec['mpi'].mpicxx)
env.set('F77', self.spec['mpi'].mpif77)
env.set('FC', self.spec['mpi'].mpifc)
def install(self, spec, prefix):
# Install exe manually
mkdirp(prefix.bin)
install('*.ex', prefix.bin)
|
patricklaw/pants | src/python/pants/backend/java/dependency_inference/types.py | Python | apache-2.0 | 1,065 | 0.001878 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Sequence
@dataclass(frozen=True)
class JavaImport:
name: str
is_static: bool = False
is_asterisk: bool = False
@classmethod
def from_jso | n_dict(cls, imp: dict[str, Any]) -> JavaImport:
return cls(
name=imp["name"],
is_asterisk=imp["isAsterisk"],
is_static=imp["isStatic"],
)
@dataclass(frozen=True)
class JavaSourceDependencyAnalysis:
declared_package: str
imports: Sequence[JavaImport]
top_level_types: Sequence[str]
@classmethod
def from_json_dict(cls, analysis: dict[str, Any]) -> JavaSourceDepende | ncyAnalysis:
return cls(
declared_package=analysis["declaredPackage"],
imports=[JavaImport.from_json_dict(imp) for imp in analysis["imports"]],
top_level_types=analysis["topLevelTypes"],
)
|
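A hypothetical round-trip for the parsers above, using the exact JSON field names they read:
raw = {
    "declaredPackage": "org.example",
    "imports": [{"name": "java.util.List", "isStatic": False, "isAsterisk": False}],
    "topLevelTypes": ["org.example.Foo"],
}
analysis = JavaSourceDependencyAnalysis.from_json_dict(raw)
assert analysis.imports[0].name == "java.util.List"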
thiagopbueno/mdp-problog | tests/test_mdp.py | Python | gpl-3.0 | 5,866 | 0.003409 | #! /usr/bin/env python3
# This file is part of MDP-ProbLog.
# MDP-ProbLog is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# MDP-ProbLog is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MDP-ProbLog. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../mdpproblog'))
import unittest
import mdp as mdp
from fluent import StateSpace, ActionSpace
class TestMDP(unittest.TestCase):
@classmethod
def setUp(cls):
cls.models = {
'sysadmin':
"""
computer(c1). computer(c2). computer(c3).
connected(c1,[c2,c3]). connected(c2,[c1]). connected(c3,[c1]).
accTotal([],A,A).
accTotal([_|T],A,X) :- B is A+1, accTotal(T,B,X).
total(L,T) :- accTotal(L,0,T).
total_connected(C,T) :- connected(C,L), total(L,T).
accAlive([],A,A).
accAlive([H|T],A,X) :- running(H,0), B is A+1, accAlive(T,B,X).
accAlive([H|T],A,X) :- not(running(H,0)), B is A, accAlive(T,B,X).
alive(L,A) :- accAlive(L,0,A).
total_running(C,R) :- connected(C,L), alive(L,R).
state_fluent(running(C)) :- computer(C).
action(reboot(none)).
action(reboot(C)) :- computer(C).
1.00::running(C,1) :- reboot(C).
0.05::running(C,1) :- not(reboot(C)), not(running(C,0)).
P::running(C,1) :- not(reboot(C)), running(C,0),
total_connected(C,T), total_running(C,R), P is 0.45+0.50*R/T.
utility(running(C,0), 1.00) :- computer(C).
utility(reboot(C), -0.75) :- computer(C).
utility(reboot(none), 0.00).
"""
}
cls.mdp = mdp.MDP(cls.models['sysadmin'])
def test_fluent_states(self):
expected_state_fluents = ['running(c1)', 'running(c2)', 'running(c3)']
actual_state_fluents = [str(f) for f in self.mdp.state_fluents()]
self.assertEqual(actual_state_fluents, expected_state_fluents)
def test_current_state_fluents(self):
expected_current_state_fluents = ['running(c1,0)', 'running(c2,0)', 'running(c3,0)']
actual_current_state_fluents = [str(f) for f in self.mdp.current_state_fluents()]
self.assertEqual(actual_current_state_fluents, expected_current_state_fluents)
def test_next_state_fluents(self):
expected_next_state_fluents = ['running(c1,1)', 'running(c2,1)', 'running(c3,1)']
actual_next_state_fluents = [str(f) for f in self.mdp.next_state_fluents()]
self. | assertEqual(actual_next_state_fluents, expected_next_state_fluents)
def test_actions(self):
expected_actions = ['reboot(c1)', 'reboot(c2)', 'reboot(c3)', 'reboot(none)']
actual_actions = [str(a) for a in self.mdp.actions()] |
self.assertEqual(actual_actions, expected_actions)
def test_transition(self):
states = StateSpace(self.mdp.current_state_fluents())
actions = ActionSpace(self.mdp.actions())
for state in states:
for j, action in enumerate(actions):
probabilities = self.mdp.transition(state, action)
for k, (term, prob) in enumerate(probabilities):
if k == j:
self.assertAlmostEqual(prob, 1.0)
elif list(state.values())[k] == 0:
self.assertAlmostEqual(prob, 0.05)
else:
connected = [[1, 2], [0], [0]]
alive = sum([x for i, x in enumerate(state.values()) if i in connected[k]])
total = len(connected[k])
self.assertAlmostEqual(prob, 0.45 + 0.50 * alive / total)
def test_transition_model(self):
actions = self.mdp.actions()
current_state_fluents = self.mdp.current_state_fluents()
model = self.mdp.transition_model()
self.assertEqual(len(model), len(actions) * 2**len(current_state_fluents))
for (state, action) in model:
probabilities = tuple(prob for term, prob in model[(state, action)])
self.assertEqual(len(probabilities), len(current_state_fluents))
self.assertTrue(all([p >= 0.0 and p <= 1.0 for p in probabilities]))
def test_reward(self):
states = StateSpace(self.mdp.current_state_fluents())
actions = ActionSpace(self.mdp.actions())
for state in states:
state_reward = 0
for (fluent, value) in state.items():
if value:
state_reward += 1.0
for action in actions:
action_cost = 0
for (a, value) in action.items():
if value and a.__str__() != 'reboot(none)':
action_cost += 0.75
reward = self.mdp.reward(state, action)
self.assertAlmostEqual(reward, state_reward - action_cost)
def test_reward_model(self):
rewards = self.mdp.reward_model()
states = StateSpace(self.mdp.current_state_fluents())
actions = ActionSpace(self.mdp.actions())
self.assertEqual(len(rewards), len(states) * len(actions))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
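For reference, the transition rule the assertions above encode can be written as a standalone function (a sketch of the model's semantics, not part of the test suite):
def p_running_next(rebooted, running_now, alive_neighbors, total_neighbors):
    # Mirrors the three probabilistic rules in the sysadmin model:
    # reboot forces the machine up; a dead machine revives with 0.05;
    # a live machine stays up with 0.45 + 0.50 * (alive / total).
    if rebooted:
        return 1.0
    if not running_now:
        return 0.05
    return 0.45 + 0.50 * float(alive_neighbors) / total_neighbors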
rbernand/transfert | tests/fonct/test_copy.py | Python | mit | 916 | 0 | import os
import transfert
from transfert import Resource
import transfert.actions
import transfert.exceptions
from .utils import delete_files
def test_copy(tmpdir, storages):
f = tmpdir.join('alpha')
f.write_binary(os.urandom(1024 * 40))
f_http = Resource(storages['http']('index.html'))
f_file = Resource(storages['file'](f.strpath))
f_sftp = Resource(storages['sftp']('/gamma'))
f_ftp = Resource(storages['ftp']('/beta'))
assert f_http.exists()
delete_files(f_file, f_sftp, f_ftp)
assert not f_file.exists()\
and not f_sftp.exists()\
and not f | _ftp.exists()
transfert.actions.copy(f_http, f_ftp, size=40960)
assert f_ftp.exists() and f_http.exists()
transfert.actions.copy(f_ftp, f_sftp, size=40960)
assert f_ftp.exists() and f_sftp.exists()
transfert.actions.copy(f_sftp, f_file, size=40960)
assert f_sftp.exists() and f_file | .exists()
|
stardog-union/stardog-graviton | aws/etc/packer/tools/python/stardog/cluster/update_stardog.py | Python | apache-2.0 | 1,959 | 0 | import logging
import subprocess
import sys
import stardog.cluster.utils as utils
def upload_file(ip, upload_file):
scp_opts = "-o StrictHostKeyChecking=no -o | UserKnownHostsFile=/dev/null"
cmd = "scp -r %s %s %s:%s" % (scp_opts, uploa | d_file, ip, upload_file)
return utils.command(cmd)
def refresh_stardog_binaries(ip, release_file):
ssh_opts = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
refresh_cmd = "/usr/local/bin/stardog-refresh-binaries"
cmd = "ssh %s %s '%s %s'" % (ssh_opts, ip, refresh_cmd, release_file)
return utils.command(cmd)
def stop_stardog(ip):
ssh_opts = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
refresh_cmd = "sudo systemctl stop stardog"
cmd = "ssh %s %s '%s'" % (ssh_opts, ip, refresh_cmd)
return utils.command(cmd)
def start_stardog(ip):
ssh_opts = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
refresh_cmd = "sudo systemctl start stardog"
cmd = "ssh %s %s '%s'" % (ssh_opts, ip, refresh_cmd)
return utils.command(cmd)
def main():
deploy_name = sys.argv[1]
logging.debug("Deployment name: %s" % deploy_name)
count = int(sys.argv[2])
logging.debug("Count: %s" % count)
release_file = sys.argv[3]
logging.debug("Release file: %s" % release_file)
region = utils.get_region()
ips = utils.get_internal_ips_by_asg(deploy_name, count, region)
errors = []
for ip in ips:
rc, err = upload_file(ip, release_file)
if rc != 0:
errors.append(err)
for ip in ips:
rc, err = stop_stardog(ip)
if rc != 0:
errors.append(err)
for ip in ips:
rc, err = refresh_stardog_binaries(ip, release_file)
if rc != 0:
errors.append(err)
for ip in ips:
rc, err = start_stardog(ip)
if rc != 0:
errors.append(err)
if errors:
raise Exception(errors)
return 0
|
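The four helpers above repeat the same ssh plumbing; a possible consolidation (same (rc, err) contract as utils.command used above) is:
SSH_OPTS = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
def remote_cmd(ip, remote):
    # Run a single command on a node over ssh, returning (rc, err).
    return utils.command("ssh %s %s '%s'" % (SSH_OPTS, ip, remote))
def stop_stardog(ip):
    return remote_cmd(ip, "sudo systemctl stop stardog")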
genialis/resolwe | resolwe/flow/managers/state.py | Python | apache-2.0 | 1,297 | 0.001542 | """.. Ignore | pydocstyle D400.
=====
State
=====
Constants used by the dispatcher.
.. autofunction:: resolwe.flow.managers.state.update_constants
"""
# This module should not import anything local, or there wi | ll be circular
# dependencies, since the constants are needed in various sub-modules inside
# resolwe.flow.managers.
from collections import namedtuple
from django.conf import settings
ManagerChannelPair = namedtuple("ManagerChannelPair", ["queue", "queue_response"])
MANAGER_CONTROL_CHANNEL = "DUMMY.control"
LISTENER_CONTROL_CHANNEL = "DUMMY.listener"
def update_constants():
"""Recreate channel name constants with changed settings.
This kludge is mostly needed due to the way Django settings are
patched for testing and how modules need to be imported throughout
the project. On import time, settings are not patched yet, but some
of the code needs static values immediately. Updating functions such
as this one are then needed to fix dummy values.
"""
global MANAGER_CONTROL_CHANNEL, LISTENER_CONTROL_CHANNEL
redis_prefix = getattr(settings, "FLOW_MANAGER", {}).get("REDIS_PREFIX", "")
MANAGER_CONTROL_CHANNEL = "{}.control".format(redis_prefix)
LISTENER_CONTROL_CHANNEL = "{}.listener".format(redis_prefix)
update_constants()
|
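A sketch of the test-time pattern the docstring above describes (override_settings is standard Django; the channel names follow the formats defined above):
from django.test import override_settings
with override_settings(FLOW_MANAGER={"REDIS_PREFIX": "test"}):
    update_constants()
    assert MANAGER_CONTROL_CHANNEL == "test.control"
    assert LISTENER_CONTROL_CHANNEL == "test.listener"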
tseaver/google-cloud-python | monitoring/google/cloud/monitoring_v3/gapic/notification_channel_service_client.py | Python | apache-2.0 | 52,223 | 0.002221 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.monitoring.v3 NotificationChannelService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.api import metric_pb2 as api_metric_pb2
from google.api import monitored_resource_pb2
from google.cloud.monitoring_v3.gapic import enums
from google.cloud.monitoring_v3.gapic import notification_channel_service_client_config
from google.cloud.monitoring_v3.gapic.transports import (
notification_channel_service_grpc_transport,
)
from google.cloud.monitoring_v3.proto import alert_pb2
from google.cloud.monitoring_v3.proto import alert_service_pb2
from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc
from google.cloud.monitoring_v3.proto import common_pb2
from google.cloud.monitoring_v3.proto import group_pb2
from google.cloud.monitoring_v3.proto import group_service_pb2
from google.cloud.monitoring_v3.proto import group_service_pb2_grpc
from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc
from google.cloud.monitoring_v3.proto import notification_pb2
from google.cloud.monitoring_v3.proto import notification_service_pb2
from google.cloud.monitoring_v3.proto import notification_service_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-monitoring"
).version
class NotificationChannelServiceClient(object):
"""
The Notification Channel API provides access to configuration that
controls how messages related to incidents are sent.
"""
SERVICE_ADDRESS = "monitoring.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.monitoring.v3.NotificationChannelService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
NotificationChannelServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def notification_channel_path(cls, project, notification_channel):
"""Return a fully-qualified notification_channel string."""
return google.api_core.path_template.expand(
"projects/{project}/notificationChannels/{notification_channel}",
project=project,
notification_channel=notification_channel,
)
@classmethod
def notification_channel_descriptor_path(cls, project, cha | nnel_descriptor):
"""Return a fully-qualified n | otification_channel_descriptor string."""
return google.api_core.path_template.expand(
"projects/{project}/notificationChannelDescriptors/{channel_descriptor}",
project=project,
channel_descriptor=channel_descriptor,
)
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
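    # Example expansions for the path helpers above (identifiers hypothetical):
    #   project_path('my-project')
    #     -> 'projects/my-project'
    #   notification_channel_path('my-project', 'chan-123')
    #     -> 'projects/my-project/notificationChannels/chan-123'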
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.NotificationChannelServiceGrpcTransport,
Callable[[~.Credentials, type], ~.NotificationChannelServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = notification_channel_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the s |
rananda/cfme_tests | cfme/tests/containers/test_reload_button_provider.py | Python | gpl-2.0 | 2,719 | 0.002574 | import pytest
from cfme.containers.provider import ContainersProvider
from utils import testgen, version
from cfme.web_ui import toolbar as tb
from utils.appliance.implementations.ui import navigate_to
pytestmark = [
pytest.mark.uncollectif(
lambda: version.current_version() < "5.6"),
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(2)]
pytest_generate_tests = testgen.generate([ContainersProvider], scope='function')
@pytest.mark.polarion('CMP-9878')
def test_reload_button_provider(provider):
""" This test verifies the data integrity of the fields in
the Relationships table after clicking the "reload"
button. Fields that are being verified as part of provider.validate.stats():
Projects, Routes, Container Services, Replicators, Pods, Containers, and Nodes.
Images are being validated separately, since the total
number of images in CFME 5.7 includes all images from the OSE registry as well
as the images that are being created from the running pods. The images are searched
according to the @sha. Image Registries are also validated separately.
"""
navigate_to(provider, 'Details')
tb.select('Reload Current Display')
provider.validate_stats(ui=True)
list_img_from_registry = provider.mgmt.list_image()
list_img_from_registry_splitted = [i.id.split(
'@sha256:')[-1] for i in list_img_from_registry]
list_img_from_openshift = provider.mgmt.list_image_openshift()
list_img_from_openshift_splitted = [d['name']
for d in list_img_from_openshift]
list_img_from_openshift_parsed = [i[7:].split(
'@sha256:')[-1] for i in list_img_from_openshift_splitted]
list_img_from_registry_splitted_new = set(list_img_from_registry_splitted)
list_img_from_openshift_parsed_new = set(list_img_from_openshift_parsed)
list_img_from_openshift_parsed_new.update(l | ist_img_from_registry_splitted_new)
num_img_in_cfme = provider.num_image()
# TODO Fix num_image_ui()
num_img_cfme_56 = len(provider.mgmt.list_image())
num_img_cfme_57 = len(list_img_from_openshift_parsed_new)
assert num_img_in_cfme == version.pick({version.LOWEST: num_img_cfme_56,
'5.7': num_img_cfme_57})
# validate the number of image registries
list_all_rgstr = provider.mgmt.list_image_registry()
list_all_rgstr_revised = [i.h | ost for i in list_all_rgstr]
list_all_rgstr_new = filter(lambda ch: 'openshift3' not in ch, list_all_rgstr_revised)
num_rgstr_in_cfme = provider.summary.relationships.image_registries.value
assert len(list_all_rgstr_new) == num_rgstr_in_cfme
|
talkincode/ToughPORTAL | toughportal/common/secret.py | Python | agpl-3.0 | 344 | 0.017442 | #!/usr/bin/env python
#coding:utf-8
import sys,os
from toughportal.common | import utils
import shutil
import time
impo | rt random
import ConfigParser
def gen_secret(clen=32):
rg = random.SystemRandom()
r = list('1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
return ''.join([rg.choice(r) for _ in range(clen)])
|
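gen_secret draws from random.SystemRandom over [0-9a-zA-Z], so it is suitable for API secrets; a quick usage sketch:
secret = gen_secret()        # 32 characters by default
short_token = gen_secret(8)  # custom length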
pmacosta/putil | tests/test_ptypes.py | Python | mit | 9,287 | 0.003338 | # test_ptypes.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,W0108
# Standard library imports
import sys
import numpy
# Putil imports
import putil.ptypes
from putil.test import AE, AI
###
# Global variables
###
emsg = lambda msg: (
'[START CONTRACT MSG: {0}]Argument `*[argument_name]*` '
'is not valid[STOP CONTRACT MSG]'.format(msg)
)
###
# Helper functions
###
def check_contract(obj, name, value):
AE(obj, ValueError, emsg(name), obj=value)
###
# Test functions
###
def test_color_space_option_contract():
""" Tests for LineStyleOption pseudo-type """
obj = putil.ptypes.color_space_option
check_contract(obj, 'color_space_option', 5)
exmsg = (
"[START CONTRACT MSG: color_space_option]Argument "
"`*[argument_name]*` is not one of 'binary', 'Blues', 'BuGn', "
"'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', "
"'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', "
"'YlOrBr' or 'YlOrRd' (case insensitive)[STOP CONTRACT MSG]"
)
AE(obj, ValueError, exmsg, obj='x')
for item in [
'binary', 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens',
'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd',
'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr',
'YlOrRd']:
putil.ptypes.color_space_option(item)
def test_csv_col_filter_contract():
""" Test CsvColFilter pseudo-type """
items = [True, 1.0, [], [1, True, 3], ['a', 'b', True]]
for item in items:
check_contract(putil.ptypes.csv_col_filter, 'csv_col_filter', item)
items = [None, 1, 'a', [1, 2], ['a']]
for item in items:
putil.ptypes.csv_col_filter(item)
def test_csv_col_sort_contract():
""" Test CsvColSort pseudo-type """
items = [
True, None, ['a', None], {(1, 2):'A'}, {'a':True}, {0:'hello'}, []
]
for item in items:
check_contract(putil.ptypes.csv_col_sort, 'csv_col_sort', item)
items = [
1,
'a',
{'a':'D'},
{0:'d'},
{1:'a'},
[1, 'a'],
[1, 'a', {'b':'d'}, {0:'A'}]
]
for item in items:
putil.ptypes.csv_col_sort(item)
def test_csv_data_filter_contract():
""" Test CsvDataFilter pseudo-type """
items = [
True,
(1, 2, 3),
(True, 'A'),
(True, ),
(None, True),
('A', 'A'),
({'B':1}, {'C':5}),
{2.0:5},
({2.0:5}, 'A'),
(['A', True], {'A':1}),
('A', {}),
([], {'A':1}),
({}, []),
{'dfilter':{'a':{'xx':2}}},
{'dfilter':{'a':[3, {'xx':2}]}}
]
for item in items:
check_contract(putil.ptypes.csv_data_filter, 'csv_data_filter', item)
items = [
None,
(None, ),
(None, None),
1,
'A',
['B', 1],
{'A':1},
{'A':1, 'B':2}
]
for item in items:
putil.ptypes.csv_data_filter(item)
def test_csv_filtered_contract():
""" Test CsvFiltered pseudo-type """
for item in [5, 'BC']:
check_contract(putil.ptypes.csv_filtered, 'csv_filtered', item)
for item in [True, False, 'B', 'b', 'C', 'c', 'R', 'r', 'N', 'n']:
putil.ptypes.csv_filtered(item)
def test_csv_row_filter_contract():
""" Test CsvRowFilter pseudo-type """
items = [
'a',
{5.0:10},
{'a':{'xx':2}},
{'a':[3, {'xx':2}]},
{'b':True}
]
for item in items:
check_contract(putil.ptypes.csv_row_filter, 'csv_row_filter', item)
exmsg = (
'[START CONTRACT MSG: csv_row_filter]Argument '
'`*[argument_name]*` is empty[STOP CONTRACT MSG]'
)
AE(putil.ptypes.csv_row_filter, ValueError, exmsg, obj={})
items = [None, {'x':5}]
for item in items:
putil.ptypes.csv_row_filter(item)
def test_engineering_notation_number():
""" Test EngineeringNotationNumber pseudo-type """
obj = putil.ptypes.engineering_notation_number
items = ['3.12b', 'f', 'a1b', ' + 123.45f ']
for item in items:
check_contract(obj, 'engineering_notation_number', item)
items = [' +123.45f ', ' -0 ']
for item in items:
obj(item)
def test_engineering_notation_suffix():
""" Test EngineeringNotationSuffix pseudo-type """
obj = putil.ptypes.engineering_notation_suffix
check_contract(obj, 'engineering_notation_suffix', 'b')
obj('u')
def test_file_name_contract():
""" Test for file_name custom contract """
@putil.pcontracts.contract(sfn='file_name')
def func(sfn):
""" Sample function to test file_name custom contract """
return sfn
items = [3, 'test\0']
for item in items:
AI(func, 'sfn', sfn=item)
func('some_file.txt')
# Test with Python executable (should be portable across systems), file
# should be valid although not having permissions to write it
func(sys.executable)
def test_file_name_exists_contract():
""" Test for file_name_exists custom contract """
@putil.pcontracts.contract(sfn='file_name_exists')
def func(sfn):
""" Sample function to test file_name_exists custom contract """
return sfn
items = [3, 'test\0']
for item in items:
AI(func, 'sfn', sfn=item)
exmsg = 'File _file_does_not_exist could not be found'
AE(func, OSError, exmsg, sfn='_file_does_not_exist')
# Test with Python executable (should be portable across systems)
func(sys.executable)
def test_function_contract():
""" Tests for Function pseudo-type """
def func1():
pass
check_contract(putil.ptypes.function, 'function', 'a')
items = (func1, None)
for item in items:
putil.ptypes.function(item)
def test_increasing_real_numpy_vector_contract():
""" Tests for IncreasingRealNumpyVector pseudo-type """
obj = putil.ptypes.increasing_real_numpy_vector
items = [
'a',
[1, 2, 3],
numpy.array([]),
numpy.array([[1, 2, 3], [4, 5, 6]]),
numpy.array(['a', 'b']),
numpy.array([1, 0, -3]),
numpy.array([10.0, 8.0, 2.0])
]
for item in items:
check_contract(obj, 'increasing_real_numpy_vector', item)
items = [
numpy.array([1, 2, 3]),
numpy.array([10.0, 12.1, 12.5]),
numpy.array([10.0])
]
for item in items:
obj(item)
def test_interpolation_option_contract():
""" Tests for InterpolationOption pseudo-type """
obj = putil.ptypes.interpolation_option
check_contract(obj, 'interpolation_option', 5)
exmsg = (
"[START CONTRACT MSG: interpolation_option]Argument "
"`*[argument_name]*` is not one of ['STRAIGHT', 'STEP', 'CUBIC', "
"'LINREG'] (case insensitive)[STOP CONTRACT MSG]"
)
AE(obj, ValueError, exmsg, obj='x')
obj(None)
for item in ['STRAIGHT', 'STEP', 'CUBIC', 'LINREG']:
obj(item)
obj(item.lower())
def test_line_style_option_contract():
""" Tests for LineStyleOption pseudo-type """
check_contract(putil.ptypes.line_style_option, 'line_style_option', 5)
exmsg = (
"[START CONTRACT MSG: line_style_option]Argument "
"`*[argument_name]*` is not one of ['-', '--', '-.', "
"':'][STOP CONTRACT MSG]"
)
AE(putil.ptypes.line_style_option, ValueError, exmsg, obj='x')
putil.ptypes.line_style_option(None)
for item in ['-', '--', '-.', ':']:
putil.ptypes.line_style_option(item)
def test_non_negative_integer():
""" Test PosInteger pseudo-type """
obj = putil.ptypes.non_negative_integer
items = ['b', True, -3, 5.2]
for item in items:
check_contract(obj, 'non_negative_integer', item)
items = [0, 2]
for item in items:
obj(item)
def test_offset_range_contract():
""" Tests for PositiveRealNumber pseudo-type """
items = ['a', [1, 2, 3], False, -0.1, -1.1]
for item in items:
check_contract(putil.ptypes.offset_range, 'offset_range', item)
items = [0, 0.5, 1]
for item in items:
putil.ptypes.offset_range(item)
def test_positive_r |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/google-api-python-client/apiclient/ext/django_orm.py | Python | bsd-3-clause | 1,509 | 0.007952 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apiclient
import base64
import pickle
from django.db import models
class OAuthCredentialsField(models.Field):
__metaclass__ = models.SubfieldBase
def db_type(self):
return 'VARCHAR'
def to_python(self, value):
if value is None:
return None
if isinstance(value, apiclient.oauth.Credentials):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value):
return base64.b64encode(pickle.dumps(value))
class FlowThreeLeggedField(models.Field):
__metaclass__ = models.SubfieldBase
def db_type(self):
return 'VARCHAR'
def to_python(self, value):
if value is None:
return None
if isinstance(value, apiclient.oauth.FlowThreeLegged):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value):
return base64.b64encode(pickle.dumps(value))
|
AlmostBetterNetwork/podmaster-host | podcasts/migrations/0015_auto_20160503_0248.py | Python | apache-2.0 | 667 | 0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
# Generated by Django 1.9 on 2016-05-03 02:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('podcasts', '0014_auto_20160503_0247'),
]
operations = [
migrations.RemoveField(
model_name='podcast',
name='tip_last_payout',
),
migrations.RemoveField(
model_name='podcast',
name='tip_last_payout_amount',
),
migrations.RemoveField(
model_name='podcast',
name='tip_value',
),
]
|
kamsuri/vms | vms/administrator/admin.py | Python | gpl-2.0 | 217 | 0 | # Django
from administrator.models import Administrator
# local Django
from django.contrib import admin
class AdministratorAdmin(admin.ModelAdmin):
pass
admin.site.register(Administrator, AdministratorAdmin)
|
repotvsupertuga/tvsupertuga.repository | script.module.universalscrapers/lib/universalscrapers/modules/js2py/legecy_translators/objects.py | Python | gpl-2.0 | 11,176 | 0.008053 | """ This module removes all objects/arrays from JS source code and replaces them with LVALS.
Also it has a function translating removed objects/arrays to Python code.
Use this module just after removing constants. Later move on to removing functions"""
OBJECT_LVAL = 'PyJsLvalObject%d_'
ARRAY_LVAL = 'PyJsLvalArray%d_'
from utils import *
from jsparser import *
from nodevisitor import exp_translator
import functions
from flow import KEYWORD_METHODS
def FUNC_TRANSLATOR(*a):# stupid import system in python
raise RuntimeError('Remember to set func translator. Thank you.')
def set_func_translator(ftrans):
# stupid stupid Python or Peter
global FUNC_TRANSLATOR
FUNC_TRANSLATOR = ftrans
def is_empty_object(n, last):
"""n may be the inside of block or object"""
if n.strip():
return False
# seems to be but can be empty code
last = last.strip()
markers = {')', ';',}
if not last or last[-1] in markers:
return False
return True
# todo refine this function
def is_object(n, last):
"""n may be the inside of block or object.
last is the code before object"""
if is_empty_object(n, last):
return True
if not n.strip():
return False
#Object contains lines of code so it cant be an object
if len(argsplit(n, ';'))>1:
return False
cands = argsplit(n, ',')
if not cands[-1].strip():
return True # {xxxx,} empty after last , it must be an object
for cand in cands:
cand = cand.strip()
# separate each candidate element at : in dict and check whether they are correct...
kv = argsplit(cand, ':')
if len(kv) > 2: # set the len of kv to 2 because of this stupid : expression
kv = kv[0],':'.join(kv[1:])
if len(kv)==2:
# key value pair, check whether not label or ?:
k, v = kv
if not is_lval(k.strip()):
return False
v = v.strip()
if v.startswith('function'):
continue
#will fail on label... {xxx: while {}}
if v[0]=='{': # value cant be a code block
return False
for e in KEYWORD_METHODS:
# if v starts with any statement then return false
if v.startswith(e) and len(e)<len(v) and v[len(e)] not in IDENTIFIER_PART:
return False
elif not (cand.startswith('set ') or cand.startswith('get ')):
return False
return True
def is_array(last):
#it can be prop getter
last = last.strip()
if any(endswith_keyword(last, e) for e in ['return', 'new', 'void', 'throw', 'typeof', 'in', 'instanceof']):
return True
markers = {')', ']'}
return not last or not (last[-1] in markers or last[-1] in IDENTIFIER_PART)
def remove_objects(code, count=1):
""" This function replaces objects with OBJECTS_LVALS, returns new code, replacement dict and count.
count arg is the number that should be added to the LVAL of the first replaced object
"""
replacements = {} #replacement dict
br = bracket_split(code, ['{}', '[]'])
res = ''
last = ''
for e in br:
#test whether e is an object
if e[0]=='{':
n, temp_rep, cand_count = remove_objects(e[1:-1], count)
# if e was not an object then n should not contain any :
if is_object(n, last):
#e was an object
res += ' '+OBJECT_LVAL % count
replacements[OBJECT_LVAL % count] = e
count += 1
else:
# e was just a code block but could contain objects inside
res += '{%s}' % n
count = cand_count
replacements.update(temp_rep)
elif e[0]=='[':
if is_array(last):
res += e # will be translated later
else: # prop get
n, rep, count = remove_objects(e[1:-1], count)
res += '[%s]' % n
replacements.update(rep)
else: # e does not contain any objects
res += e
last = e #needed to test for this stipid empty object
return res, replacements, count
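# Illustrative sketch (not from the original source): given the LVAL templates
# above, a small object literal is pulled out like so --
#
#   new_code, reps, nxt = remove_objects("var a = {x: 1, y: 2};")
#   # new_code -> 'var a =  PyJsLvalObject1_;'
#   # reps     -> {'PyJsLvalObject1_': '{x: 1, y: 2}'}
#   # nxt      -> 2, the index the next replaced object would receive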
def remove_arrays(code, count=1):
"""removes arrays and replaces them with ARRAY_LVALS
returns new code and replacement dict
*NOTE* has to be called AFTER remove objects"""
res = ''
last = ''
replacements = {}
for e in bracket_split(code, ['[]']):
if e[0]=='[':
if is_array(last):
name = ARRAY_LVAL % count
res += ' ' + name
replacements[name] = e
count += 1
else: # pseudo array. But pseudo array can contain true array. for example a[['d'][3]] has 2 pseudo and 1 true array
cand, new_replacements, count = remove_arrays(e[1:-1], count)
res += '[%s]' % cand
replacements.update(new_replacements)
else:
res += e
last = e
return res, replacements, count
def translate_object(obj, lval, obj_count=1, arr_count=1):
obj = obj[1:-1] # remove {} from both ends
obj, obj_rep, obj_count = remove_objects(obj, obj_count)
obj, arr_rep, arr_count = remove_arrays(obj, arr_count)
# functions can be defined inside objects. exp translator cant translate them.
# we have to remove them and translate with func translator
# its better explained in translate_array function
obj, hoisted, inline = functions.remove_functions(obj, all_inline=True)
assert not hoisted
gsetters_after = ''
keys = argsplit(obj)
res = []
for i, e in enumerate(keys, 1):
e = e.strip()
if e.startswith('set '):
gsetters_after += translate_setter(lval, e)
elif e.startswith('get '):
gsetters_after += translate_getter(lval, e)
elif ':' not in e:
if i<len(keys): # can happen legally only in the last element {3:2,}
raise SyntaxError('Unexpected "," in Object literal')
break
else: #Not getter, setter or elision
spl = argsplit(e, ':')
if len(spl)<2:
raise SyntaxError('Invalid Object literal: '+e)
try:
key, value = spl
except: #len(spl)> 2
print 'Unusual case ' + repr(e)
key = spl[0]
value = ':'.join(spl[1:])
key = key.strip()
if is_internal(key):
key = '%s.to_string().value' % key
else:
key = repr(key)
value = exp_translator(value)
if not value:
raise SyntaxError('Missing value in Object literal')
res.append('%s:%s' % (key, value))
res = '%s = Js({%s})\n' % (lval, ','.join(res)) + gsetters_after
# translate all the nested objects (including removed earlier functions)
for nested_name, nested_info in inline.iteritems(): # functions
nested_block, nested_args = nested_info
new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)
res = new_def + res
for lval, obj in obj_rep.iteritems(): #objects
new_def, obj_count, arr_count = translate_object(obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
res = new_def + res
for lval, obj in arr_rep.iteritems(): # arrays
new_def, obj_count, arr_count = translate_array(obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
res = new_def + res
return res, obj_count, arr_count
def translate_setter(lval, setter):
func = 'function' + setter[3:]
try:
_, data, _ = functions.remove_functions(func)
if not data or len(data)>1:
raise Exception()
except:
raise SyntaxError('Could not parse setter: '+setter)
prop = data.keys()[0]
body, args = data[prop]
if len(args)!=1: #setter must have exactly 1 argument
raise SyntaxError('Invalid setter. It must take exactly 1 argument.')
# now |
YoshikawaMasashi/magenta | magenta/models/shared/events_rnn_graph.py | Python | apache-2.0 | 16,101 | 0.011738 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides function to build an event sequence RNN model's graph."""
# internal imports
import tensorflow as tf
import magenta
def make_rnn_cell(rnn_layer_sizes,
dropout_keep_prob=1.0,
attn_length=0,
base_cell=tf.contrib.rnn.BasicLSTMCell,
state_is_tuple=False):
"""Makes a RNN cell from the given hyperparameters.
Args:
rnn_layer_sizes: A list of integer sizes (in units) for each layer of the
RNN.
dropout_keep_prob: The float probability to keep the output of any given
sub-cell.
attn_length: The size of the attention vector.
base_cell: The base tf.contrib.rnn.RNNCell to use for sub-cells.
state_is_tuple: A boolean specifying whether to use tuple of hidden matrix
and cell matrix as a state instead of a concatenated matrix.
Returns:
A tf.contrib.rnn.MultiRNNCell based on the given hyperparameters.
"""
cells = []
for num_units in rnn_layer_sizes:
cell = base_cell(num_units, state_is_tuple=state_is_tuple)
cell = tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob=dropout_keep_prob)
cells.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=state_is_tuple)
if attn_length:
cell = tf.contrib.rnn.AttentionCellWrapper(
cell, attn_length, state_is_tuple=state_is_tuple)
return cell
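# Illustrative usage (hyperparameter values are made-up examples): a 2-layer
# LSTM with dropout and a length-40 attention window, per the docstring above.
#
#   cell = make_rnn_cell([128, 128], dropout_keep_prob=0.9, attn_length=40)
#   outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)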
def dilated_cnn(inputs,
initial_state,
input_size,
block_num=1,
block_size=7,
dropout_keep_prob=1.0,
residual_cnl=16,
dilation_cnl=8,
output_cnl=32,
use_gate=True,
use_step=True,
mode='train'):
"""
Returns outputs of dilated CNN from the given hyperparameters.
This model uses a convolutional neural network as a generative model, like WaveNet.
Args:
inputs: Model inputs.
initial_state: A numpy array containing initial padding buffer of
generative CNN model.
input_size: The size of input vector.
block_num: The number of dilated convolution blocks.
block_size: The size of dilated convolution blocks.
dropout_keep_prob: The float probability to keep the output of any given
sub-cell.
residual_cnl: The size of hidden residual state.
dilation_cnl: The size of hidden dilation state.
output_cnl: The size of output vector.
use_gate: A boolean specifying whether to use gated activation units.
use_step: A boolean specifying whether to use skip connection.
mode: 'train', 'eval', or 'generate'.
Returns:
outputs: Model outputs
final_state: A numpy array containing next padding buffer of generative
CNN model.
"""
if mode == 'train':
is_training = True
else:
is_training = False
dilation = [2**i for i in range(block_size)]*block_num
batch_num = tf.shape(inputs)[0]
h = tf.reshape(inputs, [batch_num,-1,1,input_size])
dlt_sum = [sum(dilation[:i]) for i in range(len(dilation))]
dlt_sum.append(sum(dilation))
with tf.variable_scope("first_conv"):
h = tf.contrib.layers.batch_norm(h, decay=0.999, center=True, scale=True,
updates_collections=None, is_training=is_training,
scope="first_conv", reuse=True)
first_weights = tf.get_variable(
"first_weights", [1,1,input_size,residual_cnl],
initializer=tf.random_normal_initializer())
h = tf.nn.conv2d(h, first_weights, strides=[1,1,1,1], padding='SAME')
final_state = []
if use_step:
step = []
for i,dlt in enumerate(dilation):
pad = initial_state[:,dlt_sum[i]*residual_cnl:dlt_sum[i+1]*residual_cnl]
pad = tf.reshape(pad,[batch_num,dlt,1,residual_cnl])
_h = h
h = tf.concat([pad,h],1)
_fs = tf.reshape(h[:,-dlt:,:,:],[batch_num,dlt*residual_cnl])
final_state.append(_fs)
with tf.variable_scope("conv{}".format(i)):
if use_gate:
gate_weights = tf.get_variable(
"gate_weights", [2,1,residual_cnl,dilation_cnl],
initializer=tf.random_normal_initializer())
gate_biases = tf.get_variable(
"gate_biases", [dilation_cnl],
initializer=tf.constant_initializer(0.0))
gate = tf.nn.atrous_conv2d(
h, gate_weights, dlt, padding="VALID")
gate = tf.contrib.layers.batch_norm(gate, decay=0.999, center=True,
scale=True, updates_collections=None,
is_training=is_training,
scope="gate_bn{}".format(i), reuse=True)
gate = tf.sigmoid(tf.nn.bias_add(gate, gate_biases))
filter_weights = tf.get_variable(
"filter_weights", [2,1,residual_cnl,dilation_cnl],
initializer=tf.random_normal_initializer())
filter_biases = tf.get_variable(
"filter_biases", [dilation_cnl],
initializer=tf.constant_initializer(0.0))
filtr = tf.nn.atrous_conv2d(
h, filter_weights, dlt, padding="VALID")
filtr = tf.contrib.layers.batch_norm(filtr, decay=0.999, center=True,
scale=True, updates_collections=None,
is_training=is_training,
scope="filter_bn{}".format(i), reuse=True)
filtr = tf.tanh(tf.nn.bias_add(filtr, filter_biases))
after_weights = tf.get_variable(
"after_weights", [1,1,dilation_cnl,residual_cnl],
initializer=tf.random_normal_initializer())
after_biases = tf.get_variable(
"after_biases", [residual_cnl],
initializer=tf.constant_initializer(0.0))
if use_gate:
after = tf.nn.conv2d(
gate*filtr, after_weights,strides=[1,1,1,1],padding='SAME')
else:
after = tf.nn.conv2d(
filtr, after_weights,strides=[1,1,1,1],padding='SAME')
after = tf.nn.bias_add(after, after_biases)
if use_step:
step.append(after)
h = after + _h
if use_step:
step = tf.concat(step,3)
step_weights = tf.get_variable(
"step_weights", [1,1,residual_cnl*len(dilation),output_cnl],
initializer=tf.random_normal_initializer())
h = tf.nn.conv2d(step, step_weights, strides=[1,1,1,1], padding='SAME')
h = tf.contrib.layers.batch_norm(h, decay=0.999, center=True, scale=True,
updates_collections=None, is_training=is_training,
scope="step_bn", reuse=True)
h = tf.nn.relu(h)
last_weights = tf.get_variable(
"last_weights", [1,1,output_cnl,output_cnl],
initializer=tf.random_normal_initializer())
else:
last_weights = tf.get_variable(
"last_weights", [1,1,residual_cnl,output_cnl],
initializer=tf.random_normal_initializer())
last_biases = tf.get_variable(
"last_biases", [output_cnl], initializer=tf.constant_initializer(0.0))
h = tf.nn.conv2d(h, last_weights, strides=[1,1,1,1], padding='SAME')
h = tf.contrib.layers.batch_norm(h, decay=0.999, center=True, scale=True,
updates_collections=None, is_training=is_training,
scope="last_bn", reuse=True)
h = tf.nn.relu(tf.nn.bias_add(h, last_biases))
h = tf.nn.dropout(h, dropout_keep_prob)
final_state = tf.concat(final_state,1)
outputs = tf.reshape(h, [batch_num,-1,output_cnl])
return outputs,final_state
def get_dilated_cnn_initial_state(batch_size,
|
felipenaselva/felipe.repository | script.module.streamhub/resources/premium/modules/control.py | Python | gpl-2.0 | 3,906 | 0.001792 | # -*- coding: utf-8 -*-
'''
Tulip routine libraries, based on lambda's lamlib
Author Twilight0
License summary below, for more details please read license.txt file
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os, xbmc, xbmcaddon, xbmcplugin, xbmcgui, xbmcvfs
integer = 1000
addon = xbmcaddon.Addon
lang = xbmcaddon.Addon().getLocalizedString
setting = xbmcaddon.Addon().getSetting
setSetting = xbmcaddon.Addon().setSetting
addonInfo = xbmcaddon.Addon().getAddonInfo
addItem = xbmcplugin.addDirectoryItem
directory = xbmcplugin.endOfDirectory
content = xbmcplugin.setContent
property = xbmcplugin.setProperty
resolve = xbmcplugin.setResolvedUrl
infoLabel = xbmc.getInfoLabel
condVisibility = xbmc.getCondVisibility
jsonrpc = xbmc.executeJSONRPC
keyboard = xbmc.Keyboard
sleep = xbmc.sleep
execute = xbmc.executebuiltin
skin = xbmc.getSkinDir()
player = xbmc.Player()
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
transPath = xbmc.translatePath
skinPath = xbmc.translatePath('special://skin/')
addonPath = xbmc.translatePath(addonInfo('path'))
dataPath = xbmc.translatePath(addonInfo('profile')).decode('utf-8')
window = xbmcgui.Window(10000)
dialog = xbmcgui.Dialog()
progressDialog = xbmcgui.DialogProgress()
windowDialog = xbmcgui.WindowDialog()
button = xbmcgui.ControlButton
image = xbmcgui.ControlImage
alphanum_input = xbmcgui.INPUT_ALPHANUM
password_input = xbmcgui.INPUT_PASSWORD
hide_input = xbmcgui.ALPHANUM_HIDE_INPUT
item = xbmcgui.ListItem
openFile = xbmcvfs.File
makeFile = xbmcvfs.mkdir
deleteFile = xbmcvfs.delete
deleteDir = xbmcvfs.rmdir
listDir = xbmcvfs.listdir
exists = xbmcvfs.exists
join = os.path.join
settingsFile = os.path.join(dataPath, 'settings.xml')
bookmarksFile = os.path.join(dataPath, 'bookmarks.db')
cacheFile = os.path.join(dataPath, 'cache.db')
def infoDialog(message, heading=addonInfo('name'), icon='', time=3000):
if icon == '':
icon = addonInfo('icon')
try:
dialog.notification(heading, message, icon, time, sound=False)
except:
execute("Notification(%s, %s, %s, %s)" % (heading, message, time, icon))
def okDialog(heading, line1):
return dialog.ok(heading, line1)
def inputDialog(heading, _type_=''):
return dialog.input(heading, _type_)
def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
return dialog.yesno(heading, line1, line2, line3, nolabel, yeslabel)
def selectDialog(list, heading=addonInfo('name')):
return dialog.select(heading, list)
def openSettings(query=None, id=addonInfo('id')):
try:
idle()
execute('Addon.OpenSettings(%s)' % id)
if query is None:
raise Exception()
c, f = query.split('.')
execute('SetFocus(%i)' % (int(c) + 100))
execute('SetFocus(%i)' % (int(f) + 200))
except:
return
def openSettings_alt():
try:
idle()
xbmcaddon.Addon().openSettings()
except:
return
def openPlaylist():
return execute('ActivateWindow(VideoPlaylist)')
def refresh():
return execute('Container.Refresh')
def idle():
return execute('Dialog.Close(busydialog)')
def set_view_mode(vmid):
return execute('Container.SetViewMode({0})'.format(vmid))
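# Illustrative add-on usage (strings are made-up examples):
#
#   if yesnoDialog('Clear the cache?', '', ''):
#       deleteFile(cacheFile)
#       infoDialog('Cache cleared')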
|
Rdbaker/Rank | manage.py | Python | mit | 2,039 | 0.000981 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask_script import Manager, Shell, Server
from flask_script.commands import Clean, ShowUrls
from flask_migrate import MigrateCommand, Migrate
from flask.ext.sqlalchemy import sqlalchemy
import seed_db
from rank.app import create_app
from rank.settings import DevConfig, ProdConfig
from rank.core.models import Base, DB
DEFAULT_DB = 'postgres'
CREATE_DB = 'create database %s'
if os.environ.get("RANK_ENV") == 'prod':
application = create_app(ProdConfig)
else:
application = create_app(DevConfig)
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
migrate = Migrate(application, Base)
manager = Manager(application)
def _make_context():
"""Return context dict for a shell session so you can access
app, db, and the User model by default.
"""
return {'app': application, 'db': DB}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
@manager.command
def setup_db():
"""Set up the local and test databases."""
(base_uri, local_db) = application.config['SQLALCHEMY_DATABASE_URI'].rsplit('/', 1)
engine = sqlalchemy.create_engine('/'.join([base_uri, DEFAULT_DB]))
conn = engine.connect()
conn.execute('commit')
conn.execute(CREATE_DB % local_db)
conn.execute('commit')
test_db = local_db + '_test'
conn.execute(CREATE_DB % test_db)
conn.close()
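# Typical invocations (command names follow the @manager.command functions and
# the add_command() registrations below):
#
#   $ python manage.py setup_db
#   $ python manage.py seed_database
#   $ python manage.py server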
@manager.command
def seed_database():
"""Seed the database with the admins"""
if os.path.isfile('.admin.yml'):
seed_db.seed_admin('.admin.yml')
else:
raise Exception("No .admin.yml file was found in the top level directory.")
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
manager.add_command("urls", ShowUrls())
manager.add_command("clean", Clean())
if __name__ == '__main__':
manager.run()
|
timasjov/famous-algorithms | hashing/BloomFilter.py | Python | mit | 1,098 | 0.004554 | from random import randrange
MAX_K = 16
DEFAULT_K = 8
def hash(word):
wordStr = str(word)
assert len(wordStr) <= MAX_K
value = 0
for n, ch in enumerate(wordStr):
value += ord(ch) * 128 ** n
#value += 2 * ord(ch) ** n
return value
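# Worked example of the base-128 polynomial hash above:
#   hash('ab') = ord('a')*128**0 + ord('b')*128**1
#              = 97 + 98*128 = 12641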
class BloomFilter(object):
allchars = "".join([chr(i) for i in range(128)])
def __init__(self, tablesizes, k=DEFAULT_K):
self.tables = [(size, [0] * size) for size in tablesizes]
self.k = k
def add(self, word):
val = hash(word)
for size, ht in self.tables:
ht[val % size] = 1
def __contains__(self, word):
val = hash(word)
return all(ht[val % size] for (size, ht) in self.tables)
bloomFilter = BloomFilter([1001, 1003, 1005])
#bloomFilter = BloomFilter([100000])
lines = []
for line in open('1000_keys.txt'):
num = line.strip()
lines.append(num)
bloomFilter.add(num)
falsePositives = 0
for num in lines:
if not (num in bloomFilter):
falsePositives += 1
print('Number of false positives: ' + str(falsePositives))
|
syscoin/syscoin | test/functional/wallet_keypool_topup.py | Python | mit | 4,455 | 0.004265 | #!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
)
class KeypoolRestoreTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
wallet_path = os.path.join(self.nodes[1].datadir, self.chain, "wallets", self.default_wallet_name, self.wallet_data_filename)
wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(wallet_path, wallet_backup_path)
self.start_node(1, self.extra_args[1])
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
self.connect_nodes(0, 3)
for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
self.log.info("Generate keys for wallet with address type: {}".format(output_type))
idx = i+1
for _ in range(90):
addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
for _ in range(20):
addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
# Make sure we're creating the outputs we expect
address_details = self.nodes[idx].validateaddress(addr_extpool)
if i == 0:
assert not address_details["isscript"] and not address_details["iswitness"]
elif i == 1:
assert address_details["isscript"] and not address_details["iswitness"]
else:
assert not address_details["isscript"] and address_details["iswitness"]
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.generate(self.nodes[0], 1)
self.log.info("Restart node with wallet backup")
self.stop_node(idx)
shutil.copyfile(wallet_backup_path, wallet_path)
self.start_node(idx, self.extra_args[idx])
self.connect_nodes(0, idx)
self.sync_all()
self.log.info("Verify keypool is restored and balance is correct")
assert_equal(self.nodes[idx].getbalance(), 15)
assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
if self.options.descriptors:
if output_type == 'legacy':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/44'/1'/0'/0/110")
elif output_type == 'p2sh-segwit':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/49'/1'/0'/0/110")
elif output_type == 'bech32':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/84'/1'/0'/0/110")
else:
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/0'/0'/110'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
|
Cedev/a10-neutron-lbaas | a10_neutron_lbaas/tests/unit/v1/test_base.py | Python | apache-2.0 | 1,417 | 0 | # Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import a10_neutron_lbaas.tests.unit.test_base as test_base
class FakeModel(dict, object):
def __getitem__(self, key, default=None):
attr = getattr(self, key, default)
return attr
def get(self, key, default=None):
return getattr(self, key, default)
# def copy(self):
# import copy
# return copy.deepcopy(self)
class FakeHM(FakeModel):
def __init__(self, id="hm01", name="hm01"):
self.id = id
self.name = name
self.pools = []
class FakePool(FakeModel):
def __init__(self, id="p01", name="p01"):
self.id = id
self.name = name
class UnitTestBase(test_base.UnitTestBase):
def __init__(self, *args):
super(UnitTestBase, self).__init__(*args)
self.version = 'v1'
|
lscsoft/gwdetchar | gwdetchar/io/datafind.py | Python | gpl-3.0 | 7,423 | 0 | # coding=utf-8
# Copyright (C) Duncan Macleod (2015)
#
# This file is part of the GW DetChar python package.
#
# GW DetChar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GW DetChar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GW DetChar. If not, see <http://www.gnu.org/licenses/>.
"" | "gw_data_find wrappers
"""
import re
import warnings
from six.moves.urllib.error import HTTPError
try: # python >= 3
from json.decoder import JSONDecodeError
except ImportError: # python == 2.7
JSONDecodeError = ValueError
import gwdatafind
from ..const import DEFAULT_SEGMENT_SERVER
from gwpy.io import gwf as io_gwf
from gwpy.segments import (Segment, DataQualityFlag)
from gwpy.timeseries import (TimeSeries, TimeSeriesDict)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__credits__ = 'Alex Urban <alexander.urban@ligo.org>'
# -- utilities ----------------------------------------------------------------
def check_flag(flag, gpstime, duration, pad):
"""Check that a state flag is active during an entire analysis segment
Parameters
----------
flag : `str`
state flag to check
gpstime : `float`
GPS time of required data
duration : `float`
duration (in seconds) of required data
pad : `float`
amount of extra data to read in at the start and end for filtering
Returns
-------
check : `bool`
Boolean switch to pass (`True`) or fail (`False`) depending on whether
the given flag is active
"""
# set GPS start and end time
start = gpstime - duration/2. - pad
end = gpstime + duration/2. + pad
seg = Segment(start, end)
# query for state segments
active = DataQualityFlag.query(flag, start, end,
url=DEFAULT_SEGMENT_SERVER).active
# check that state flag is active during the entire analysis
if (not active.intersects_segment(seg)) or (abs(active[0]) < abs(seg)):
return False
return True
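# Illustrative usage (flag name and GPS time are hypothetical):
#
#   ok = check_flag('H1:DMT-ANALYSIS_READY:1', gpstime=1187008882,
#                   duration=64, pad=8)
#   # False if the flag is inactive anywhere in the duration + 2*pad span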
def remove_missing_channels(channels, gwfcache):
"""Find and remove channels from a given list that are not available in
a given cache of frame files
Parameters
----------
channels : `list` of `str`
list of requested channels
gwfcache : `list` of `str`
list of paths to .gwf files
Returns
-------
keep : `list` of `str`
list of common channels found in the first and last files in the
cache
Notes
-----
As a shorthand, this utility checks `channels` against only the first
and last frame files in `gwfcache`. This saves time and memory by not
loading tables of contents for large numbers of very long data files.
For every channel requested that is not available in `gwfcache`, a
`UserWarning` will be raised.
See Also
--------
gwpy.io.gwf.iter_channel_names
for the utility used to identify frame contents
"""
# get available channels from the first and last frame file
available = set(io_gwf.iter_channel_names(gwfcache[0]))
if len(gwfcache) > 1:
available.intersection_update(io_gwf.iter_channel_names(gwfcache[-1]))
# work out which channels to keep, and which to reject
channels = set(channels)
keep = channels & available
reject = channels - keep
for channel in reject:
warnings.warn(
'{} is being removed because it was not available in all '
'requested files'.format(channel), UserWarning)
return list(keep)
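# Illustrative usage (channel and file names are hypothetical):
#
#   keep = remove_missing_channels(
#       ['L1:REAL-CHANNEL', 'L1:NOT-IN-FRAMES'],
#       ['L-L1_R-1187008864-32.gwf', 'L-L1_R-1187008896-32.gwf'])
#   # warns about 'L1:NOT-IN-FRAMES' and returns only channels present in
#   # both the first and the last frame file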
def get_data(channel, start, end, frametype=None, source=None,
nproc=1, verbose=False, **kwargs):
"""Retrieve data for given channels within a certain time range
Parameters
----------
channel : `str` or `list`
either a single channel name, or a list of channel names
start : `float`
GPS start time of requested data
end : `float`
GPS end time of requested data
frametype : `str`, optional
name of frametype in which channel(s) are stored, default: `None`
source : `str`, `list`, optional
path(s) of a LAL-format cache file or individual data file,
default: `None`
nproc : `int`, optional
number of parallel processes to use, uses serial process by default
verbose : `bool`, optional
print verbose output about NDS progress, default: False
**kwargs : `dict`, optional
additional keyword arguments to `~gwpy.timeseries.TimeSeries.read`
or `~gwpy.timeseries.TimeSeries.get`
Returns
-------
data : `~gwpy.timeseries.TimeSeries` or `~gwpy.timeseries.TimeSeriesDict`
collection of data for the requested channels in the requested time
range
Notes
-----
If `channel` is a `str`, then a `TimeSeries` object will be returned, else
the result is a `TimeSeriesDict`.
The `frametype` argument should be used to read from archived frame files,
while `source` should be used to read from a local cache or specific data
file. If either fails, or if neither is passed, this function will attempt
to get data over an NDS server.
If `frametype` is used to read from the archive, any channels missing
from the first or last frame file in the requested time range will be
ignored.
See Also
--------
remove_missing_channels
a utility that removes channels missing from the frame archive
gwpy.timeseries.TimeSeries.get
the underlying method to read data over an NDS server
gwpy.timeseries.TimeSeries.read
the underlying method to read data from local files
"""
# get TimeSeries class
if isinstance(channel, (list, tuple)):
series_class = TimeSeriesDict
else:
series_class = TimeSeries
if frametype is not None:
try: # locate frame files
ifo = re.search('[A-Z]1', frametype).group(0)
obs = ifo[0]
source = gwdatafind.find_urls(obs, frametype, start, end)
except AttributeError:
raise AttributeError(
'Could not determine observatory from frametype')
except (HTTPError, JSONDecodeError): # frame files not found
pass
if isinstance(source, list) and isinstance(channel, (list, tuple)):
channel = remove_missing_channels(channel, source)
if source is not None: # read from frame files
return series_class.read(
source, channel, start=start, end=end, nproc=nproc,
verbose=verbose, **kwargs)
# read single channel from NDS
if not isinstance(channel, (list, tuple)):
return series_class.get(
channel, start, end, verbose=verbose, **kwargs)
# if all else fails, process channels in groups of 60
data = series_class()
for group in [channel[i:i + 60] for i in range(0, len(channel), 60)]:
data.append(series_class.get(
group, start, end, verbose=verbose, **kwargs))
return data
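# Illustrative usage (channel and frametype names are examples, not defaults):
#
#   data = get_data('L1:GDS-CALIB_STRAIN', 1187008864, 1187008896,
#                   frametype='L1_HOFT_C00', nproc=4)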
|
detectlanguage/detectlanguage-python | setup.py | Python | mit | 843 | 0.027284 | #!/usr/bin/env python
from setuptools.depends import get_module_constant
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = 'detectlanguage',
packages = ['detectlanguage'],
version = get_module_constant('detectlanguage', '__version__'),
description = 'Language Detection API Client',
long_description=long_description,
long_description_content_type="text/markdown",
author = 'Laurynas Butkus',
author_email = 'info@detectlanguage.com',
url = 'https://github.com/detectlanguage/detectlanguage-python',
download_url = 'https://github.com/detectlanguage/detectlanguage-python',
keywords = ['language', 'identification', 'detection', 'api', 'client'],
install_requires= ['requests>=2.4.2'],
classifiers = [],
license = 'MIT',
)
|
csferrie/python-qinfer | src/qinfer/distributions.py | Python | agpl-3.0 | 51,767 | 0.004057 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# distributions.py: module for probability distributions.
##
# © 2017, Chris Ferrie (csferrie@gmail.com) and
# Christopher Granade (cgranade@cgranade.com).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
## IMPORTS ###################################################################
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future.utils import with_metaclass
import numpy as np
import scipy.stats as st
import scipy.linalg as la
from scipy.interpolate import interp1d
from scipy.integrate import cumtrapz
from scipy.spatial import ConvexHull, Delaunay
from functools import partial
import abc
from qinfer import utils as u
from qinfer.metrics import rescaled_distance_mtx
from qinfer.clustering import particle_clusters
from qinfer._exceptions import ApproximationWarning
import warnings
## EXPORTS ###################################################################
__all__ = [
'Distribution',
'SingleSampleMixin',
'MixtureDistribution',
'ParticleDistribution',
'ProductDistribution',
'UniformDistribution',
'DiscreteUniformDistribution',
'MVUniformDistribution',
'ConstantDistribution',
'NormalDistribution',
'MultivariateNormalDistribution',
'SlantedNormalDistribution',
'LogNormalDistribution',
'BetaDistribution',
'DirichletDistribution',
'BetaBinomialDistribution',
'GammaDistribution',
'GinibreUniform',
'HaarUniform',
'HilbertSchmidtUniform',
'PostselectedDistribution',
'ConstrainedSumDistribution',
'InterpolatedUnivariateDistribution'
]
## FUNCTIONS #################################################################
def scipy_dist(name, *args, **kwargs):
"""
Wraps calling a scipy.stats distribution to allow for pickling.
See https://github.com/scipy/scipy/issues/3125.
"""
return getattr(st, name)(*args, **kwargs)
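# Illustrative point of the indirection above (names are standard scipy.stats
# ones): ``partial(scipy_dist, 'norm', loc=0, scale=1)`` can be pickled, while
# a frozen ``st.norm(loc=0, scale=1)`` object historically could not.
#
#   frozen = scipy_dist('norm', loc=0, scale=1)
#   frozen.rvs(size=5)  # five standard-normal draws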
## ABSTRACT CLASSES AND MIXINS ###############################################
class Distribution(with_metaclass(abc.ABCMeta, object)):
"""
Abstract base class for probability distributions on one or more random
variables.
"""
@abc.abstractproperty
def n_rvs(self):
"""
The number of random variables that this distribution is over.
:type: `int`
"""
pass
@abc.abstractmethod
def sample(self, n=1):
"""
Returns one or more samples from this probability distribution.
:param int n: Number of samples to return.
:rtype: numpy.ndarray
:return: An array containing samples from the
distribution of shape ``(n, d)``, where ``d`` is the number of
random variables.
"""
pass
class SingleSampleMixin(with_metaclass(abc.ABCMeta, object)):
"""
Mixin class that extends a class so as to generate multiple samples
correctly, given a method ``_sample`` that generates one sample at a time.
"""
@abc.abstractmethod
def _sample(self):
pass
def sample(self, n=1):
samples = np.zeros((n, self.n_rvs))
for idx in range(n):
samples[idx, :] = self._sample()
return samples
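# Illustrative sketch (the class below is hypothetical, not part of the API):
# a distribution only needs ``_sample`` once the mixin supplies ``sample``.
#
#   class CoinDistribution(SingleSampleMixin, Distribution):
#       @property
#       def n_rvs(self):
#           return 1
#       def _sample(self):
#           return np.random.randint(2, size=(1,))
#
#   CoinDistribution().sample(n=5)  # shape (5, 1) array of 0s and 1s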
## CLASSES ###################################################################
class MixtureDistribution(Distribution):
r"""
Samples from a weighted list of distributions.
:param weights: Length ``n_dist`` list or ``np.ndarray``
of probabilites summing to 1.
:param dist: Either a length ``n_dist`` list of ``Distribution`` instances,
or a ``Distribution`` class, for example, ``NormalDistribution``.
It is assumed that a list of ``Distribution``s all
have the same ``n_rvs``.
:param dist_args: If ``dist`` is a class, an array
of shape ``(n_dist, n_rvs)`` where ``dist_args[k,:]`` defines
the arguments of the k'th distribution. Use ``None`` if the distribution
has no arguments.
:param dist_kw_args: If ``dist`` is a class, a dictionary
where each key's value is an array
of shape ``(n_dist, n_rvs)`` where ``dist_kw_args[key][k,:]`` defines
the keyword argument corresponding to ``key`` of the k'th distribution.
Use ``None`` if the distribution needs no keyword arguments.
:param bool shuffle: Whether or not to shuffle result after sampling. Not shuffling
will result in variates being in the same order as
the distributions. Default is ``True``.
"""
def __init__(self, weights, dist, dist_args=None, dist_kw_args=None, shuffle=True):
super(MixtureDistribution, self).__init__()
self._weights = weights
self._n_dist = len(weights)
self._shuffle = shuffle
try:
self._example_dist = dist[0]
self._is_dist_list = True
self._dist_list = dist
assert(self._n_dist == len(self._dist_list))
except:
self._is_dist_list = False
self._dist = dist
self._dist_args = dist_args
self._dist_kw_args = dist_kw_args
assert(self._n_dist == self._dist_args.shape[0])
self._example_dist = self._dist(
*self._dist_arg(0),
**self._dist_kw_arg(0)
)
def _dist_arg(self, k):
"""
Returns the arguments for the k'th distribution.
:param int k: Index of distribution in question.
:rtype: ``np.ndarary``
"""
if self._dist_args is not None:
return self._dist_args[k,:]
else:
return []
def _dist_kw_arg(self, k):
"""
Returns a dictionary of keyword arguments
for the k'th distribution.
:param int k: Index of the distribution in question.
:rtype: ``dict``
"""
if self._dist_kw_args is not None:
return {
key:self._dist_kw_args[key][k,:]
for key in self._dist_kw_args.keys()
}
else:
return {}
@property
def n_rvs(self):
return self._example_dist.n_rvs
@property
def n_dist(self):
"""
The number of distributions in the mixture distribution.
"""
return self._n_dist
def sample(self, n=1):
# how many samples to take from each dist
ns = np.random.multinomial(n, self._weights)
idxs = np.arange(self.n_dist)[ns > 0]
if self._is_dist_list:
# sample from each distribution
samples = np.concatenate([
self._dist_list[k].sample(n=ns[k])
for k in idxs |
gitmill/gitmill | django/repository/views.py | Python | mit | 245 | 0.004082 | from django.shortcuts import render
from user.decorators import user_view
from repository.decorators import repository_view
@user_view
@repository_view
def repository(request, user, repository, **kwargs):
return render(request, 'app.html')
|
StongeEtienne/dipy | dipy/fixes/argparse.py | Python | bsd-3-clause | 85,208 | 0.000012 | # emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# Copyright 2006-2009 Steven J. Bethard <steven.bethard@gmail.com>.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.0.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'Namespace',
'Action',
'FileType',
'HelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'ArgumentDefaultsHelpFormatter',
]
try:
_set = set
except NameError:
from sets import Set as _set
try:
_basestring = basestring
except NameError:
_basestring = str
try:
_sorted = sorted
except NameError:
def _sorted(iterable, reverse=False):
result = list(iterable)
result.sort()
if reverse:
result.reverse()
return result
# silence Python 2.6 buggy warnings about Exception.message
if _sys.version_info[:2] == (2, 6):
import warnings
warnings.filterwarnings(
action='ignore',
message='BaseException.message has been deprecated as of Python 2.6',
category=DeprecationWarning,
module='argparse')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = '==PARSER=='
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return _sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = f |
Batchyx/parsethisshit | parsethis.py | Python | gpl-3.0 | 21,672 | 0.045268 | #!/usr/bin/python
# -*- encoding:iso8859-15 -*-
"""A module to parse specificaly designed HTML calendars"""
# Copyright (C) 2005-2009 Batchyx
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
try:
import psyco
except:
    print >> sys.stderr, "psyco not available"
pass
import time
from sgmllib import SGMLParser
from extendablelist import *
import icalendarevent
import calendarhelper
from re import compile
hoursregex=compile(
r'(?P<starthour>[0-9]{1,2})[Hh:](?P<startmin>[0-9]{0,2})'
r'('
r'-'
r'(?P<endhour>[0-9]{1,2})[hH:](?P<endmin>[0-9]{0,2})'
r')'
)
this_year = time.strftime('%Y', time.localtime())
next_year = str(int(this_year)+1)
dateregex = compile(
r'(?P<mday>[0-9]{1,2})[/]'
r'(?P<month>[0-9]{1,2})'
r'([/](?P<year>%s(%s|%s)?))?'%(
this_year[:2], this_year[2:],next_year[2:]
)
)
class TimeTableUnparsableYet(NotImplementedError):
"""This Error indicate that this programm cannot yet parse this
kind of timetable yet."""
class TableParser(SGMLParser):
"""An HTML <Table> parser with spanning support
inherited from sgml.SGMLParser"""
def __init__(self):
SGMLParser.__init__(self)
self.rows=[]
self.currentrow=None
self.readdata="" # current cell content
self.handle_data=self.handle_comment=self.handle_nothing
self.linenum=0
def handle_nothing(self,data):
pass
def start_tr(self,attr):
if self.linenum > len(self.rows)-1:
self.currentrow=extendablelist()
self.rows.append(self.currentrow)
else:
# the line already has part of a rowspaned cell
self.currentrow=self.rows[self.linenum]
def start_td(self,attr):
self.readdata=""
self.cellattr=dict(attr)
self.handle_data=self.handle_content
def handle_content(self,data):
self.readdata+=data
def end_td(self):
colspan=int(self.cellattr.get('colspan',1))
rowspan=int(self.cellattr.get('rowspan',1))
self.handle_data=self.handle_nothing
cell=self.generate_cell(self.readdata,self.cellattr)
first=self.currentrow.append(cell)
if cell is not None:
cell.set_position(first,self.linenum, colspan,rowspan)
for a in range(1,colspan):
# duplicate the reference on all the positions.
self.currentrow.append(cell)
if rowspan==1:
return
for line in range(self.linenum+1,self.linenum+1+rowspan-1):
# the next line may not already exist.
if line == len(self.rows):
self.rows.append(extendablelist())
for col in range(first,first+colspan):
self.rows[line][col]=cell
def generate_cell(self,text,parms):
color=parms.get('bgcolor','white')
text = text.replace('\n',' ').replace('\r',' ').replace('*','')
text = text.strip()
if not text:
return None
return Cell(text,color)
def end_tr(self):
self.currentrow=None
self.linenum+=1
def __repr__(self):
return "<TableParser[%s]>"%(
"\n".join(
map(repr,self.rows)
))
def __str__(self):
"""Get a CSV (Comma Separated Value) from this table"""
result=""
for row in self.rows:
first=""
for value in row:
if value is None:
value=""
result+="%s \"%s\""%(first, value)
            first=","
result+="\n"
return result
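# Hedged sketch (assumed minimal HTML input; extendablelist.append is assumed
# to return the insertion index, as used by end_td above): spanned cells are
# stored once and referenced from every grid position they cover.
def _demo_table_parser():
    parser = TableParser()
    parser.feed('<table><tr><td rowspan="2">a</td><td>b</td></tr>'
                '<tr><td>c</td></tr></table>')
    parser.close()
    # parser.rows[0] is [<Cell 'a'>, <Cell 'b'>]
    # parser.rows[1] is [<Cell 'a'>, <Cell 'c'>] -- the same 'a' Cell object
    assert parser.rows[1][0] is parser.rows[0][0]
    return parser.rows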
class Cell:
"""Representation of a cell."""
# TODO : move some content parsing there.
def __init__(self,content,color):
self.content=content
self.colordata=color
self.x = self.y = self.xspan = self.yspan = None
def set_position(self,x,y,xspan,yspan):
self.x = x
self.y = y
self.xspan = xspan
self.yspan = yspan
def __repr__(self):
return "<Cell(%s,%s) at (%d+%d,%d+%d)"%(
repr(self.content),repr(self.colordata),
self.x, self.y, self.xspan, self.yspan
)
def color(self):
return self.colordata
def data(self):
return self.content
__str__=data
def __nonzero__(self):
return bool(self.content)
    def __eq__(self, other):
        if isinstance(other, Cell):
            return (self.colordata == other.colordata and
                    self.content == other.content)
        return NotImplemented
def get_time(self):
"""get_time() => (from_hour,from_min,to_hour,to_min)
return None if this cell doesn't contain a date.
"""
hoursreg = hoursregex.search(self.content)
if hoursreg is None:
return None
starthour=hoursreg.group('starthour')
starthour=int(starthour)
startmin=hoursreg.group('startmin')
if not startmin:
startmin=0
else:
startmin=int(startmin)
endhour = hoursreg.group('endhour')
endhour = int(endhour)
endmin=hoursreg.group('endmin')
if not endmin:
endmin=0
else:
endmin=int(endmin)
return starthour,startmin,endhour,endmin
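# Hedged example of the time-span parsing above: both hour separators
# ('h'/'H' and ':') are accepted, and minutes may be omitted.
def _demo_cell_get_time():
    assert Cell('8h30-10h00', 'white').get_time() == (8, 30, 10, 0)
    assert Cell('14:-16:', 'white').get_time() == (14, 0, 16, 0)
    assert Cell('no time span here', 'white').get_time() is None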
class Group(object):
def __init__(self,name,startline,endline,grouptype):
self.name = name
self.startline=startline
self.endline=endline
self.grouptype = grouptype
    def __cmp__(self, other):
        return cmp(self.name, other.name)
def get_name(self):
return self.name
def __repr__(self):
return "Group(%s,%s,%s,%s)"%(self.name, self.startline,
self.endline, self.grouptype)
class TimeTableParser(TableParser):
def __init__(self):
TableParser.__init__(self)
self.daycols=None
self.weekspos=None
self.times=None
self.defaulttimes=None
self.day_of_week_header_line = None
self.first_date = None
self.groups_max_x = None
self.header_max_x = 0
def end_td(self):
# TODO: is that still needed for xlhtml output ?!?
tmp=self.readdata
try:
tmp.decode('utf-8')
except UnicodeDecodeError:
tmp = tmp.decode('windows-1252').encode('utf-8')
        self.readdata = tmp
        TableParser.end_td(self)
def prepare_parser(self):
"""Get sufficent layout information to start parsing
extract day, hours and weeks positions, and the first
date of this calendar"""
self.extract_days_positions()
# detect_one_week_only depends on week days positions
self.detect_one_week_only()
self.extract_time_positions()
self.extract_first_date()
# extract_weeks_positions might need the first date.
self.extract_weeks_positions()
def set_default_times(self, defaulttimes):
"""Set default time if the time header is not found
defaulttimes is a sequence of tuples,
see Cell.get_time() for the format to use"""
self.defaulttimes = | defaulttimes
def header_cell_found(self, cell):
if self.groups_max_x is None:
self.groups_max_x = cell.x
else:
self.groups_max_x = min(cell.x, self.groups_max_x)
def | extract_days_positions(self):
"""Find days positions."""
days = ['lundi','mardi','mercredi','jeudi','vendredi']
for linenum, row in enumerate(self.rows):
self.daycols=[]
for x, cell in enumerate(row):
if not cell:
continue
value = cell.data().lower()
current_day = len(self.daycols)
if current_day >= len(days):
break
# the simple case.
if days[current_day] in value:
self.daycols.append(x)
continue
found_day = None
for day_index, day in enumerate(
days[current_day:],
current_day):
if day in value:
found_day = day_index
# Sigh... a day is missing.
break
else:
continue
self.daycols.extend([None]* (
found_day - current_day))
self.daycols.append(x)
else:
continue
self.day_of_week_header_line = linenum
break
else:
raise TimeTableUnparsableYet(
"Cannot find an header with days")
def detect_one_week_only(self):
"""Find the first date of this timetable"""
self.single_week_found = False
for cell in self.rows[self.day_of_week_header_line]:
if cell is None:
continue
matchobj = dateregex.search(cell.data())
if matchobj is None:
continue
self.header_cell_found(cell)
mday = matchobj.group('mday')
month = matchobj.group('month')
year = matchobj.group('year')
self.fir |
timabbott/zulip | zerver/lib/test_classes.py | Python | apache-2.0 | 43,104 | 0.004918 | import base64
import os
import re
import shutil
import tempfile
import urllib
from contextlib import contextmanager
from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Set, Tuple, Union
from unittest import mock
import ujson
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.state import StateApps
from django.db.utils import IntegrityError
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from django.test.client import BOUNDARY, MULTIPART_CONTENT, encode_multipart
from django.test.testcases import SerializeMixin
from django.urls import resolve
from django.utils import translation
from fakeldap import MockLDAP
from two_factor.models import PhoneDevice
from zerver.decorator import do_two_f | actor_login
from zerver.lib.actions import (
bulk_add_subscriptions,
bulk_remove_subscriptions,
check_send_message,
check_send_stream_message,
gather_subscriptions,
)
from zerver.lib.initial_password import initial_password
from zerver.lib.sessions import get_session_dict_user
from zerve | r.lib.stream_subscription import get_stream_subscriptions_for_user
from zerver.lib.streams import (
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
)
from zerver.lib.test_helpers import find_key_by_email, instrument_url
from zerver.lib.users import get_api_key
from zerver.lib.validator import check_string
from zerver.lib.webhooks.common import get_fixture_http_headers, standardize_headers
from zerver.models import (
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserProfile,
clear_supported_auth_backends_cache,
flush_per_request_caches,
get_client,
get_display_recipient,
get_realm,
get_realm_stream,
get_stream,
get_system_bot,
get_user,
get_user_by_delivery_email,
)
from zerver.openapi.openapi import validate_against_openapi_schema
from zerver.tornado.event_queue import clear_client_event_queues_for_testing
from zilencer.models import get_remote_server_by_uuid
class UploadSerializeMixin(SerializeMixin):
"""
    We cannot use override_settings to change the upload directory because
    settings.LOCAL_UPLOADS_DIR is used in url patterns and urls
are compiled only once. Otherwise using a different upload directory
for conflicting test cases would have provided better performance
while providing the required isolation.
"""
lockfile = 'var/upload_lock'
@classmethod
def setUpClass(cls: Any, *args: Any, **kwargs: Any) -> None:
if not os.path.exists(cls.lockfile):
with open(cls.lockfile, 'w'): # nocoverage - rare locking case
pass
super().setUpClass(*args, **kwargs)
class ZulipTestCase(TestCase):
# Ensure that the test system just shows us diffs
maxDiff: Optional[int] = None
def setUp(self) -> None:
super().setUp()
self.API_KEYS: Dict[str, str] = {}
def tearDown(self) -> None:
super().tearDown()
# Important: we need to clear event queues to avoid leaking data to future tests.
clear_client_event_queues_for_testing()
clear_supported_auth_backends_cache()
flush_per_request_caches()
translation.activate(settings.LANGUAGE_CODE)
# Clean up after using fakeldap in ldap tests:
if hasattr(self, 'mock_ldap') and hasattr(self, 'mock_initialize'):
if self.mock_ldap is not None:
self.mock_ldap.reset()
self.mock_initialize.stop()
'''
WRAPPER_COMMENT:
We wrap calls to self.client.{patch,put,get,post,delete} for various
reasons. Some of this has to do with fixing encodings before calling
into the Django code. Some of this has to do with providing a future
path for instrumentation. Some of it's just consistency.
The linter will prevent direct calls to self.client.foo, so the wrapper
functions have to fake out the linter by using a local variable called
    django_client to fool the regex.
'''
DEFAULT_SUBDOMAIN = "zulip"
TOKENIZED_NOREPLY_REGEX = settings.TOKENIZED_NOREPLY_EMAIL_ADDRESS.format(token="[a-z0-9_]{24}")
def set_http_headers(self, kwargs: Dict[str, Any]) -> None:
if 'subdomain' in kwargs:
kwargs['HTTP_HOST'] = Realm.host_for_subdomain(kwargs['subdomain'])
del kwargs['subdomain']
elif 'HTTP_HOST' not in kwargs:
kwargs['HTTP_HOST'] = Realm.host_for_subdomain(self.DEFAULT_SUBDOMAIN)
# set User-Agent
if 'HTTP_AUTHORIZATION' in kwargs:
# An API request; use mobile as the default user agent
default_user_agent = "ZulipMobile/26.22.145 (iOS 10.3.1)"
else:
# A webapp request; use a browser User-Agent string.
default_user_agent = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) " +
"AppleWebKit/537.36 (KHTML, like Gecko) " +
"Chrome/79.0.3945.130 Safari/537.36")
if kwargs.get('skip_user_agent'):
# Provide a way to disable setting User-Agent if desired.
assert 'HTTP_USER_AGENT' not in kwargs
del kwargs['skip_user_agent']
elif 'HTTP_USER_AGENT' not in kwargs:
kwargs['HTTP_USER_AGENT'] = default_user_agent
def validate_api_response_openapi(self, url: str, method: str, result: HttpResponse) -> None:
"""
Validates all API responses received by this test against Zulip's API documentation,
declared in zerver/openapi/zulip.yaml. This powerful test lets us use Zulip's
extensive test coverage of corner cases in the API to ensure that we've properly
documented those corner cases.
"""
if not (url.startswith("/json") or url.startswith("/api/v1")):
return
try:
content = ujson.loads(result.content)
except ValueError:
return
url = re.sub(r"\?.*", "", url)
validate_against_openapi_schema(content,
url.replace("/json/", "/").replace("/api/v1/", "/"),
method, str(result.status_code))
@instrument_url
def client_patch(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> HttpResponse:
"""
We need to urlencode, since Django's function won't do it for us.
"""
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
self.set_http_headers(kwargs)
result = django_client.patch(url, encoded, **kwargs)
self.validate_api_response_openapi(url, "patch", result)
return result
@instrument_url
def client_patch_multipart(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> HttpResponse:
"""
Use this for patch requests that have file uploads or
that need some sort of multi-part content. In the future
Django's test client may become a bit more flexible,
so we can hopefully eliminate this. (When you post
with the Django test client, it deals with MULTIPART_CONTENT
automatically, but not patch.)
"""
encoded = encode_multipart(BOUNDARY, info)
django_client = self.client # see WRAPPER_COMMENT
self.set_http_headers(kwargs)
result = django_client.patch(
url,
encoded,
content_type=MULTIPART_CONTENT,
**kwargs)
self.validate_api_response_openapi(url, "patch", result)
return result
@instrument_url
def client_put(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> HttpResponse:
encoded = urllib.parse.urlencode(info)
django_client = self.client # see WRAPPER_COMMENT
self.set_http_headers(kwargs)
return django_client.put(url, encoded, **kwargs)
@instrument_url
def client_delete(self, url: str, info: Dict[str, Any]={}, **kwargs: Any) -> Htt |
dgreisen/u2db | u1db/remote/utils.py | Python | gpl-3.0 | 800 | 0 | # Copyright 2012 Canonical Ltd.
#
# This file is part of u1db.
#
# u1db is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# u1db is distributed in the hope that it will be useful,
# but | WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License f | or more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with u1db. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for details of the procotol."""
def check_and_strip_comma(line):
if line and line[-1] == ',':
return line[:-1], True
return line, False
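# Hedged usage sketch: entries in the line-based sync stream end with a comma
# while more entries follow, so callers receive the stripped payload plus a
# "has more" flag.
#
#     check_and_strip_comma('{"id": 1},')  # -> ('{"id": 1}', True)
#     check_and_strip_comma('{"id": 1}')   # -> ('{"id": 1}', False)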
|
nmercier/linux-cross-gcc | win32/bin/Lib/lib2to3/fixes/fix_unicode.py | Python | bsd-3-clause | 1,311 | 0.002288 | r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapp | ing = {u"unichr" : u"chr", u"unicode" : u"str"}
|
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def start_tree(self, tree, filename):
super(FixUnicode, self).start_tree(tree, filename)
self.unicode_literals = 'unicode_literals' in tree.future_features
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
val = node.value
if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val:
val = ur'\\'.join([
v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U')
for v in val.split(ur'\\')
])
if val[0] in u'uU':
val = val[1:]
if val == node.value:
return node
new = node.clone()
new.value = val
return new
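# Hedged before/after sketch of this fixer (2to3 drives the tree traversal):
#     unicode(name)   -> str(name)
#     unichr(65)      -> chr(65)
#     u"text"         -> "text"
#     "...\u..."      -> "...\\u..."   (non-literal \u in a plain string
#                                       gets escaped, per the docstring above)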
|
dezede/dezede | libretto/migrations/0044_auto_20190917_1200.py | Python | bsd-3-clause | 987 | 0.002033 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-09-17 10:00
from __future__ import unicode_literals
f | rom django.db import migrations, models
import django.db.models.deletion
class Migration(mig | rations.Migration):
dependencies = [
('libretto', '0043_auto_20190905_1126'),
]
operations = [
migrations.AlterModelOptions(
name='source',
options={'ordering': ('date', 'titre', 'numero', 'parent__date', 'parent__titre', 'parent__numero', 'position', 'page', 'lieu_conservation', 'cote'), 'permissions': (('can_change_status', 'Peut changer l’état'),), 'verbose_name': 'source', 'verbose_name_plural': 'sources'},
),
migrations.AlterField(
model_name='auteur',
name='profession',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='auteurs', to='libretto.Profession', verbose_name='profession'),
),
]
|
nesl/sos-2x | modules/unit_test/modules/kernel/post_raw/source_trick/reciever/source_trick_reciever.py | Python | bsd-3-clause | 3,085 | 0.014263 | import sys
import os
import pysos
import signal
# these two variables should be changed depending on the test driver's PID
# and the type of message it will be sending. If you are using generic_test.c
# then it is likely these two values can stay the same
TEST_MODULE = 0x81
MSG_TEST_DATA = 33
ALARM_LEN = 60
START_DATA = 100
FINAL_DATA = 200
TEST_FAIL = 155
TEST_PASS = 255
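# Hedged note on the wire format unpacked in generic_test() below:
# pysos.unpack("<BBB", ...) is assumed to behave like struct.unpack, i.e.
# three little-endian unsigned bytes:
#
#     import struct
#     struct.unpack("<BBB", '\x01\x64\x05')  # -> (1, 100, 5)
#
# read here as (node_id, node_state, data).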
# variables holding new and old sensor values
# this can be replaced with whatever you want since it is specific to
# what the test driver expects for data
oldstate = {}
state = {}
# a signal handler that will go off for an alarm
# it is highly suggested that you use this since it is the easiest way to test if your
# node has entered panic mode via the script
def panic_handler(signum, frame):
print "it is highly likely that your node has entered panic mode"
print "please reset the node"
sys.exit(1)
# message handler for messages of type MSG_TEST_DATA
def generic_test(msg):
""" Small example of test driver usage. It simulates a virtual
dice and shows which side of the dice is up.
"""
global oldstate
global state
print "message recieved"
signal.alarm(ALARM_LEN)
    # unpack the values we are expecting, in this case a node id, the node state,
# and a value from the | sensor
(node_id, node_state, data) = pysos.unpack("<BBB", msg['data'])
if node_id not in state.keys():
state[node_id] = 0
oldstate[node_id] = 0
    # these are some simple calculations to test the sensor value we have gotten
    # this is the part you need to fill in to verify that the function is working
if (node_st | ate == START_DATA):
print "initialization began correctly"
if (node_state == 0):
state[node_id] = data
if (node_state == TEST_FAIL):
print >> sys.stderr, "the test for item %d has failed" %data
if (node_state == TEST_PASS):
print "the test for item %d has passed" %data
if (node_state == 1 and state[node_id] != data):
print >> sys.stderr, " a message was lost somewhere on node %d before count %d" %(node_id,data)
if (node_state == FINAL_DATA):
print "finalization worked correctly"
if __name__ == "__main__":
# here we set up a connection to sossrv using the pysos module
# and begin listening for messages
# we also register our function above with the server so that it is called
    # when the appropriate message type is received
srv = pysos.sossrv()
srv.register_trigger(generic_test, sid=TEST_MODULE, type=MSG_TEST_DATA)
# register the signal handler and begin an alarm that will wait for 60 seconds before going off
# other times for the alarm might be good, use your own judgement based on your test
signal.signal(signal.SIGALRM, panic_handler)
signal.alarm(ALARM_LEN)
# we do this so since the test_suite application has information regarding the amount of time
# each test should be run. after the amount of time specified in test.lst, test_suite will
# end this script and move to another test
while(1):
continue
|
htygithub/bokeh | bokeh/models/widgets/dialogs.py | Python | bsd-3-clause | 1,502 | 0.001332 | """ Various kinds of dialog and message box widgets. """
from __future__ import absolute_import
from ...properties import Bool, String, List, Instance, Either
from .widget import Widget
from .layouts import BaseBox, HBox
from .buttons import Button
class Dialog(Widget):
""" Simple dialog box with string message.
"""
visible = Bool(False, help="""
Whether this dialog is visible or not.
""")
# TODO (bev) : "closeable" would be more common spelling
closable = Bool(True, help="""
Whether this dialog is closable or not.
""")
title = String(default="", help="""
The title of the dialog widget.
""")
conten | t = Either(String(), Instance(BaseBox), default="", help="""
Either a message to be displayed by this dialog or a BaseBox to be used
as dialog body.
""")
buttons = List(Instance(Button), help="""
A list of buttons to be placed on the bottom of the dialog.
" | "")
buttons_box = Instance(BaseBox, help="""
A BaseBox with buttons to be used as dialog footer.
""")
def __init__(self, **kwargs):
if "buttons" in kwargs and "buttons_box" in kwargs:
raise ValueError("'buttons' keyword cannot be used with 'buttons_box' argument")
if 'buttons' in kwargs:
kwargs['buttons_box'] = HBox(children=kwargs['buttons'])
elif 'buttons_box' in kwargs:
kwargs['buttons'] = kwargs['buttons_box'].children
super(Dialog, self).__init__(**kwargs)
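# Hedged usage sketch (Button is imported above from .buttons):
#
#     dialog = Dialog(title="Confirm", content="Proceed?",
#                     buttons=[Button(label="OK"), Button(label="Cancel")])
#     # equivalent: Dialog(..., buttons_box=HBox(children=[...]))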
|
antoinecarme/pyaf | tests/artificial/transf_Logit/trend_MovingAverage/cycle_0/ar_/test_artificial_32_Logit_MovingAverage_0__100.py | Python | bsd-3-clause | 264 | 0.087121 | import pyaf.Bench.TS_datasets as tsds
i | mport tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 0, transform = "Logit", sigma = 0.0, exog_count = 100, ar_orde | r = 0); |
AnykeyNL/uArmProPython | svg_example.py | Python | gpl-3.0 | 1,467 | 0.019087 | # Example made by OssiLehtinen
#
from svgpathtools import svg2paths, wsvg
import numpy as np
import uArmRobot
import time
#Configure Serial Port
#serialport = "com3" # for windows
serialport = "/dev/ttyACM0" # for linux like system
# Connect to uArm
myRobot = uArmRobot.robot(serialport,0) # user 0 for firmware < v4 and use 1 for firmware v4
myRobot.debug = True # Enable / Disable debug output on screen, by default disabled
myRobot.connect()
myRobot.mode(1) # Set mode to Normal
# Read in the svg
paths, attributes = svg2paths('drawing.svg')
scale = .25
steps_per_seg = 3
coords = []
x_offset = 200
height = 90
draw_speed = 1000
# Convert the paths to a list of coordinates
for i in range(len(paths)):
path = paths[i]
attribute = attributes[i]
# A crude check for whether a path should be drawn. Does it have a style defined?
if 'style' in attribute:
for seg in path:
segcoords = []
for p in range(steps_per_seg+1):
cp = seg.point(float(p)/float(steps_per_seg))
segcoords.append([-np.real(cp)*scale+x_offset | , np.imag(cp)*scale])
coords.append(segcoords)
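# Hedged note: svgpathtools segments are parameterised over t in [0, 1] and
# seg.point(t) returns the location as a complex number, e.g.
#
#     p = seg.point(0.5)               # midpoint of the segment
#     x, y = np.real(p), np.imag(p)    # real/imaginary parts -> x/y coords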
# The starting point
myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000)
for seg in coords:
myRobot.goto(seg[0][0], seg[0][1], height, 6000)
time.sleep(0.15)
for p in seg:
myRobot.goto_laser(p[0], p[1], height, draw_speed)
# B | ack to the starting point (and turn the laser off)
myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000)
|
avathardev/matchine-learning | kmeans.py | Python | mit | 3,350 | 0.000896 | import json
import math
import random
import os
class KMeans(object):
# TO-DO: Richard
def __init__(self, dataset=None):
file_path = os.path.dirname(os.path.realpath(__file__))
if dataset is None:
self.mega_dataset = json.loads(open(file_path + '/dataset.json', 'r').read())
else:
self.mega_dataset = json.loads(dataset)
def _ED(self, point1, point2):
result = 0
for i in xrange(len(point1)):
result += pow(point2[i] - point1[i], 2)
return math.sqrt(result)
def _closest(self, datum, centroids):
closest_index = None
closest_distance = None
for i, point in enumerate(centroids):
dist = self._ED(datum, point)
if closest_index is None or dist < closest_distance:
closest_index = i
closest_distance = dist
return closest_index
def _avg(self, li):
return sum(li) / float(len(li))
def _get_centroid(self, data):
try:
datum_len = range(len(next(iter(data))))
result = [0 for x in datum_len]
for datum in data:
for i, value in enumerate(datum):
result[i] += value
for i in datum_len:
result[i] /= float(len(data))
return tuple(result)
except StopIteration:
return ([0, 0, 0])
def _kmeans(self, k, iterations=100):
clusters = [set() for _ in xrange(k)]
centroids = random.sample(self.dataset, k)
# init data to clusters
for datum in self.dataset:
i = random.choice(range(k))
clusters[i].add(datum)
for _ in xrange(iterations):
for datum in self.dataset:
# remove from clusters
for c in clusters:
try:
c.remove(datum)
except KeyError:
pass
# get closest centroid index
closest_index = self._closest(datum, centroids)
# add to the new cluster
clusters[closest_index].add(datum)
| # update centroids
centroids = [self._get_centroid(c) for c in clusters]
return clusters, centroids
def calculate(self, attr, to_file=False):
self.dataset = []
for data in self.mega_data | set[attr]:
self.dataset.append(tuple(data))
self.dataset = set(self.dataset)
champ2stat = {}
for i in xrange(len(self.mega_dataset['champions'])):
champ2stat[tuple(self.mega_dataset[attr][i])] = self.mega_dataset['champions'][i]
clusters, centroids = self._kmeans(len(self.mega_dataset[attr][0]), 100)
champ2cluster = []
for i, c in enumerate(clusters):
new_c = []
champ2cluster.append(new_c)
new_c.append(tuple(centroids[i]))
for champ in c:
new_c.append(champ2stat[champ])
if to_file:
f = open('output/' + attr + '_output.json', 'w')
f.write(json.dumps(champ2cluster, indent=4))
f.close()
return champ2cluster
# Example:
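# (hedged sketch; assumes dataset.json provides a 'champions' list and a
# parallel per-attribute stat list under a key such as 'stats')
if __name__ == '__main__':
    km = KMeans()
    for cluster in km.calculate('stats', to_file=True):
        print cluster[0], cluster[1:]  # centroid first, then champion names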
|
AxelDelmas/ansible | lib/ansible/parsing/__init__.py | Python | gpl-3.0 | 11,072 | 0.002619 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import stat
from yaml import load, YAMLError
from six import text_type
from ansible.errors import AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.parsing.vault import VaultLib
from ansible.parsing.splitter import unquote
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
from ansible.utils.path import unfrackpath
from ansible.utils.unicode import to_unicode
class DataLoader():
'''
The DataLoader class is used to load and parse YAML or JSON content,
either from a given file name or from a string that was previously
read in through other means. A Vault password can be specified, and
any vault-encrypted files will be decrypted.
Data read from files will also be cached, so the file will never be
read from disk more than once.
Usage:
dl = DataLoader()
(or)
dl = DataLoader(vault_password='foo')
ds = dl.load('...')
ds = dl.load_from_file('/path/to/file')
'''
def __init__(self):
self._basedir = '.'
self._FILE_CACHE = dict()
# initialize the vault stuff with an empty password
self.set_vault_password(None)
def set_vault_password(self, vault_password):
self._vault_password = vault_password
self._vault = VaultLib(password=vault_password)
def load(self, data, file_name='<string>', show_content=True):
'''
Creates a python datastructure from the given data, which can be either
a JSON or YAML string.
'''
try:
# we first try to load this data as JSON
return json.loads(data)
except:
# if loading JSON failed for any reason, we go ahead
# and try to parse it as YAML instead
if isinstance(data, AnsibleUnicode):
# The PyYAML's libyaml bindings use PyUnicode_CheckExact so
# they are unable to cope with our subclass.
# Unwrap and re-wrap the unicode so we can keep track of line
# numbers
new_data = text_type(data)
else:
new_data = data
try:
new_data = self._safe_load(new_data, file_name=file_name)
except YAMLError as yaml_exc:
self._handle_error(yaml_exc, file_name, show_content)
if isinstance(data, AnsibleUnicode):
new_data = AnsibleUnicode(new_data)
new_data.ansible_pos = data.ansible_pos
return new_data
def load_from_file(self, file_name):
''' Loads data from a file, which can contain either JSON or YAML. '''
file_name = self.path_dwim(file_name)
# if the file has already been read in and cached, we'll
# return those results to avoid more file/vault operations
if file_name in self._FILE_CACHE:
parsed_data = self._FILE_CACHE[file_name]
else:
# read the file contents and load the data structure from them
(file_data, show_content) = self._get_file_contents(file_name)
parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)
# cache the file contents for next time
self._FILE_CACHE[file_name] = pa | rsed_data
# return a deep copy here, so the cache is not affected
return copy.deepcopy(parsed_data | )
def path_exists(self, path):
path = self.path_dwim(path)
return os.path.exists(path)
def is_file(self, path):
path = self.path_dwim(path)
return os.path.isfile(path) or path == os.devnull
def is_directory(self, path):
path = self.path_dwim(path)
return os.path.isdir(path)
def list_directory(self, path):
path = self.path_dwim(path)
return os.listdir(path)
def is_executable(self, path):
'''is the given path executable?'''
path = self.path_dwim(path)
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def _safe_load(self, stream, file_name=None):
''' Implements yaml.safe_load(), except using our custom loader class. '''
loader = AnsibleLoader(stream, file_name)
try:
return loader.get_single_data()
finally:
loader.dispose()
def _get_file_contents(self, file_name):
'''
Reads the file contents from the given file name, and will decrypt them
if they are found to be vault-encrypted.
'''
if not file_name or not isinstance(file_name, basestring):
raise AnsibleParserError("Invalid filename: '%s'" % str(file_name))
if not self.path_exists(file_name) or not self.is_file(file_name):
raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name)
show_content = True
try:
with open(file_name, 'rb') as f:
data = f.read()
if self._vault.is_encrypted(data):
data = self._vault.decrypt(data)
show_content = False
data = to_unicode(data, errors='strict')
return (data, show_content)
except (IOError, OSError) as e:
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)))
def _handle_error(self, yaml_exc, file_name, show_content):
'''
Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
file name/position where a YAML exception occurred, and raises an AnsibleParserError
to display the syntax exception information.
'''
# if the YAML exception contains a problem mark, use it to construct
# an object the error class can use to display the faulty line
err_obj = None
if hasattr(yaml_exc, 'problem_mark'):
err_obj = AnsibleBaseYAMLObject()
err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content)
def get_basedir(self):
''' returns the current basedir '''
return self._basedir
def set_basedir(self, basedir):
''' sets the base directory, used to find files when a relative path is given '''
if basedir is not None:
self._basedir = to_unicode(basedir)
def path_dwim(self, given):
'''
make relative paths work like folks expect.
'''
given = unquote(given)
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
return os.path.abspath(os.path.join(self._basedir, given))
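    # Hedged examples of the resolution rules above (basedir '/srv/pb' and
    # user 'alice' are illustrative):
    #     path_dwim('/etc/hosts')  -> '/etc/hosts'
    #     path_dwim('~/vars.yml')  -> '/home/alice/vars.yml'
    #     path_dwim('vars.yml')    -> '/srv/pb/vars.yml'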
def path_dwim_relative(self, path, dirname, source):
''' find one file in a role/playbook dirs with/without dirname subdir '''
search = []
isrole = False
# I have full path, nothing else needs to be looked at
if source.startswi |
patrick-winter-knime/mol-struct-nets | molstructnets/experimentbatch/batch_entry.py | Python | gpl-3.0 | 1,435 | 0.000697 | from util import file_util
class BatchEntry:
def __init__(self, csv_line, experiment_location=None):
values = csv_line.split(',')
self.experiment_location = experiment_location
self.experiment = BatchEntry.get_value(values, 0)
if s | elf.experiment is None:
raise ValueError('Experiment is not defined')
self.data_set = BatchEntry.get_value(values, 1)
self.target = BatchEntry.get_value(values, 2)
self.partition = BatchEntry | .get_value(values, 3)
def get_execution_arguments(self):
arguments = list()
if self.experiment_location is not None:
arguments.append(file_util.resolve_subpath(self.experiment_location, self.experiment))
else:
arguments.append(file_util.resolve_path(self.experiment))
if self.data_set is not None:
arguments.append('--data_set')
arguments.append(self.data_set)
if self.target is not None:
arguments.append('--target')
arguments.append(self.target)
if self.partition is not None:
arguments.append('--partition')
arguments.append(self.partition)
return arguments
@staticmethod
def get_value(values, index):
if len(values) < index + 1:
return None
value = values[index].strip()
if value == '':
return None
return value
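# Hedged usage sketch of the CSV contract parsed above (paths illustrative):
#
#     entry = BatchEntry('exp.json,data.h5,activity,train', '/experiments')
#     entry.get_execution_arguments()
#     # -> [<resolved /experiments/exp.json>, '--data_set', 'data.h5',
#     #     '--target', 'activity', '--partition', 'train']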
|
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/email/generator.py | Python | gpl-3.0 | 19,988 | 0.0007 | # Copyright (C) 2001-2010 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Classes to generate plain text from a message object tree."""
__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator']
import re
import sys
import time
import random
from copy import deepcopy
from io import StringIO, BytesIO
from email.utils import _has_surrogates
UNDERSCORE = '_'
NL = '\n' # XXX: no longer used by the code below.
fcre = re.compile(r'^From ', re.MULTILINE)
class Generator:
"""Generates output from a Message object tree.
This basic generator writes the message to the given file object as plain
text.
"""
#
# Public interface
#
def __init__(self, outfp, mangle_from_=None, maxheaderlen=None, *,
policy=None):
"""Create the generator for message flattening.
outfp is the output file-like object for writing the message to. It
must have a write() method.
Optional mangle_from_ is a flag that, when True (the default if policy
is not set), escapes From_ lines in the body of the message by putting
a `>' in front of them.
Optional maxheaderlen specifies the longest length for a non-continued
header. Wh | en a header line is longer (in characters, with tabs
expanded to 8 spaces) than maxheaderlen, the header will split as
defined in the Header class. Set maxheaderlen to zero to disable
header wrapping. The defau | lt is 78, as recommended (but not required)
by RFC 2822.
The policy keyword specifies a policy object that controls a number of
aspects of the generator's operation. If no policy is specified,
the policy associated with the Message object passed to the
flatten method is used.
"""
if mangle_from_ is None:
mangle_from_ = True if policy is None else policy.mangle_from_
self._fp = outfp
self._mangle_from_ = mangle_from_
self.maxheaderlen = maxheaderlen
self.policy = policy
def write(self, s):
# Just delegate to the file object
self._fp.write(s)
def flatten(self, msg, unixfrom=False, linesep=None):
r"""Print the message object tree rooted at msg to the output file
specified when the Generator instance was created.
unixfrom is a flag that forces the printing of a Unix From_ delimiter
before the first object in the message tree. If the original message
has no From_ delimiter, a `standard' one is crafted. By default, this
is False to inhibit the printing of any From_ delimiter.
Note that for subobjects, no From_ line is printed.
linesep specifies the characters used to indicate a new line in
the output. The default value is determined by the policy specified
when the Generator instance was created or, if none was specified,
from the policy associated with the msg.
"""
# We use the _XXX constants for operating on data that comes directly
# from the msg, and _encoded_XXX constants for operating on data that
# has already been converted (to bytes in the BytesGenerator) and
# inserted into a temporary buffer.
policy = msg.policy if self.policy is None else self.policy
if linesep is not None:
policy = policy.clone(linesep=linesep)
if self.maxheaderlen is not None:
policy = policy.clone(max_line_length=self.maxheaderlen)
self._NL = policy.linesep
self._encoded_NL = self._encode(self._NL)
self._EMPTY = ''
        self._encoded_EMPTY = self._encode('')
# Because we use clone (below) when we recursively process message
# subparts, and because clone uses the computed policy (not None),
# submessages will automatically get set to the computed policy when
# they are processed by this code.
old_gen_policy = self.policy
old_msg_policy = msg.policy
try:
self.policy = policy
msg.policy = policy
if unixfrom:
ufrom = msg.get_unixfrom()
if not ufrom:
ufrom = 'From nobody ' + time.ctime(time.time())
self.write(ufrom + self._NL)
self._write(msg)
finally:
self.policy = old_gen_policy
msg.policy = old_msg_policy
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp,
self._mangle_from_,
None, # Use policy setting, which we've adjusted
policy=self.policy)
#
# Protected interface - undocumented ;/
#
# Note that we use 'self.write' when what we are writing is coming from
# the source, and self._fp.write when what we are writing is coming from a
# buffer (because the Bytes subclass has already had a chance to transform
# the data in its write method in that case). This is an entirely
# pragmatic split determined by experiment; we could be more general by
# always using write and having the Bytes subclass write method detect when
# it has already transformed the input; but, since this whole thing is a
# hack anyway this seems good enough.
# Similarly, we have _XXX and _encoded_XXX attributes that are used on
# source and buffer data, respectively.
_encoded_EMPTY = ''
def _new_buffer(self):
# BytesGenerator overrides this to return BytesIO.
return StringIO()
def _encode(self, s):
# BytesGenerator overrides this to encode strings to bytes.
return s
def _write_lines(self, lines):
# We have to transform the line endings.
if not lines:
return
lines = lines.splitlines(True)
for line in lines[:-1]:
self.write(line.rstrip('\r\n'))
self.write(self._NL)
laststripped = lines[-1].rstrip('\r\n')
self.write(laststripped)
if len(lines[-1]) != len(laststripped):
self.write(self._NL)
def _write(self, msg):
# We can't write the headers yet because of the following scenario:
# say a multipart message includes the boundary string somewhere in
# its body. We'd have to calculate the new boundary /before/ we write
# the headers so that we can write the correct Content-Type:
# parameter.
#
# The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a buffer. Then we write the
# headers and the buffer contents. That way, subpart handlers can
# Do The Right Thing, and can still modify the Content-Type: header if
# necessary.
oldfp = self._fp
try:
self._munge_cte = None
self._fp = sfp = self._new_buffer()
self._dispatch(msg)
finally:
self._fp = oldfp
munge_cte = self._munge_cte
del self._munge_cte
# If we munged the cte, copy the message again and re-fix the CTE.
if munge_cte:
msg = deepcopy(msg)
msg.replace_header('content-transfer-encoding', munge_cte[0])
msg.replace_header('content-type', munge_cte[1])
# Write the headers. First we see if the message object wants to
# handle that itself. If not, we'll do it generically.
meth = getattr(msg, '_write_headers', None)
if meth is None:
self._write_headers(msg)
else:
meth(self)
self._fp.write(sfp.getvalue())
def _dispatch(self, msg):
# Get the Content-Type: for the message, then try to dispatch to
# self._handle_<maintype>_<subtype>(). If there's no handler for the
# full MIME type, then dispatch to self._handle_<maintype>(). If
# that's missing too, then dispatch to self._writeBody().
main = msg.get_content_maintype()
sub = msg.get_content_subtype()
speci |
gakarak/BTBDB_ImageAnalysisSubPortal | app/core/preprocessing_test.py | Python | apache-2.0 | 3,387 | 0.007676 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import matplotlib.pyplot as plt
import numpy as np
import os
import glob
import nibabel as nib
import app.core.pre | processing as preproc
import unittest
class TestLungDividing(unittest.TestCase):
def setUp(self):
self.wdir = '../../experimental_data/resize-256x256x64'
def test_resize_nii(self):
self.assertTrue(os.path.isdir(self.wdir))
lstPathNii = sorted(glob.glob('%s/*.nii.gz' % self.wdir))
numNii = len(lstPathNii)
self.assertTrue((numNii > 0))
pathNii = lstPathNii[0]
newSize = (128,128,128)
niiResiz = preproc.resizeNii(pathNii | , newSize=newSize)
self.assertTrue(niiResiz.shape == newSize)
def test_divide_morphological(self):
self.assertTrue(os.path.isdir(self.wdir))
lstPathNii = sorted(glob.glob('%s/*-msk.nii.gz' % self.wdir))
numNii = len(lstPathNii)
self.assertTrue( (numNii>0) )
for ii,pathNii in enumerate(lstPathNii):
timg = preproc.niiImagePreTransform(nib.load(pathNii).get_data())
retMskLungs, retIsOk = preproc.makeLungedMask(timg)
# (1) check, that #lungs is non-zero for test-images
self.assertTrue(len(np.unique(retMskLungs))>0)
# (2) check ret-result: retIsOk=False if only one lung in data
tmp = np.unique(retMskLungs)
numLungs = int(np.sum( (tmp>0)&(tmp<3)))
            if retIsOk:
                self.assertEqual(numLungs, 2)
            else:
                self.assertEqual(numLungs, 1)
print ('\t[%d/%d] %s, #Lungs = %d, isOk = %s' % (ii, numNii, os.path.basename(pathNii), numLungs, retIsOk))
def test_lung_lesion_report(self):
tmpDir = '../../experimental_data/dataentry_test0/case-2c396a3e-1900-4fb4-bd3a-6763dc3f2ec0/study-dd10657e-f2c3-48ba-87d6-b5f3fc40c752'
fmskLung = os.path.join(tmpDir, 'series-1.3.6.1.4.1.25403.163683357445804.6452.20140120113751.2-CT.nii.gz-lungs.nii.gz')
fmskLesion = os.path.join(tmpDir, 'series-1.3.6.1.4.1.25403.163683357445804.6452.20140120113751.2-CT.nii.gz-lesion.nii.gz')
#
niiLung = nib.load(fmskLung)
niiLesion = nib.load(fmskLesion)
retInfo = preproc.prepareLesionDistribInfo(niiLung, niiLesion)
self.assertTrue(len(retInfo)>1)
def test_preview_generation(self):
tmpDir = '../../experimental_data/dataentry_test0/case-2c396a3e-1900-4fb4-bd3a-6763dc3f2ec0/study-dd10657e-f2c3-48ba-87d6-b5f3fc40c752'
fimgLung = '%s/series-1.3.6.1.4.1.25403.163683357445804.6452.20140120113751.2-CT.nii.gz' % tmpDir
fmskLung = '%s/series-1.3.6.1.4.1.25403.163683357445804.6452.20140120113751.2-CT.nii.gz-lungs.nii.gz' % tmpDir
fmskLesion = '%s/series-1.3.6.1.4.1.25403.163683357445804.6452.20140120113751.2-CT.nii.gz-lesion.nii.gz' % tmpDir
#
dataImg = preproc.normalizeCTImage(nib.load(fimgLung).get_data())
dataMsk = nib.load(fmskLung).get_data()
dataLes = nib.load(fmskLesion).get_data()
#
imgPreview = preproc.makePreview4Lesion(dataImg, dataMsk, dataLes)
self.assertTrue(np.min(imgPreview.shape[:2])>256)
if __name__ == '__main__':
# unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestLungDividing)
unittest.TextTestRunner(verbosity=2).run(suite)
|
DavidWhittingham/arcpyext | arcpyext/publishing/__init__.py | Python | bsd-3-clause | 362 | 0.008287 | try:
import arcpy.mapping
from ._publishing import (convert_desktop_map_to_service_draft as convert_map_to_service_draft,
| convert_toolbox_to_service_draft)
except:
from ._publishing import (convert_pro_map_to_service_draft as convert_map_to_servi | ce_draft,
convert_toolbox_to_service_draft)
|
kasioumis/invenio | invenio/modules/workflows/views/holdingpen.py | Python | gpl-2.0 | 16,341 | 0.000184 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Holding Pen is a web interface overlay for all BibWorkflowObject's.
This area is targeted to catalogers and administrators for inspecting
and reacting to workflows executions. More importantly, allowing users to deal
with halted workflows.
For example, accepting submissions or other tasks.
"""
import json
import os
from flask import (
Blueprint,
flash,
jsonify,
render_template,
request,
send_from_directory,
session,
url_for,
)
from flask.ext.breadcrumbs import default_breadcrumb_root, register_breadcrumb
from flask.ext.login import login_required
from flask.ext.menu import register_menu
from invenio.base.decorators import templated, wash_arguments
from invenio.base.i18n import _
from invenio.ext.principal import permission_required
from invenio.utils.date import pretty_date
from six import text_type
from ..acl import viewholdingpen
from ..api import continue_oid_delayed, start_delayed
from ..models import BibWorkflowObject, ObjectVersion, Workflow
from ..registry import actions, workflows
from ..utils import (extract_data, get_action_list,
get_formatted_holdingpen_object,
get_holdingpen_objects,
get_previous_next_objects,
get_rendered_task_results,
sort_bwolist)
blueprint = Blueprint('holdingpen', __name__, url_prefix="/admin/holdingpen",
template_folder='../templates',
static_folder='../static')
default_breadcrumb_root(blueprint, '.holdingpen')
HOLDINGPEN_WORKFLOW_STATES = {
ObjectVersion.HALTED: {
'message': _(ObjectVersion.name_from_version(ObjectVersion.HALTED)),
'class': 'danger'
},
ObjectVersion.WAITING: {
'message': _(ObjectVersion.name_from_version(ObjectVersion.WAITING)),
'class': 'warning'
},
ObjectVersion.ERROR: {
'message': _(ObjectVersion.name_from_version(ObjectVersion.ERROR)),
'class': 'danger'
},
ObjectVersion.COMPLETED: {
'message': _(ObjectVersion.name_from_version(ObjectVersion.COMPLETED)),
'class': 'success'
},
ObjectVersion.INITIAL: {
'message': _(ObjectVersion.name_from_version(ObjectVersion.INITIAL)),
'class': 'info'
},
ObjectVersion.RUNNING: {
'message': _(ObjectVersion.name_from_version(ObjectVersion.RUNNING)),
'class': 'warning'
}
}
@blueprint.route('/', methods=['GET', 'POST'])
@blueprint.route('/index', methods=['GET', 'POST'])
@login_required
@register_menu(blueprint, 'personalize.holdingpen', _('Your Pending Actions'))
@register_breadcrumb(blueprint, '.', _('Holdingpen'))
@templated('workflows/index.html')
def index():
"""
Display main interface of Holdingpen.
Acts as a hub for catalogers (may be removed)
"""
# FIXME: Add user filtering
bwolist = get_holdingpen_objects()
action_list = get_action_list(bwolist)
return dict(tasks=action_list)
@blueprint.route('/maintable', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.records', _('Records'))
@login_required
@permission_required(viewholdingpen.name)
@templated('workflows/maintable.html')
def maintable():
"""Display main table interface of Holdingpen."""
bwolist = get_holdingpen_objects()
action_list = get_action_list(bwolist)
tags = session.get(
"holdingpen_tags",
[ObjectVersion.name_from_version(ObjectVersion.HALTED)]
)
if 'version' in request.args:
for key, value in ObjectVersion.MAPPING.items():
if value == int(request.args.get('version')):
if key not in tags:
tags.append(key)
tags_to_print = []
for tag in tags:
if tag:
tags_to_print.append({
"text": str(_(tag)),
"value": tag,
})
return dict(bwolist=bwolist,
action_list=action_list,
tags=json.dumps(tags_to_print))
@blueprint.route('/details/<int:objectid>', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.details', _("Record Details"))
@login_required
@permission_required(viewholdingpen.name)
def details(objectid):
"""Display info about the object."""
from ..utils import get_workflow_info
from invenio.ext.sqlalchemy import db
from itertools import groupby
of = "hd"
bwobject = BibWorkflowObject.query.get_or_404(objectid)
previous_object, next_object = get_previous_next_objects(
session.get("holdingpen_current_ids"),
objectid
)
formatted_data = bwobject.get_formatted_data(of)
extracted_data = extract_data(bwobject)
action_name = bwobject.get_action()
if action_name:
action = actions[action_name]
rendered_actions = action().render(bwobject)
else:
rendered_actions = {}
if bwobject.id_parent:
history_objects_db_request = BibWorkflowObject.query.filter(
db.or_(BibWorkflowObject.id_parent == bwobject.id_parent,
BibWorkflowObject.id == bwobject.id_parent,
BibWorkflowObject.id == bwobject.id)).all()
else:
history_objects_db_request = BibWorkflowObject.query.filter(
db.or_(BibWorkflowObject.id_parent == bwobject.id,
BibWorkflowObject.id == bwobject.id)).all()
history_objects = {}
temp = groupby(history_objects_db_request,
lambda x: x.version)
for key, value in temp:
if key != ObjectVersion.RUNNING:
value = list(value)
value.sort(key=lambda x: x.modified, reverse=True)
history_objects[key] = value
history_objects = sum(history_objects.values(), [])
for obj in history_objects:
obj._class = HOLDING | PEN_WORKFLOW_STATES[obj.version]["class"]
obj.message = HOLDINGPEN_WORKFLOW_STATES[obj.version]["message"]
| results = get_rendered_task_results(bwobject)
workflow_definition = get_workflow_info(extracted_data['workflow_func'])
task_history = bwobject.get_extra_data().get('_task_history', [])
return render_template('workflows/details.html',
bwobject=bwobject,
rendered_actions=rendered_actions,
history_objects=history_objects,
bwparent=extracted_data['bwparent'],
info=extracted_data['info'],
log=extracted_data['logtext'],
data_preview=formatted_data,
workflow=extracted_data['w_metadata'],
task_results=results,
previous_object=previous_object,
next_object=next_object,
task_history=task_history,
workflow_definition=workflow_definition,
versions=ObjectVersion,
pretty_date=pretty_date,
workflow_class=workflows.get(extracted_data['w_metadata'].name),
)
@blueprint.route('/files/<int:object_id>/<path:filename>',
methods=['POST', 'GET'])
@login_required
@permission_required(viewholdingpen.name)
def get_file_from_task_result(object_id=None, filename=None):
"""Send the requested |
cgranade/qutip | qutip/tests/test_bofin_solvers.py | Python | bsd-3-clause | 32,389 | 0.000123 | """
Tests for qutip.nonmarkov.bofin_solvers.
"""
import numpy as np
import pytest
from numpy.linalg import eigvalsh
from scipy.integrate import quad
from scipy.sparse import csr_matrix
from qutip import (
basis, destroy, expect, liouvillian, sigmax, sigmaz,
tensor, Qobj, QobjEvo, Options,
)
from qutip.fastsparse import fast_csr_matrix
from qutip.nonmarkov.bofin_baths import (
BathExponent,
Bath,
BosonicBath,
DrudeLorentzBath,
DrudeLorentzPadeBath,
UnderDampedBath,
FermionicBath,
LorentzianBath,
LorentzianPadeBath,
)
from qutip.nonmarkov.bofin_solvers import (
HierarchyADOs,
HierarchyADOsState,
HEOMSolver,
HSolverDL,
_GatherHEOMRHS,
)
from qutip.ui.progressbar import BaseProgressBar, TextProgressBar
class TestHierarchyADOs:
def mk_exponents(self, dims):
return [
BathExponent("I", dim, Q=None, ck=1.0, vk=2.0) for dim in dims
]
def test_create(self):
exponents = self.mk_exponents([2, 3])
ados = HierarchyADOs(exponents, max_depth=2)
assert ados.exponents == exponents
assert ados.max_depth == 2
assert ados.dims == [2, 3]
assert ados.vk == [2.0, 2.0]
assert ados.ck == [1.0, 1.0]
assert ados.ck2 == [None, None]
assert ados.sigma_bar_k_offset == [None, None]
assert ados.labels == [
(0, 0), (0, 1), (0, 2), (1, 0), (1, 1),
]
def test_state_idx(self):
ados = HierarchyADOs(self.mk_exponents([2, 3]), max_depth=2)
assert ados.idx((0, 0)) == 0
assert ados.idx((0, 1)) == 1
assert ados.idx((0, 2)) == 2
assert ados.idx((1, 0)) == 3
assert ados.idx((1, 1)) == 4
def test_next(self):
ados = HierarchyADOs(self.mk_exponents([2, 3]), max_depth=2)
assert ados.next((0, 0), 0) == (1, 0)
assert ados.next((0, 0), 1) == (0, 1)
assert ados.next((1, 0), 0) is None
assert ados.next((1, 0), 1) == (1, 1)
assert ados.next((1, 1), 1) is None
def test_prev(self):
ados = HierarchyADOs(self.mk_exponents([2, 3]), max_depth=2)
assert ados.prev((0, 0), 0) is None
assert ados.prev((0, 0), 1) is None
assert ados.prev((1, 0), 0) == (0, 0)
assert ados.prev((0, 1), 1) == (0, 0)
assert ados.prev((1, 1), 1) == (1, 0)
assert ados.prev((0, 2), 1) == (0, 1)
def test_exps(self):
ados = HierarchyADOs(self.mk_exponents([3, 3, 2]), max_depth=4)
assert ados.exps((0, 0, 0)) == ()
assert ados.exps((1, 0, 0)) == (ados.exponents[0],)
assert ados.exps((2, 0, 0)) == (
ados.exponents[0], ados.exponents[0],
)
assert ados.exps((1, 2, 1)) == (
ados.exponents[0],
ados.exponents[1], ados.exponents[1],
ados.exponents[2],
)
def test_filter_by_nothing(self):
ados = HierarchyADOs(self.mk_exponents([2, 3]), max_depth=2)
assert ados.filter() == [
(0, 0), (0, 1), (0, 2), (1, 0), (1, 1),
]
def test_filter_by_level(self):
ados = HierarchyADOs(self.mk_exponents([2, 3]), max_depth=2)
assert ados.filter(level=0) == [
(0, 0),
]
assert ados.filter(level=1) == [
(0, 1),
(1, 0),
]
assert ados.filter(level=2) == [
(0, 2),
(1, 1),
]
assert ados.filter(level=3) == []
def test_filter_by_exponents(self):
ados = HierarchyADOs(self.mk_exponents([2, 3]), max_depth=2)
assert ados.filter(dims=[]) == [
(0, 0),
]
assert ados.filter(dims=[2]) == [
(1, 0),
]
assert ados.filter(level=1, dims=[2]) == [
(1, 0),
]
assert ados.filter(dims=[3]) == [
(0, 1),
]
assert ados.filter(dims=[2, 3]) == [
(1, 1),
]
assert ados.filter(level=2, dims=[2, 3]) == [
(1, 1),
]
assert ados.filter(dims=[3, 3]) == [
(0, 2),
]
assert ados.filter(types=["I"]) == [
(0, 1),
(1, 0),
]
assert ados.filter(types=["I", "I"]) == [
(0, 2),
(1, 1),
]
with pytest.raises(ValueError) as err:
ados.filter(types=[], dims=[2])
assert str(err.value) == (
"The tags, dims and types filters must all be the same length."
)
with pytest.raises(ValueError) as err:
ados.filter(dims=[2, 2, 2])
assert str(err.value) == (
"The maximum depth for the hierarchy is 2 but 3 levels of"
" excitation filters were given."
)
with pytest.raises(ValueError) as err:
ados.filter(level=0, dims=[2])
assert str(err.value) == (
"The level parameter is 0 but 1 levels of excitation filters"
" were given."
)
class TestHierarchyADOsState:
def mk_ados(self, bath_dims, max_depth):
exponents = [
BathExponent("I", dim, Q=None, ck=1.0, vk=2.0) for dim in bath_dims
]
ados = HierarchyADOs(exponents, max_depth=max_depth)
return ados
def mk_rho_and_soln(self, ados, rho_dims):
| n_ados = len(ados.labels)
ado_soln = np.random.rand(n_ados, *[2**d for d in rho_dims])
rho = Qobj(ado_soln[0, :], dims=[2, 2])
return rho, ado_soln
def test_create(self):
ados = self.mk_ados([2, 3], max_depth=2)
rho, ado_soln = self.mk_rho_and_soln(ados, [2, 2])
ado_state = HierarchyADOsState(rho, ados, ado_soln)
assert ado_state.rho == rho
assert ado_state.labels == ados.labels |
assert ado_state.exponents == ados.exponents
assert ado_state.idx((0, 0)) == ados.idx((0, 0))
assert ado_state.idx((0, 1)) == ados.idx((0, 1))
def test_extract(self):
ados = self.mk_ados([2, 3], max_depth=2)
rho, ado_soln = self.mk_rho_and_soln(ados, [2, 2])
ado_state = HierarchyADOsState(rho, ados, ado_soln)
        assert ado_state.extract((0, 0)) == rho
        assert ado_state.extract(0) == rho
        assert ado_state.extract((0, 1)) == Qobj(ado_soln[1, :], dims=rho.dims)
        assert ado_state.extract(1) == Qobj(ado_soln[1, :], dims=rho.dims)
class DrudeLorentzPureDephasingModel:
""" Analytic Drude-Lorentz pure-dephasing model for testing the HEOM solver.
"""
def __init__(self, lam, gamma, T, Nk):
self.lam = lam
self.gamma = gamma
self.T = T
self.Nk = Nk
        # we add a very weak system hamiltonian here to avoid having a
        # singular system that causes problems for the scipy.sparse.linalg
        # superLU solver used in spsolve.
self.H = Qobj(1e-5 * np.ones((2, 2)))
self.Q = sigmaz()
def rho(self):
""" Initial state. """
return 0.5 * Qobj(np.ones((2, 2)))
def state_results(self, states):
projector = basis(2, 0) * basis(2, 1).dag()
return expect(states, projector)
def analytic_results(self, tlist):
lam, gamma, T = self.lam, self.gamma, self.T
lam_c = lam / np.pi
def _integrand(omega, t):
J = 2 * lam_c * omega * gamma / (omega**2 + gamma**2)
return (-4 * J * (1 - np.cos(omega*t))
/ (np.tanh(0.5*omega / T) * omega**2))
# Calculate the analytical results by numerical integration
return [
0.5 * np.exp(quad(_integrand, 0, np.inf, args=(t,), limit=5000)[0])
for t in tlist
]
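    # Added reference note (not in the original model): the quadrature above
    # evaluates the standard pure-dephasing coherence decay
    #
    #     rho_01(t) = 0.5 * exp(-int_0^inf dw
    #                     4 J(w) (1 - cos(w t)) / (tanh(w / (2 T)) w**2))
    #
    # with the Drude-Lorentz spectral density
    # J(w) = 2 lam_c gamma w / (w**2 + gamma**2) and lam_c = lam / pi.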
def bath_coefficients(self):
""" Correlation function expansion coefficients for the Drude-Lorentz bath.
"""
lam, gamma, T = self.lam, self.gamma, self.T
Nk = self.Nk
ck_real = [lam * gamma * (1 / np.tan(gamma / (2 * T)))]
ck_real.extend([
(4 * lam * gamma * T * 2 * np.pi * k * T /
((2 * np.pi * k * T)**2 - gamma**2))
for k in range(1, Nk + 1)
])
vk_real = [gamma]
vk_real.extend([2 * np.pi * k * T for |
matthrice/MarkeTouch-TTS | test/logger_tests.py | Python | apache-2.0 | 684 | 0.038012 | import logging
from logging.handlers import RotatingFileHandler
import time
logging.basicConfig(filemode='a',
format='%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=loggin | g.DEBUG)
def test_createRotatingLog(path):
#initiates logging session
logger = logging.getLogger("TTS_test")
#defines handler for byte size
    #will roll over after 20 bytes, will max out at 5 backup files
sizeHandler = RotatingFileHandler(path, maxBytes=20,
backupCount=5)
logger.addHandler(sizeHandler)
for i in (3, 2, 1):
logger.info("This is test log line - %s" % i)
time.sleep(5)
log_file = "test.log"
test_createRotat | ingLog(log_file)
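# Added note (not part of the original test): with maxBytes=20 every log
# line exceeds the size limit, so each write triggers a rollover and the
# directory fills with test.log, test.log.1, ... up to backupCount=5.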
|
frankyrumple/smc | modules/pytube/api.py | Python | mit | 12,801 | 0.000234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .exceptions import MultipleObjectsReturned, YouTubeError, CipherError
from .tinyjs import JSVM
from .models import Video
from .utils import safe_filename
try:
from urllib2 import urlopen
from urlparse import urlparse, parse_qs, unquote
except ImportError:
from urllib.parse import urlparse, parse_qs, unquote
from urllib.request import urlopen
import re
import json
YT_BASE_URL = 'http://www.youtube.com/get_video_info'
# YouTube quality and codecs id map.
# source: http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs
YT_ENCODING = {
# Flash Video
5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],
# 3GP
36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
13: ["3gp", "N/A", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],
# MPEG-4
18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
85: ["mp4", "1080p", "H.264", "3D", "2-2.9", "AAC", "152"],
# WebM
43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}
# The keys corresponding to the quality/codec map above.
YT_ENCODING_KEYS = (
'extension',
'resolution',
'video_codec',
'profile',
'video_bitrate',
'audio_codec',
'audio_bitrate'
)
class YouTube(object):
# TODO: just cause you CAN do this, doesn't mean you should. `hasattr` is
# much cleaner.
_filename = None
_fmt_values = []
_video_url = None
_js_code = False
_precompiled = False
title = None
videos = []
# fmt was an undocumented URL parameter that allowed selecting
# YouTube quality mode without using player user interface.
@property
def url(self):
"""Exposes the video url.
"""
return self._video_url
@url.setter
def url(self, url):
""" Defines the URL of the YouTube video.
"""
# TODO: erm, this is ugly. url should just be a method, not a property.
self._video_url = url
# Reset the filename.
self._filename = None
# Get the video details.
self._get_video_info()
@property
def filename(self):
"""Exposes the title of the video. If this is not set, one is generated
based on the name of the video.
"""
if not self._filename:
self._filename = safe_filename(self.title)
return self._filename
@filename.setter
def filename(self, filename):
"""Defines the filename.
"""
self._filename = filename
if self.videos:
for video in self.videos:
video.filename = filename
@property
def video_id(self):
"""Gets the video ID extracted from the URL.
"""
parts = urlparse(self._video_url)
qs = getattr(parts, 'query', None)
if qs:
video_id = parse_qs(qs).get('v', None)
if video_id:
return video_id.pop()
def get(self, extension=None, reso | lution=None, profile="High"):
"""Return a single video given an extention and resolution.
:params extention: The desired file extention (e.g.: mp4).
| :params resolution: The desired video broadcasting standard.
:params profile: The desired quality profile.
"""
result = []
for v in self.videos:
if extension and v.extension != extension:
continue
elif resolution and v.resolution != resolution:
continue
elif profile and v.profile != profile:
continue
else:
result.append(v)
if not len(result):
return
        elif len(result) == 1:
return result[0]
else:
raise MultipleObjectsReturned(
"get() returned more than one object")
def filter(self, extension=None, resolution=None):
"""Return a filtered list of videos given an extention and resolution
criteria.
:params extention: The desired file extention (e.g.: mp4).
:params resolution: The desired video broadcasting standard.
"""
results = []
for v in self.videos:
if extension and v.extension != extension:
continue
elif resolution and v.resolution != resolution:
continue
else:
results.append(v)
return results
def _fetch(self, path, data):
"""Given a path, traverse the response for the desired data. (A
modified ver. of my dictionary traverse method:
https://gist.github.com/2009119)
:params path: A tuple representing a path to a node within a tree.
:params data: The data containing the tree.
"""
elem = path[0]
# Get first element in tuple, and check if it contains a list.
if type(data) is list:
# Pop it, and let's continue..
return self._fetch(path, data.pop())
# Parse the url encoded data
data = parse_qs(data)
# Get the element in our path
data = data.get(elem, None)
# Offset the tuple by 1.
path = path[1::1]
# Check if the path has reached the end OR the element return
# nothing.
        if len(path) == 0 or data is None:
            if type(data) is list and len(data) == 1:
data = data.pop()
return data
else:
# Nope, let's keep diggin'
return self._fetch(path, data)
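    # Added example (not part of the original source): each element of the
    # path tuple unwraps one url-encoded layer, e.g.
    #
    #     self._fetch(('a', 'b'), 'a=b%3D1')  ->  '1'
    #
    # because parse_qs('a=b%3D1') yields {'a': ['b=1']} and the second path
    # element is then looked up inside the decoded inner string.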
def _parse_stream_map(self, text):
"""Python's `parse_qs` can't properly decode the stream map
containing video data so we use this instead.
"""
videoinfo = {
"itag": [],
"url": [],
"quality": [],
"fallback_host": [],
"s": [],
"type": []
}
# Split individual videos
videos = text.split(",")
# Unquote the characters and split to parameters
videos = [video.split("&") for video in videos]
for video in videos:
for kv in video:
key, value = kv.split("=")
                # Ignore parameters not tracked in the stream-map schema.
                if key in videoinfo:
                    videoinfo[key].append(unquote(value))
return videoinfo
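    # Added example (not part of the original source): for a stream map such
    # as "itag=22&quality=hd720,itag=18&quality=medium" the loop above
    # yields videoinfo["itag"] == ["22", "18"] and
    # videoinfo["quality"] == ["hd720", "medium"], with url-quoted values
    # decoded by unquote.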
def _get_video_info(self):
"""This is responsable for executing the request, extracting the
necessary details, and populating the different video resolutions and
formats into a list.
"""
# TODO: split up into smaller functions. Cyclomatic complexity => 15
self.title = None
self.videos = []
response = urlopen(self.url)
if response:
content = response.read().decode("utf-8")
try:
player_conf = content[18 + content.find("ytplayer.config = "):]
bracket_count = 0
for i, char in enumerate(player_conf):
if char == "{":
bracket_count += 1
elif char == "}":
bracket_count -= 1
|
hopr/hopr | script/evlogger.py | Python | gpl-3.0 | 8,419 | 0.006295 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of hopr: https://github.com/hopr/hopr.
#
# Hopr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hopr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU | General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hopr. If not, see <http://www.gnu.org/lice | nses/>.
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from past.builtins import basestring
from builtins import object
import csv
import sys
from pickle import dump, load
from collections import defaultdict
from evdev import ecodes as e
from hopr.backend.evdev import find_keyboards, read_events, etype
from time import time
import os
import logging
from pprint import pprint, pformat
# TODO: Measure time spent pressing any particular key. How much time is spent pressing ctrl vs shift?
# TODO: General timing info. How much time is spent typing?
class SaveFile(object):
def __init__(self, filename):
self.filename = filename
@property
def temp_filename(self):
return self.filename + '.tmp'
def __enter__(self):
self.file = open(self.temp_filename, 'wb')
return self
def __exit__(self, exc, exc_type, exc_tb):
os.fsync(self.file.fileno())
self.file.close()
# Rename temp file to filename
os.rename(self.temp_filename, self.filename)
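# Added illustration (not part of the original script; the filename below is
# a placeholder): SaveFile implements the atomic write-temp-then-rename
# pattern, so a crash mid-save never leaves a truncated log on disk.
def _savefile_demo(filename='out/evlogger.demo'):
    with SaveFile(filename) as f:
        f.file.write(b'demo payload\n')
    # On exit the temp file is fsynced, closed, and renamed over `filename`.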
def key_name(code):
if code is None:
return 'NONE'
else:
name = e.keys[code]
if not isinstance(name, basestring):
name = name[0]
return name.replace('KEY_', '')
def key_code(name):
if name == 'NONE':
return None
else:
return e.ecodes['KEY_' + name]
def append(history, code):
history[:-1] = history[1:]
history[-1] = code
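# Added example (not part of the original script): `append` is a fixed-size
# ring shift -- the oldest entry drops off the front and the new code lands
# at the end.
def _append_demo():
    history = ['A', 'B']
    append(history, 'C')
    assert history == ['B', 'C']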
def tuplify(x):
if isinstance(x, tuple):
return x
else:
return (x,)
def save_ngrams(filename, ngrams):
n_ngrams = len(ngrams)
n_event = sum([n for _,n in list(ngrams.items())])
logging.info('Saving: {filename} n-grams: {n_ngrams} event_count: {n_event}'.format(**locals()))
key_header = None
with SaveFile(filename) as f:
w = csv.writer(f.file)
for (keys, values) in list(ngrams.items()):
keys = tuplify(keys)
values = tuplify(values)
if not key_header:
key_header = ['key{}'.format(i+1) for i in range(len(keys))]
value_header = ['value']
w.writerow(key_header + value_header)
w.writerow(keys + values)
def save_csv(filename, log):
for (tag, sublog) in list(log.items()):
save_ngrams(filename + '.' + tag, sublog)
def empty_sublog():
return defaultdict(int)
def empty_log():
return defaultdict(empty_sublog)
def load_ngrams(filename):
ngrams = empty_sublog()
reader = csv.reader(open(filename, 'rb'))
header = next(reader)
assert header[-1] == 'value'
    n_keys = len(header) - 1
    for row in reader:
        key = tuple(int(code) for code in row[:n_keys])
        ngrams[key] = int(row[-1])
return ngrams
# def load_csv(filename):
# log = empty_log()
# for fname in glob(filename + '.*'):
# tag = fname.replace('filename.', '')
# sublog = load_ngrams(fname)
def load_pickle(filename):
return load(open(filename, 'rb'))
def save_pickle(filename, log):
logging.info('Saving log: {}'.format(dict((key, len(value)) for key,value in list(log.items()))))
with SaveFile(filename) as f:
dump(log, f.file, protocol=2)
# def save_csv(filename, log):
# save_ngrams(filename + '.bigrams', log['event'])
# save_ngrams(filename + '.events', log['press2'])
load_log = load_pickle
def save_log(filename, log):
save_pickle(filename, log)
save_csv(filename, log)
def start(filename, save_interval):
" Log statistics for all keyboard events "
if os.path.exists(filename):
log = load_log(filename)
else:
log = empty_log()
press = [None]*2
last_save = 0
try:
for ev in read_events(find_keyboards()):
if time() - last_save > save_interval:
save_log(filename, log)
last_save = time()
if ev.type == e.EV_KEY:
logging.debug('{}'.format(ev))
log[etype.name(ev.value)][key_name(ev.code)] += 1
if ev.value == etype.KEY_PRESS:
append(press, key_name(ev.code))
log['PRESS2'][tuple(press)] += 1
    except (KeyboardInterrupt, SystemExit):
logging.info('Quitting. Saving log')
save_log(filename, log)
except Exception as exc:
        logging.error('Unexpected exception: ' + str(exc))
raise
def view(filename):
x = load_log(filename)
events = {}
for ((code, value), count) in list(x['event'].items()):
key = '{}={}'.format(key_name(code), value)
events[key] = count
print('Key Count')
pprint(events)
press = {}
for ((code1, code2), count) in list(x['press2'].items()):
key = '{},{}'.format(key_name(code1), key_name(code2))
press[key] = count
print('Paired event count')
pprint(press)
# def print_pairs(filename):
# x = load_log(filename)
# pairs = x['press2']
# codes = sorted(set([c for pair in pairs for c in pair]))
# code2idx = dict((c,idx) for (idx, c) in enumerate(codes))
# N = len(codes)
# count = [[0]*N for _ in range(N)]
# for ((code1, code2), n) in x['press2'].items():
# count[code2idx[code1]][code2idx[code2]] += n
# codes = [key_name(code) for code in codes]
# w = csv.writer(sys.stdout)
# w.writerow([''] + codes)
# for code, row in zip(codes, count):
# w.writerow([code] + row)
def summary(filename, top):
x = load_log(filename)
press = []
hold = []
for ((code, value), count) in list(x['event'].items()):
if value == etype.KEY_PRESS:
press.append((count, key_name(code)))
elif value == etype.KEY_HOLD:
hold.append((count, key_name(code)))
print('Key Press')
pprint(sorted(press, reverse=True)[:top])
print('Key Hold')
pprint(sorted(hold, reverse=True)[:top])
press2 = []
for ((code1, code2), count) in list(x['press2'].items()):
# TODO: Option. Ignore repeated key presses.
if code1 != code2:
key = '{},{}'.format(key_name(code1), key_name(code2))
press2.append((count, key))
print('Paired event count')
pprint(sorted(press2, reverse=True)[:top])
def run(cmd, verbose, **kwargs):
if verbose:
logging.getLogger().setLevel(level=logging.DEBUG)
logging.debug('Running {} with args {}'.format(cmd, pformat(kwargs)))
f = globals()[cmd]
f(**kwargs)
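# Added note (not part of the original script): `run` dispatches on this
# module's globals, so run('summary', verbose=False,
# filename='out/evlogger.log', top=15) is equivalent to calling
# summary('out/evlogger.log', top=15).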
def run_parse_args(args):
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('-v', '--verbose', action='store_true')
sp = p.add_subparsers(dest='cmd')
q = sp.add_parser('start')
q.add_argument('-i', '--save_interval', default=60.0, type=float)
# TODO: Global file name arg.
q.add_argument('filename', nargs='?', default='out/evlogger.log')
q = sp.add_parser('view')
q.add_argument('filename', nargs='?', default='out/evlogger.log')
q = sp.add_parser('summary')
q.add_argument('filename', nargs='?', default='out/evlogger.log')
q.add_argument('-t', '--top', default=15, type=int)
# q = sp.add_parser('print_pairs')
# q.add_argument('filename', nargs='?', default='out/evl |
edx-solutions/edx-platform | common/djangoapps/xblock_django/tests/test_user_service.py | Python | agpl-3.0 | 5,271 | 0.002087 | """
Tests for the DjangoXBlockUserService.
"""
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from openedx.core.djangoapps.external_user_ids.models import ExternalIdType
from student.models import anonymous_id_for_user
from student.tests.factories import AnonymousUserFactory, UserFactory
from xblock_django.user_service import (
ATTR_KEY_IS_AUTHENTICATED,
ATTR_KEY_USER_ID,
ATTR_KEY_USER_IS_STAFF,
ATTR_KEY_USER_PREFERENCES,
ATTR_KEY_USERNAME,
USER_PREFERENCES_WHITE_LIST,
DjangoXBlockUserService
)
class UserServiceTestCase(TestCase):
"""
Tests for the DjangoXBlockUserService.
"""
def setUp(self):
super(UserServiceTestCase, self).setUp()
self.user = UserFactory(username="tester", email="test@tester.com")
self.user.profile.name = "Test Tester"
set_user_preference(self.user, 'pref-lang', 'en')
set_user_preference(self.user, 'time_zone', 'US/Pacific')
set_user_preference(self.user, 'not_white_listed', 'hidden_value')
self.anon_user = AnonymousUserFactory()
def assert_is_anon_xb_user(self, xb_user):
"""
A set of assertions for an anonymous XBlockUser.
"""
self.assertFalse(xb_user.opt_attrs[ATTR_KEY_IS_AUTHENTICATED])
self.assertIsNone(xb_user.full_name)
self.assertListEqual(xb_user.emails, [])
def assert_xblock_user_matches_django(self, xb_user, dj_user):
"""
A set of assertions for comparing a XBlockUser to a django User
"""
self.assertTrue(xb_user.opt_attrs[ATTR_KEY_IS_AUTHENTICATED])
self.assertEqual(xb_user.emails[0], dj_user.email)
self.assertEqual(xb_user.full_name, dj_user.profile.name)
self.assertEqual(xb_user.opt_attrs[ATTR_KEY_USERNAME], dj_user.username)
self.assertEqual(xb_user.opt_attrs[ATTR_KEY_USER_ID], dj_user.id)
self.assertFalse(xb_user.opt_attrs[ATTR_KEY_USER_IS_STAFF])
self.assertTrue(
all(
pref in USER_PREFERENCES_WHITE_LIST
for pref in xb_user.opt_attrs[ATTR_KEY_USER_PR | EFERENCES]
)
)
def test_convert_anon_user(self):
"""
Tests for convert_django_user_to_xblock_user behavior when django user is AnonymousUser.
"""
django_user_service = DjangoXBlockUserService(self.anon_user)
xb_user = django_user_service.get_current_user()
self.assertTrue(xb_user.is_current_user)
self.assert_is_anon_xb_user(xb_user) |
def test_convert_authenticate_user(self):
"""
Tests for convert_django_user_to_xblock_user behavior when django user is User.
"""
django_user_service = DjangoXBlockUserService(self.user)
xb_user = django_user_service.get_current_user()
self.assertTrue(xb_user.is_current_user)
self.assert_xblock_user_matches_django(xb_user, self.user)
def test_get_anonymous_user_id_returns_none_for_non_staff_users(self):
"""
Tests for anonymous_user_id method to return None if user is Non-Staff.
"""
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=False)
anonymous_user_id = django_user_service.get_anonymous_user_id(
username=self.user.username,
course_id='edx/toy/2012_Fall'
)
self.assertIsNone(anonymous_user_id)
def test_get_anonymous_user_id_returns_none_for_non_existing_users(self):
"""
        Tests for anonymous_user_id method to return None if the username does not exist in the system.
"""
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True)
anonymous_user_id = django_user_service.get_anonymous_user_id(username="No User", course_id='edx/toy/2012_Fall')
self.assertIsNone(anonymous_user_id)
def test_get_anonymous_user_id_returns_id_for_existing_users(self):
"""
Tests for anonymous_user_id method returns anonymous user id for a user.
"""
course_key = CourseKey.from_string('edX/toy/2012_Fall')
anon_user_id = anonymous_id_for_user(
user=self.user,
course_id=course_key,
save=True
)
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True)
anonymous_user_id = django_user_service.get_anonymous_user_id(
username=self.user.username,
course_id='edX/toy/2012_Fall'
)
self.assertEqual(anonymous_user_id, anon_user_id)
def test_external_id(self):
"""
Tests that external ids differ based on type.
"""
ExternalIdType.objects.create(name='test1', description='Test type 1')
ExternalIdType.objects.create(name='test2', description='Test type 2')
django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True)
ext_id1 = django_user_service.get_external_user_id('test1')
ext_id2 = django_user_service.get_external_user_id('test2')
assert ext_id1 != ext_id2
with self.assertRaises(ValueError):
django_user_service.get_external_user_id('unknown')
|
rakhi27/microurl | microurl/bitly.py | Python | gpl-2.0 | 22,925 | 0.033544 | import requests
import base64
import urllib
class bitlyapi(object):
def __init__(self,access_token):
self.host = 'https://api.bit.ly/'
self.ssl_host = 'https://api-ssl.bit.ly/'
self.access_token=access_token
def shorturl(self,url,preferred_domain=None):
params = dict(longUrl=url)
if preferred_domain:
params['domain'] = preferred_domain
params['access_token'] = self.access_token
response=self.send_request('get',self.ssl_host+'v3/shorten',params)
return response
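    # Added usage sketch (not part of the original module; the token and URL
    # are placeholders, and the response layout assumes bitly's documented
    # v3 JSON envelope):
    #
    #     api = bitlyapi('YOUR_ACCESS_TOKEN')
    #     resp = api.shorturl('http://example.com/a/very/long/path')
    #     short_link = resp['data']['url']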
def expand(self,url):
params=dict(shortUrl=url)
params['access_token']= self.access_token
response=self.send_request('get',self.ssl_host+'v3/expand',params)
return response
def url_info(self,url,bitly_hash=None,expand_user=None):
params=dict(shortUrl=url)
if bitly_hash:
params['hash']=bitly_hash
if expand_user:
params['expand_user']=expand_user
params['access_token']= self.access_token
response=self.send_request('get',self.ssl_host+'v3/info',params)
return response
def link_lookup(self,url):
params=dict(url=url)
params['access_token']= self.access_token
| response=self.send_request('get',self.ssl_host+'v3/link/lookup',params)
return response
def link_edit(self,link,edit,title=None,note=None,private=None,user_ts=None,archived=None):
params=dict(link=link)
params['edit']=edit
if title:
params['title']=title
| if note:
params['note']=note
if private:
params['private']=private
if user_ts:
params['user_ts']=user_ts
if archived:
params['archived']=archived
params['access_token']=self.access_token
response=self.send_request('get',self.ssl_host+'v3/user/link_edit',params)
return response
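    # Added usage sketch (not part of the original module): per bitly's v3
    # docs, `edit` names the comma-separated fields being changed, so a
    # hypothetical call looks like
    #
    #     api.link_edit('http://bit.ly/abc123', edit='title', title='New')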
def user_link_lookup(self,url):
params=dict(url=url)
params['access_token']= self.access_token
response=self.send_request('get',self.ssl_host+'v3/user/link_lookup',params)
return response
def user_link_save(self,longUrl,title=None,note=None,private=None,user_ts=None):
params=dict(longUrl=longUrl)
params['access_token']= self.access_token
if title:
params['title']=title
if note:
params['note']=note
if private:
params['private']=private
if user_ts:
params['user_ts']=user_ts
response=self.send_request('get',self.ssl_host+'v3/user/link_save',params)
return response
def highvalue(self,limit=10):
params=dict(limit=limit)
params['access_token']=self.access_token
response=self.send_request('get',self.ssl_host+'v3/highvalue',params)
return response
def search(self,query,limit=10,offset=0,lang='en',cities=None,domain=None,fields=None):
params=dict(limit=limit)
params['offset']=offset
params['query']=query
params['lang']=lang
if cities:
params['cities']=cities
if domain:
params['domain']=domain
if fields:
params['fields']=fields
params['access_token']=self.access_token
response=self.send_request('get',self.ssl_host+'v3/search',params)
return response
def bursting_phrases(self):
params=dict(access_token=self.access_token)
response=self.send_request('get',self.ssl_host+'v3/realtime/bursting_phrases',params)
return response
def hot_phrases(self):
params=dict(access_token=self.access_token)
response=self.send_request('get',self.ssl_host+'v3/realtime/hot_phrases',params)
return response
def clickrate(self,phrase):
params=dict(access_token=self.access_token)
params['phrase']=phrase
response=self.send_request('get',self.ssl_host+'v3/realtime/clickrate',params)
return response
def link_info(self,link):
params=dict(access_token=self.access_token)
params['link']=link
response=self.send_request('get',self.ssl_host+'v3/link/info',params)
return response
def link_content(self,link,content_type='html'):
params=dict(access_token=self.access_token)
params['link']=link
params['content_type']=content_type
response=self.send_request('get',self.ssl_host+'v3/link/content',params)
return response
def link_category(self,link):
params=dict(access_token=self.access_token)
params['link']=link
response=self.send_request('get',self.ssl_host+'v3/link/category',params)
return response
def link_social(self,link):
params=dict(access_token=self.access_token)
params['link']=link
response=self.send_request('get',self.ssl_host+'v3/link/social',params)
return response
def link_location(self,link):
params=dict(access_token=self.access_token)
params['link']=link
response=self.send_request('get',self.ssl_host+'v3/link/location',params)
return response
def link_language(self,link):
params=dict(access_token=self.access_token)
params['link']=link
response=self.send_request('get',self.ssl_host+'v3/link/language',params)
return response
def app_details(self,client_id):
params=dict(access_token=self.access_token)
params['client_id']=client_id
response=self.send_request('get',self.ssl_host+'v3/oauth/app',params)
return response
def user_info(self,login=None,full_name=None):
params=dict(access_token=self.access_token)
if login:
params['login']=login
if full_name:
            params['full_name']=full_name
response=self.send_request('get',self.ssl_host+'v3/user/info',params)
return response
def user_linkhistory(self,link=None,limit=None,offset=None,created_before=None,created_after=None,modified_after=None,expand_client_id=None,archived=None,private=None,user=None):
params=dict(access_token=self.access_token)
if link:
params['link']=link
if offset:
params['offset']=offset
if created_before:
params['created_before']=created_before
if created_after:
params['created_after']=created_after
if modified_after:
params['modified_after']=modified_after
if expand_client_id:
params['expand_client_id']=expand_client_id
if archived:
params['archived']=archived
if private:
params['private']=private
if user:
params['user']=user
response=self.send_request('get',self.ssl_host+'v3/user/link_history',params)
return response
def user_networkhistory(self,offset=None,expand_client_id=None,limit=None,expand_user=None):
params=dict(access_token=self.access_token)
if offset:
params['offset']=offset
if expand_client_id:
params['expand_client_id']=expand_client_id
if limit:
params['limit']=limit
if expand_user:
params['expand_user']=expand_user
response=self.send_request('get',self.ssl_host+'v3/user/network_history',params)
return response
def user_tracking_domain_list(self):
params=dict(access_token=self.access_token)
response=self.send_request('get',self.ssl_host+'v3/user/tracking_domain_list',params)
return response
def user_clicks(self, **kwargs):
params=dict(access_token=self.access_token)
response=self.send_metrics_request('get',self.ssl_host+'v3/user/clicks',params,**kwargs)
return response
def user_countries(self, **kwargs):
params=dict(access_token=self.access_to |
tfroehlich82/picamera | picamera/camera.py | Python | bsd-3-clause | 173,859 | 0.001202 | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
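# Added example (not part of the original library): docstring_values renders
# a value map sorted by its numeric codes, e.g.
#
#     docstring_values({'off': 0, 'auto': 1})
#     -> "* ``'off'``\n        * ``'auto'``"
#
# which is presumably how tables such as METER_MODES below are interpolated
# into attribute docstrings.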
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': | mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM | _EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': |
OpenGenus/cosmos | code/online_challenges/src/project_euler/problem_067/problem_067.py | Python | gpl-3.0 | 79,937 | 0.000038 | def main():
prob = [
[59],
[73, 41],
[52, 40, 9],
[26, 53, 6, 34],
[10, 51, 87, 86, 81],
[61, 95, 66, 57, 25, 68],
[90, 81, 80, 38, 92, 67, 73],
[30, 28, 51, 76, 81, 18, 75, 44],
[84, 14, 95, 87, 62, 81, 17, 78, 58],
[21, 46, 71, 58, 2, 79, 62, 39, 31, 9],
[56, 34, 35, 53, 78, 31, 81, 18, 90, 93, 15],
[78, 53, 4, 21, 84, 93, 32, 13, 97, 11, 37, 51],
[45, 3, 81, 79, 5, 18, 78, 86, 13, 30, 63, 99, 95],
[39, 87, 96, 28, 3, 38, 42, 17, 82, 87, 58, 7, 22, 57],
[6, 17, 51, 17, 7, 93, 9, 7, 75, 97, 95, 78, 87, 8, 53],
[67, 66, 59, 60, 88, 99, 94, 65, 55, 77, 55, 34, 27, 53, 78, 28],
[76, 40, 41, 4, 87, 16, 9, 42, 75, 69, 23, 97, 30, 60, 10, 79, 87],
[12, 10, 44, 26, 21, 36, 32, 84, 98, 60, 13, 12, 36, 16, 63, 31, 91, 35],
[70, 39, 6, 5, 55, 27, 38, 48, 28, 22, 34, 35, 62, 62, 15, 14, 94, 89, 86],
[66, 56, 68, 84, 96, 21, 34, 34, 34, 81, 62, 40, 65, 54, 62, 5, 98, 3, 2, 60],
[
38,
89,
46,
37,
99,
54,
34,
53,
36,
14,
70,
26,
2,
90,
45,
13,
31,
61,
83,
73,
47,
],
[
36,
10,
63,
96,
60,
49,
41,
5,
37,
42,
14,
58,
84,
93,
96,
17,
9,
43,
5,
43,
6,
59,
],
[
66,
57,
87,
57,
61,
28,
37,
51,
84,
73,
79,
15,
39,
95,
88,
87,
43,
39,
11,
86,
77,
74,
18,
],
[
54,
42,
5,
79,
30,
49,
99,
73,
46,
37,
50,
2,
45,
9,
54,
52,
27,
95,
27,
65,
19,
45,
26,
45,
],
[
71,
39,
17,
78,
76,
29,
52,
90,
18,
99,
78,
19,
35,
62,
71,
19,
23,
65,
93,
85,
49,
33,
75,
9,
2,
],
[
33,
24,
47,
61,
60,
55,
32,
88,
57,
55,
91,
54,
46,
57,
7,
77,
98,
52,
80,
99,
| 24,
25,
46,
78,
79,
5,
],
[
92,
9,
13,
55,
10,
67,
26,
78,
76,
82,
63,
49,
51,
31,
24,
68,
5,
57,
7,
54,
69,
| 21,
67,
43,
17,
63,
12,
],
[
24,
59,
6,
8,
98,
74,
66,
26,
61,
60,
13,
3,
9,
9,
24,
30,
71,
8,
88,
70,
72,
70,
29,
90,
11,
82,
41,
34,
],
[
66,
82,
67,
4,
36,
60,
92,
77,
91,
85,
62,
49,
59,
61,
30,
90,
29,
94,
26,
41,
89,
4,
53,
22,
83,
41,
9,
74,
90,
],
[
48,
28,
26,
37,
28,
52,
77,
26,
51,
32,
18,
98,
79,
36,
62,
13,
17,
8,
19,
54,
89,
29,
73,
68,
42,
14,
8,
16,
70,
37,
],
[
37,
60,
69,
70,
72,
71,
9,
59,
13,
60,
38,
13,
57,
36,
9,
30,
43,
89,
30,
39,
15,
2,
44,
73,
5,
73,
26,
63,
56,
86,
12,
],
[
55,
55,
85,
50,
62,
99,
84,
77,
28,
85,
3,
21,
27,
22,
19,
26,
82,
69,
54,
4,
13,
7,
85,
14,
1,
15,
70,
59,
89,
95,
10,
19,
],
[
4,
9,
31,
92,
91,
38,
92,
86,
98,
75,
21,
5,
64,
42,
62,
84,
36,
20,
73,
42,
21,
23,
22,
51,
51,
79,
25,
45,
85,
53,
3,
43,
22,
],
[
75,
63,
2,
49,
14,
12,
89,
14,
60,
78,
92,
16,
44,
82,
38,
30,
72,
11,
46,
52,
90,
27,
8,
65,
78,
3,
85,
41,
57,
79,
39,
52,
33,
48,
],
[
78,
27,
56,
56,
39,
13,
19,
43,
86,
72,
58,
95,
39,
7,
4,
34,
21,
98,
39,
15,
39,
84,
89,
69,
84,
46,
37,
57,
59,
35,
59,
50,
26,
15,
93,
],
[
42,
89,
36,
27,
78,
91,
24,
11,
|