repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
TomckySan/python-training | string.py | Python | mit | 278 | 0 | # coding: utf-8
# | ζεεγ―""γ§γ''γ§γγγγ
print "hello" + ' world'
print u"γ»γ" * 5
print u"ζζ₯γ\tγγ£γ¨\tγγ¬γ«γ€\n\"\\\\γγ―γ―γ―γ\""
# θ€ζ°θ‘γ«ζΈ‘γγ¨γγ―"γ3εηΆγγγ°γγ
print u"""
δ»ζ₯γ
γγ
ε€©ζ° |
γ§γ
γ
"""
|
octomike/hellodjango | paste/models.py | Python | mit | 341 | 0.014663 | from django.db import models
from uuid import uuid4
def get_short_uuid():
return str(uuid4())[0:5]
class Code(models.Model):
key = models.CharField(max_length=6, primary_key=True, default=get_short_uuid)
lang = models.CharField(max_length=32, default='bas | h')
code = models.Text | Field(max_length=2048, help_text="some Code")
|
lcrees/twoq | twoq/tests/lazy/auto/test_filtering.py | Python | bsd-3-clause | 962 | 0.00104 | # -*- coding: utf-8 -*-
from twoq.support import unittest
#pylint: disable-msg=w0614,w0401
from twoq.tests.auto.filtering import * # @UnusedWildImport
from twoq.tests.auto.queuing import AQMixin
class TestAutoFilterQ(unittest.TestCase, AQMixin, AFilterQMixin):
def setUp(self):
self.maxDiff = None
from twoq.lazy.filtering import filterq
self.qclass = filterq
class TestAutoSliceQ(unittest.TestCase, AQMixin, ASliceQMixin):
def setUp(self):
from twoq.lazy.filtering import sliceq
self.qclass = sliceq
class TestAutoCollectQ(unittest.TestCase, AQMixin, ACollectQMixin):
def setUp(self):
self.maxDi | ff = None
from twoq.lazy.filtering import collectq
self.qclass = collectq
class TestAutoSetQ(u | nittest.TestCase, AQMixin, ASetQMixin):
def setUp(self):
from twoq.lazy.filtering import setq
self.qclass = setq
if __name__ == '__main__':
unittest.main()
|
ff0000/red-fab-deploy | fab_deploy/base/snmp.py | Python | mit | 2,394 | 0.002506 | import os
from fab_deploy import funct | ions
from fab_deploy.config import CustomConfig
from fab_deploy.base.file_based import BaseUpdateFiles
from fabric.api import run, sudo, env, put, execute, | local
from fabric.tasks import Task
class SNMPSingleSync(Task):
"""
Sync a snmp config file
Takes one required argument:
* **filename**: the full path to the file to sync.
"""
name = 'sync_single'
remote_config_path = '/etc/sma/snmp/snmpd.conf'
def _add_package(self):
raise NotImplementedError()
def _restart_service(self):
raise NotImplementedError()
def run(self, filename=None):
"""
"""
assert filename
put(filename, '/var/tmp/tmpsnmpd.conf')
sudo("mv /var/tmp/tmpsnmpd.conf %s" % self.remote_config_path)
self._add_package()
self._restart_service()
class SNMPUpdate(BaseUpdateFiles):
"""
Update snmp config file(s)
Takes one argument:
* **section**: The name of the section in your server.ini that you
would like to update. If section is not provided all
sections will be updated.
Changes made by this task are not commited to your repo, or deployed
anywhere automatically. You should review any changes and commit and
deploy as appropriate.
This is a serial task, that should not be called directly
with any remote hosts as it performs no remote actions.
"""
name = 'update_files'
serial = True
config_section = 'monitor'
directory = 'snmp'
filename = 'snmpd.conf'
start_line = "## Start Configurable Section ##"
end_line = "## End Configurable Section ##"
def _get_lines(self, item):
section = env.config_object.get(self.config_section, 'community')
return [ "rocommunity %s %s" % (section, x) for x in \
env.config_object.get_list(self.config_section,item) ]
def run(self, section=None):
"""
"""
if section:
sections = [section]
else:
sections = env.config_object.server_sections()
lines = [self.start_line]
lines.extend(self._get_lines(env.config_object.CONNECTIONS))
lines.extend(self._get_lines(env.config_object.INTERNAL_IPS))
lines.append(self.end_line)
for s in sections:
self._save_to_file(s, lines)
|
Comunitea/CMNT_00040_2016_ELN_addons | product_price_history_analysis/__init__.py | Python | agpl-3.0 | 1,005 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2016 QUIVAL, S.A. All Rights Reserved
# $Pedro GΓ³mez Campos$ <pegomez@elnogal.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will b | e useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################ | ##################################################
from . import report
|
agurfinkel/brunch | exp/bencher.py | Python | mit | 2,706 | 0.000739 | #! /usr/bin/env python3
# Suggest name to z3 binary based on it its sha
import sys
import words
import subprocess
import argparse
import os.path
import shutil
from pathlib import Path
import yaml
class Bencher(object):
def __init__(self):
self._name = 'bencher'
self._help = 'Make benchmark direcotry'
def mk_arg_parser(self, ap):
ap.add_argument('--suffix',
'-s',
metavar='EXT',
type=str,
default='smt2',
help='File extension')
ap.add_argument('--prefix',
'-p',
metavar='PREF',
required='True',
help='Prefix to assign')
ap.add_argument('--out',
'-o',
type=str,
metavar="DIR",
help='Output directory',
required=True)
ap.add_argument('files', nargs='+')
ap.add_argument(
'--mv',
action='store_true',
help='Move (instead of copy) benchmarks into new location')
ap.add_argument('--verbose', '-v', action='store_true')
ap.add_argument('--dry-run', action='store_true')
return ap
def run(self, args=None):
num_files = len(args.files)
num_fmt = '{idx:0' + str(len(str(num_files))) + '}'
out_dir = Path(args.out)
out_dir.mkdir(paren | ts=True, exist_ok=True)
prefix = args.prefix
suffix = args.suffix
# pick an action to apply to each file
if args.dry_run:
def _dry_ | run_action(src, dst):
pass
file_action = _dry_run_action
elif args.mv:
file_action = shutil.move
else:
file_action = shutil.copy2
inverse = dict()
for id, src in enumerate(args.files):
idx_str = num_fmt.format(idx=id)
dst_name = f'{prefix}-{idx_str}.{suffix}'
dst = out_dir / dst_name
if (args.verbose):
print(f'{src} --> {dst}')
file_action(src, dst)
inverse[dst_name] = src
with open(out_dir / 'inverse.yaml', 'w') as inverse_file:
yaml.dump(inverse, inverse_file)
return 0
def main(self, argv):
ap = argparse.ArgumentParser(prog=self._name, description=self._help)
ap = self.mk_arg_parser(ap)
args = ap.parse_args(argv)
return self.run(args)
def main():
cmd = Bencher()
return cmd.main(sys.argv[1:])
if __name__ == '__main__':
sys.exit(main())
|
sternshus/arelle2.7 | svr-2.7/arelle/ValidateFilingText.py | Python | apache-2.0 | 33,886 | 0.006758 | u'''
Created on Oct 17, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
#import xml.sax, xml.sax.handler
from __future__ import with_statement
from lxml.etree import XML, DTD, SubElement, XMLSyntaxError
import os, re, io
from arelle import XbrlConst
from arelle.ModelObject import ModelObject
from io import open
XMLdeclaration = re.compile(ur"<\?xml.*\?>", re.DOTALL)
XMLpattern = re.compile(ur".*(<|<|<|<)[A-Za-z_]+[A-Za-z0-9_:]*[^>]*(/>|>|>|/>).*", re.DOTALL)
CDATApattern = re.compile(ur"<!\[CDATA\[(.+)\]\]")
#EFM table 5-1 and all &xxx; patterns
docCheckPattern = re.compile(ur"&\w+;|[^0-9A-Za-z`~!@#$%&\*\(\)\.\-+ \[\]\{\}\|\\:;\"'<>,_?/=\t\n\r\m\f]") # won't match &#nnn;
namedEntityPattern = re.compile(u"&[_A-Za-z\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]"
ur"[_\-\.:"
u"\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]*;")
#entityPattern = re.compile("&#[0-9]+;|"
# "&#x[0-9a-fA-F]+;|"
# "&[_A-Za-z\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]"
# r"[_\-\.:"
# "\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]*;")
edbodyDTD = None
u''' replace with lxml DTD validation
bodyTags = {
'a': (),
'address': (),
'b': (),
'big': (),
'blockquote': (),
'br': (),
'caption': (),
'center': (),
'cite': (),
'code': (),
'dd': (),
'dfn': (),
'dir': (),
'div': (),
'dl': (),
'dt': (),
'em': (),
'font': (),
'h1': (),
'h2': (),
'h3': (),
'h4': (),
'h5': (),
'h6': (),
'hr': (),
'i': (),
'img': (),
'kbd': (),
'li': (),
'listing': (),
'menu': (),
'ol': (),
'p': (),
'plaintext': (),
'pre': (),
'samp': (),
'small': (),
'strike': (),
'strong': (),
'sub': (),
'sup': (),
'table': (),
'td': (),
'th': (),
'tr': (),
'tt': (),
'u': (),
'ul': (),
'var': (),
'xmp': ()
}
htmlAttributes = {
'align': ('h1','h2','h3','h4','h5','h6','hr', 'img', 'p','caption','div','table','td','th','t | r'),
'alink': ('body'),
'alt': ('img'),
'bgcolor': ('body','table', 'tr', 'th', 'td'),
'border': ('table', 'img'),
'cellpadding': ('table'),
'cellspacing': ('table'),
'class': ('*'),
'clear': ('br'),
'color': ('font'),
'colspan': ('td','th'),
'compact': ('dir','dl','menu','ol','ul'),
'content': ('meta'),
'dir | ': ('h1','h2','h3','h4','h5','h6','hr','p','img','caption','div','table','td','th','tr','font',
'center','ol','li','ul','bl','a','big','pre','dir','address','blockqoute','menu','blockquote',
'em', 'strong', 'dfn', 'code', 'samp', 'kbd', 'var', 'cite', 'sub', 'sup', 'tt', 'i', 'b', 'small', 'u', 'strike'),
'lang': ('h1','h2','h3','h4','h5','h6','hr','p','img','caption','div','table','td','th','tr','font',
'center','ol','li','ul','bl','a','big','pre','dir','address','blockqoute','menu','blockquote',
'em', 'strong', 'dfn', 'code', 'samp', 'kbd', 'var', 'cite', 'sub', 'sup', 'tt', 'i', 'b', 'small', 'u', 'strike'),
'height': ('td','th', 'img'),
'href': ('a'),
'id': ('*'),
'link': ('body'),
'name': ('meta','a', 'img'),
'noshade': ('hr'),
'nowrap': ('td','th'),
'prompt': ('isindex'),
'rel': ('link','a'),
'rev': ('link','a'),
'rowspan': ('td','th'),
'size': ('hr','font'),
'src': ('img'),
'start': ('ol'),
'style': ('*'),
'text': ('body'),
'title': ('*'),
'type': ('li','ol','ul'),
'valign': ('td','th','tr'),
'vlink': ('body'),
'width': ('hr','pre', 'table','td','th', 'img')
}
'''
xhtmlEntities = {
u' ': u' ',
u'¡': u'¡',
u'¢': u'¢',
u'£': u'£',
u'¤': u'¤',
u'¥': u'¥',
u'¦': u'¦',
u'§': u'§',
u'¨': u'¨',
u'©': u'©',
u'ª': u'ª',
u'«': u'«',
u'¬': u'¬',
u'­': u'­',
u'®': u'®',
u'¯': u'¯',
u'°': u'°',
u'±': u'±',
u'²': u'²',
u'³': u'³',
u'´': u'´',
u'µ': u'µ',
u'¶': u'¶',
u'·': u'·',
u'¸': u'¸',
u'¹': u'¹',
u'º': u'º',
u'»': u'»',
u'¼': u'¼',
u'½': u'½',
u'¾': u'¾',
u'¿': u'¿',
u'À': u'À',
u'Á': u'Á',
u'Â': u'Â',
u'Ã': u'Ã',
u'Ä': u'Ä',
u'Å': u'Å',
u'Æ': u'Æ',
u'Ç': u'Ç',
u'È': u'È',
u'É': u'É',
u'Ê': u'Ê',
u'Ë': u'Ë',
u'Ì': u'Ì',
u'Í': u'Í',
u'Î': u'Î',
u'Ï': u'Ï',
u'Ð': u'Ð',
u'Ñ': u'Ñ',
u'Ò': u'Ò',
u'Ó': u'Ó',
u'Ô': u'Ô',
u'Õ': u'Õ',
u'Ö': u'Ö',
u'×': u'×',
u'Ø': u'Ø',
u'Ù': u'Ù',
u'Ú': u'Ú',
u'Û': u'Û',
u'Ü': u'Ü',
u'Ý': u'Ý',
u'Þ': u'Þ',
u'ß': u'ß',
u'à': u'à',
u'á': u'á',
u'â': u'â',
u'ã': u'ã',
u'ä': u'ä',
u'å': u'å',
u'æ': u'æ',
u'ç': u'ç',
u'è': u'è',
u'é': u'é',
u'ê': u'ê',
u'ë': u'ë',
u'ì': u'ì',
u'í': u'í',
u'î': u'î',
u'ï': u'ï',
u'ð': u'ð',
u'ñ': u'ñ',
u'ò': u'ò',
u'ó': u'ó',
u'ô': u'ô',
u'õ': u'õ',
u'ö': u'ö',
u'÷': u'÷',
u'ø': u'ø',
u'ù': u'ù',
u'ú': u'ú',
u'û': u'û',
u'ü': u'ü',
u'ý': u'ý',
u'þ': u'þ',
u'ÿ': u'ÿ',
u'"': u'"',
u'&': u'&#38;',
u'<': u'&#60;',
u'>': u'>',
u''': u''',
u'Œ': u'Œ',
u'œ': u'œ',
u'Š': u'Š',
u'š': u'š',
u'Ÿ': u'Ÿ',
u'ˆ': u'ˆ',
u'˜': u'˜',
u' ': u' ',
u' ': u' ',
u' ': u' ',
u'‌': u'‌',
u'‍': u'‍',
u'‎': u'‎',
u'‏': u'‏',
u'–': u'–',
u'—': u'—',
u'‘': u'‘',
u'’': u'’',
u'‚': u'‚',
u'“': u'“',
u'”': u'”',
u'„': u'„',
u'†': u'†',
u'‡': u'‡',
u'‰': u'‰',
u'‹': u'‹',
u'›': u'›',
u'€': u'€',
u'ƒ': u'ƒ',
u'Α': u'Α',
u'Β': u'Β',
u'Γ |
shivekkhurana/learning | python/network-security/ceaser.py | Python | mit | 1,385 | 0.01083 |
import string
import itertools
class Ceaser:
def __init__(self, payload, by=3, direction="right"):
self.payload = payload
self.by = by
self.direction = direction
def _encrypt_alphabet(self, alphabet):
ord_val = ord(alphabet) + self.by
if self.direction == 'left': ord_val = ord(alphabet) - self.by
if ord_val > 256 or ord_val < 0: ord_val = ord_val%256
return chr(ord_val)
def _de | crypt_alphabet(self, alphabet):
ord_val = ord(alphabet) + self.by
if self.direction == 'right': ord_val = ord(alphabet) - self.by
if ord_val > 256 or ord_val < 0: ord_val = ord_val%256
| return chr(ord_val)
def encrypt(self):
return ''.join([self._encrypt_alphabet(a) for a in self.payload])
def decrypt(self):
return ''.join([self._decrypt_alphabet(a) for a in self.payload])
def main():
payload = str(raw_input("Enter Payload : "))
by = int(raw_input('Enter shift by : '))
direction = str(raw_input('Enter shift direction (r or l): '))
if direction == 'l': direction = 'left'
else: direction = 'right'
c = Ceaser(payload, by, direction)
print('\nEncrypt\n')
print(c.encrypt())
print('\nDecrypting ...\n')
print(Ceaser(c.encrypt(), by, direction).decrypt())
if __name__ == '__main__':
main()
|
poxstone/ANG2-TEMPLATE | myApp/tests/unit/__init__.py | Python | apache-2.0 | 30 | 0 | __auth | or__ = | 'davidcifuentes'
|
lfairchild/PmagPy | programs/__init__.py | Python | bsd-3-clause | 1,586 | 0.011349 | #!/usr/bin/env pythonw
import sys
from os import path
import pkg_resources
command = path.split(sys.argv[0])[-1]
from .program_envs import prog_env
if command.endswith(".py"):
mpl_env = prog | _env.get(command[:-3])
elif command.endswith("_a"):
mpl_env = prog_env.get(command[:-2])
else:
mpl_env = prog_env.get(command)
import matplotlib
# if backend was already set, skip this step
if matplotlib.get_backend() in ('WXAgg', 'TKAgg'):
pass
# if backend wasn't set yet, set it appropriately
| else:
if mpl_env:
matplotlib.use(mpl_env)
else:
matplotlib.use("TKAgg")
if "-v" in sys.argv:
print("You are running:")
try:
print(pkg_resources.get_distribution('pmagpy'))
except pkg_resources.DistributionNotFound:
pass
try:
print(pkg_resources.get_distribution('pmagpy-cli'))
except pkg_resources.DistributionNotFound:
pass
#from . import generic_magic
#from . import sio_magic
#from . import cit_magic
#from . import _2g_bin_magic
#from . import huji_magic
#from . import huji_magic_new
#from . import ldeo_magic
#from . import iodp_srm_magic
#from . import iodp_dscr_magic
#from . import iodp_samples_magic
#from . import pmd_magic
#from . import tdt_magic
#from . import jr6_jr6_magic
#from . import jr6_txt_magic
#from . import bgc_magic
#__all__ = [generic_magic, sio_magic, cit_magic, _2g_bin_magic, huji_magic,
# huji_magic_new, ldeo_magic, iodp_srm_magic, iodp_dscr_magic,
# pmd_magic, tdt_magic, jr6_jr6_magic, jr6_txt_magic, bgc_magic,
# iodp_samples_magic]
|
elkingtowa/azove | features/steps/peer.py | Python | mit | 12,702 | 0.000315 | from utils import instrument
from azove.utils import recursive_int_to_big_endian
import mock
@given(u'a packet') # noqa
def step_impl(context):
context.packet = context.packeter.dump_packet('this is a test packet')
@when(u'peer.send_packet is called') # noqa
def step_impl(context):
context.peer.send_packet(context.packet)
@when(u'all data with the peer is processed') # noqa
def step_impl(context):
context.peer.run()
@then(u'the packet sent through connection should be the given packet') # noqa
def step_impl(context):
assert context.sent_packets == [context.packet]
@when(u'peer.send_Hello is called') # noqa
def step_impl(context):
context.peer.send_Hello()
@then(u'the packet sent through connection should be a Hello packet') # noqa
def step_impl(context):
packet = context.packeter.dump_Hello()
assert context.sent_packets == [packet]
@given(u'a valid Hello packet') # noqa
def step_impl(context):
context.packet = context.packeter.dump_Hello()
@given(u'a Hello packet with protocol version incompatible') # noqa
def step_impl(context):
packeter = context.packeter
data | = [packeter.cmd_map_by_name['Hello'],
'incompatible_protocal_version',
packeter.NETWORK_ID,
packeter.CLIENT_ID,
packeter.config.getint('network', 'listen_port'),
packeter.CAPABILITIES,
packeter.config.g | et('wallet', 'coinbase')
]
context.packet = packeter.dump_packet(data)
@given(u'a Hello packet with network id incompatible') # noqa
def step_impl(context):
packeter = context.packeter
data = [packeter.cmd_map_by_name['Hello'],
packeter.PROTOCOL_VERSION,
'incompatible_network_id',
packeter.CLIENT_ID,
packeter.config.getint('network', 'listen_port'),
packeter.CAPABILITIES,
packeter.config.get('wallet', 'coinbase')
]
context.packet = packeter.dump_packet(data)
@when(u'peer.send_Hello is instrumented') # noqa
def step_impl(context):
context.peer.send_Hello = instrument(context.peer.send_Hello)
@then(u'peer.send_Hello should be called once') # noqa
def step_impl(context):
func = context.peer.send_Hello
assert func.call_count == 1
@when(u'peer.send_Disconnect is instrumented') # noqa
def step_impl(context):
context.peer.send_Disconnect = instrument(context.peer.send_Disconnect)
@when(u'the packet is received from peer') # noqa
def step_impl(context):
context.add_recv_packet(context.packet)
@then(u'peer.send_Disconnect should be called once with args: reason') # noqa
def step_impl(context):
func = context.peer.send_Disconnect
assert func.call_count == 1
assert len(func.call_args[0]) == 1 or 'reason' in func.call_args[1]
@when(u'peer.send_Ping is called') # noqa
def step_impl(context):
context.peer.send_Ping()
@then(u'the packet sent through connection should be a Ping packet') # noqa
def step_impl(context):
packet = context.packeter.dump_Ping()
assert context.sent_packets == [packet]
@given(u'a Ping packet') # noqa
def step_impl(context):
context.packet = context.packeter.dump_Ping()
@when(u'peer.send_Pong is instrumented') # noqa
def step_impl(context):
context.peer.send_Pong = instrument(context.peer.send_Pong)
@then(u'peer.send_Pong should be called once') # noqa
def step_impl(context):
func = context.peer.send_Pong
assert func.call_count == 1
@when(u'peer.send_Pong is called') # noqa
def step_impl(context):
context.peer.send_Pong()
@then(u'the packet sent through connection should be a Pong packet') # noqa
def step_impl(context):
packet = context.packeter.dump_Pong()
assert context.sent_packets == [packet]
@given(u'a Pong packet') # noqa
def step_impl(context):
context.packet = context.packeter.dump_Pong()
@when(u'handler for a disconnect_requested signal is registered') # noqa
def step_impl(context):
from pyethereum.signals import peer_disconnect_requested
context.disconnect_requested_handler = mock.MagicMock()
peer_disconnect_requested.connect(context.disconnect_requested_handler)
@when(u'peer.send_Disconnect is called') # noqa
def step_impl(context):
context.peer.send_Disconnect()
@then(u'the packet sent through connection should be' # noqa
' a Disconnect packet')
def step_impl(context):
packet = context.packeter.dump_Disconnect()
assert context.sent_packets == [packet]
@then(u'the disconnect_requested handler should be called once' # noqa
' after sleeping for at least 2 seconds')
def step_impl(context):
import time # time is already pathced for mocks
assert context.disconnect_requested_handler.call_count == 1
sleeping = sum(x[0][0] for x in time.sleep.call_args_list)
assert sleeping >= 2
@given(u'a Disconnect packet') # noqa
def step_impl(context):
context.packet = context.packeter.dump_Disconnect()
@then(u'the disconnect_requested handler should be called once') # noqa
def step_impl(context):
assert context.disconnect_requested_handler.call_count == 1
@when(u'peer.send_GetPeers is called') # noqa
def step_impl(context):
context.peer.send_GetPeers()
@then(u'the packet sent through connection should be' # noqa
' a GetPeers packet')
def step_impl(context):
packet = context.packeter.dump_GetPeers()
assert context.sent_packets == [packet]
@given(u'a GetPeers packet') # noqa
def step_impl(context):
context.packet = context.packeter.dump_GetPeers()
@given(u'peers data') # noqa
def step_impl(context):
context.peers_data = [
['127.0.0.1', 1234, 'local'],
['1.0.0.1', 1234, 'remote'],
]
@when(u'getpeers_received signal handler is connected') # noqa
def step_impl(context):
from pyethereum.signals import getpeers_received
handler = mock.MagicMock()
context.getpeers_received_handler = handler
getpeers_received.connect(handler)
@then(u'the getpeers_received signal handler should be called once') # noqa
def step_impl(context):
assert context.getpeers_received_handler.call_count == 1
@when(u'peer.send_Peers is called') # noqa
def step_impl(context):
context.peer.send_Peers(context.peers_data)
@then(u'the packet sent through connection should be a Peers packet' # noqa
' with the peers data')
def step_impl(context):
assert context.sent_packets == [
context.packeter.dump_Peers(context.peers_data)]
@given(u'a Peers packet with the peers data') # noqa
def step_impl(context):
context.packet = context.packeter.dump_Peers(context.peers_data)
@when(u'handler for new_peers_received signal is registered') # noqa
def step_impl(context):
context.new_peer_received_handler = mock.MagicMock()
from pyethereum.signals import peer_addresses_received
peer_addresses_received.connect(context.new_peer_received_handler)
@then(u'the new_peers_received handler should be called once' # noqa
' with all peers')
def step_impl(context):
call_args = context.new_peer_received_handler.call_args_list[0]
call_peers = call_args[1]['addresses']
assert len(call_peers) == len(context.peers_data)
pairs = zip(call_peers, context.peers_data)
for call, peer in pairs:
assert call == peer
#assert call[1]['address'] == peer
@when(u'peer.send_GetTransactions is called') # noqa
def step_impl(context):
context.peer.send_GetTransactions()
@then(u'the packet sent through connection should be' # noqa
' a GetTransactions packet')
def step_impl(context):
packet = context.packeter.dump_GetTransactions()
assert context.sent_packets == [packet]
@given(u'a GetTransactions packet') # noqa
def step_impl(context):
context.packet = context.packeter.dump_GetTransactions()
@given(u'transactions data') # noqa
def step_impl(context):
context.transactions_data = [
['nonce-1', 'receiving_address-1', 1],
['nonce-2', 'receiving_address-2', 2],
['nonce-3', 'receiving_address-3', 3],
]
@when(u'gettransactions_received signal handler is connected') # noqa
def step_impl(context):
from pyethereum.signals import |
mivade/streamis | streamis.py | Python | mit | 4,156 | 0.000481 | """Streamis - Subscribe to Redis pubsub channels via HTTP and EventSource."""
import logging
import asyncio
from asyncio import Queue
import aioredis
from tornado.platform.asyncio import AsyncIOMainLoop
from tornado import web
from tornado.iostream import StreamClosedError
from tornado.options import options, define
AsyncIOMainLoop().install()
logger = logging.getLogger('streamis')
define('redis-host', default='localhost', help='Redis server hostname')
define('redis-port', default=6379, help='Redis server port')
define('port', default=8989, help='HTTP port to serve on')
define('debug', default=False, help='Enable debug mode')
class Connection:
_redis = None
@classmethod
async def redis(cls, force_reconnect=False):
if cls._redis is None or force_reconnect:
settings = (options.redis_host, options.redis_port)
cls._redis = await aioredis.create_redis(settings)
return cls._redis
class Subscription:
"""Handles subscriptions to Redis PUB/SUB channels."""
def __init__(self, redis, channel: str):
self._redis = redis
self.name = channel
self.listeners = set()
async def subscribe(self):
res = await self._redis.subscribe(self.name)
self.channel = res[0]
def __str__(self):
return self.name
def add_listener(self, listener):
self.listeners.add(listener)
async def broadcast(self):
"""Listen for new messages on Redis | and broadcast to all
HTTP listen | ers.
"""
while len(self.listeners) > 0:
msg = await self.channel.get()
logger.debug("Got message: %s" % msg)
closed = []
for listener in self.listeners:
try:
listener.queue.put_nowait(msg)
except:
logger.warning('Message delivery failed. Client disconnection?')
closed.append(listener)
if len(closed) > 0:
[self.listeners.remove(listener) for listener in closed]
class SubscriptionManager:
"""Manages all subscriptions."""
def __init__(self, loop=None):
self.redis = None
self.subscriptions = dict()
self.loop = loop or asyncio.get_event_loop()
async def connect(self):
self.redis = await Connection.redis()
async def subscribe(self, listener, channel: str):
"""Subscribe to a new channel."""
if channel in self.subscriptions:
subscription = self.subscriptions[channel]
else:
subscription = Subscription(self.redis, channel)
await subscription.subscribe()
self.subscriptions[channel] = subscription
self.loop.call_soon(lambda: asyncio.Task(subscription.broadcast()))
subscription.add_listener(listener)
def unsubscribe(self, channel: str):
"""Unsubscribe from a channel."""
if channel not in self.subscriptions:
logger.warning("Not subscribed to channel '%s'" % channel)
return
sub = self.subscriptions.pop(channel)
del sub
class SSEHandler(web.RequestHandler):
def initialize(self, manager: SubscriptionManager):
self.queue = Queue()
self.manager = manager
self.set_header('content-type', 'text/event-stream')
self.set_header('cache-control', 'no-cache')
async def get(self, channel: str):
await self.manager.subscribe(self, channel)
while True:
message = await self.queue.get()
try:
self.write("data: %s\n\n" % message)
await self.flush()
except StreamClosedError:
break
def main():
options.parse_command_line()
loop = asyncio.get_event_loop()
manager = SubscriptionManager()
loop.run_until_complete(manager.connect())
app = web.Application(
[(r'/(.*)', SSEHandler, dict(manager=manager))],
debug=options.debug
)
app.listen(options.port)
logger.info('Listening on port %d' % options.port)
loop.run_forever()
if __name__ == "__main__":
main()
|
fbradyirl/home-assistant | homeassistant/components/tplink/config_flow.py | Python | apache-2.0 | 364 | 0 | """Config flow for TP-Link."""
from homeassistant.helpers import config_entry_flow
from homeassistant import config_entries
from .const import DOMAIN
from .common import async_get_discoverable_devices
config_entry_flow.register | _discovery_flow(
DOMAIN,
"TP-Link Smart | Home",
async_get_discoverable_devices,
config_entries.CONN_CLASS_LOCAL_POLL,
)
|
mjumbewu/django-subscriptions | subscriptions/feeds/__init__.py | Python | bsd-2-clause | 252 | 0.003968 | from readers import (autodiscover, FeedReader, TimestampedModelFeedReader,
RSSFeedReader)
from library import (FeedLibrary)
from dispatch import (Subscripti | onDispatc | her, SubscriptionEmailer)
from utils import (FeedRecordUpdater, FeedRecordCleaner)
|
mstritt/orbit-image-analysis | src/main/python/deeplearn/utils/image_reader.py | Python | gpl-3.0 | 7,239 | 0.00746 | import os
import numpy as np
import tensorflow as tf
def image_scaling(img, label):
"""
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
img: Training image to scale.
label: Segmentation mask to scale.
"""
scale = tf.random_uniform([1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(img)[0]), scale))
w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(img)[1]), scale))
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
img = tf.image.resize_images(img, new_shape)
label = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape)
label = tf.squeeze(label, squeeze_dims=[0])
return img, label
def image_mirroring(img, label):
"""
Randomly mirrors the images.
Args:
img: Training image to mirror.
label: Segmentation mask to mirror.
"""
distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)[0]
mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
mirror = tf.boolean_mask([0, 1, 2], mirror)
img = tf.reverse(img, mirror)
label = tf.reverse(label, mirror)
return img, label
def random_crop_and_pad_image_and_labels(image, label, crop_h, crop_w, ignore_label=255):
"""
Randomly crop and pads the input images.
Args:
image: Training image to crop/ pad.
label: Segmentation mask to crop/ pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
ignore_label: Label to ignore during the training.
"""
label = tf.cast(label, dtype=tf.float32)
label = label - ignore_label # Needs to be subtracted and later added due to 0 padding.
combined = tf.concat(axis=2, values=[image, label])
image_shape = tf.shape(image)
combined_pad = tf.image.pad_to_bounding_box(combined, 0, 0, tf.maximum(crop_h, image_shape[0]), tf.maximum(crop_w, image_shape[1]))
last_image_dim = tf.shape(image)[-1]
# last_label_dim = tf.shape(label)[-1]
combined_crop = tf.random_crop(combined_pad, [crop_h, crop_w, 4])
img_crop = combined_crop[:, :, :last_image_dim]
label_crop = combined_crop[:, :, last_image_dim:]
label_crop = label_crop + ignore_label
label_crop = tf.cast(label_crop, dtype=tf.uint8)
# Set static shape so that tensorflow knows shape at compile time.
img_crop.set_shape((crop_h, crop_w, 3))
label_crop.set_shape((crop_h,crop_w, 1))
return img_crop, label_crop
def read_labeled_image_list(data_dir, data_list):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respectively.
"""
f = open(data_list, 'r')
images = []
masks = []
for line in f:
try:
image, mask = line.strip("\n").split(' ')
except Va | lueError: # Adhoc for test.
image = mask = line.strip("\n")
images.append(data_dir + image)
masks.append(data_dir + mask)
return images, masks
def read_images_from_disk(input_queue, input_size, random_scale, random_mirror, ignore_label): # optional pre-processing arguments
"""Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
inp | ut_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to random crop.
random_mirror: whether to randomly mirror the images prior
to random crop.
ignore_label: index of label to ignore during the training.
img_mean: vector of mean colour values.
Returns:
Two tensors: the decoded image and its mask.
"""
img_filename = tf.identity( input_queue[0], name='img_filename')
label_filename = tf.identity( input_queue[1], name='label_filename')
img_contents = tf.read_file(img_filename)
label_contents = tf.read_file(label_filename)
img = tf.image.decode_jpeg(img_contents, channels=3)
img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)
img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)
label = tf.image.decode_png(label_contents, channels=1)
if input_size is not None:
h, w = input_size
# Randomly scale the images and labels.
if random_scale:
img, label = image_scaling(img, label)
# Randomly mirror the images and labels.
if random_mirror:
img, label = image_mirroring(img, label)
# Randomly crops the images and labels.
img, label = random_crop_and_pad_image_and_labels(img, label, h, w, ignore_label)
return img, label
class ImageReader(object):
    '''Generic ImageReader which reads images and corresponding segmentation
    masks from the disk, and enqueues them into a TensorFlow queue.
    '''

    def __init__(self, data_dir, data_list, input_size,
                 random_scale, random_mirror, ignore_label, coord):
        '''Initialise an ImageReader.

        Args:
          data_dir: path to the directory with images and masks.
          data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
          input_size: a tuple with (height, width) values, to which all the images
            will be resized; None keeps original sizes (and disables shuffling below).
          random_scale: whether to randomly scale the images prior to random crop.
          random_mirror: whether to randomly mirror the images prior to random crop.
          ignore_label: index of label to ignore during the training.
          coord: TensorFlow queue coordinator.
        '''
        self.data_dir = data_dir
        self.data_list = data_list
        self.input_size = input_size
        self.coord = coord

        self.image_list, self.label_list = read_labeled_image_list(self.data_dir, self.data_list)
        self.images = tf.convert_to_tensor(self.image_list, dtype=tf.string)
        self.labels = tf.convert_to_tensor(self.label_list, dtype=tf.string)
        self.queue = tf.train.slice_input_producer([self.images, self.labels],
                                                   shuffle=input_size is not None) # not shuffling if it is val
        self.image, self.label = read_images_from_disk(self.queue, self.input_size, random_scale, random_mirror, ignore_label )

    def dequeue(self, num_elements):
        '''Pack images and labels into a batch.

        Args:
          num_elements: the batch size.

        Returns:
          Two tensors of size (batch_size, h, w, {3, 1}) for images and masks.'''
        image_batch, label_batch = tf.train.batch([self.image, self.label],
                                                  num_elements)
        return image_batch, label_batch
|
waltermoreira/serfnode | serfnode/me.py | Python | mit | 345 | 0 | #!/usr/bin/python
import json
import os
import time
def main():
    """Print the 'ip:port' of this serf node once the local agent is up.

    Blocks until the agent writes its readiness marker file, then looks
    this node up by id and prints its serf address for the caller.
    """
    # Busy-wait on the marker file the agent creates when it is ready.
    while not os.path.exists('/agent_up'):
        time.sleep(0.1)
    # Use context managers so the file handles are closed deterministically
    # (the originals leaked them via json.load(open(...))).
    with open('/me.json') as me_file:
        node_id = json.load(me_file)['id']
    with open('/serfnodes_by_id.json') as nodes_file:
        node = json.load(nodes_file)[node_id]
    print('{}:{}'.format(node['serf_ip'], node['serf_port']))


if __name__ == '__main__':
    main()
|
sandvine/horizon | openstack_dashboard/dashboards/project/routers/tests.py | Python | apache-2.0 | 38,873 | 0.000077 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import django
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
class RouterMixin(object):
    """Mox stubbing helpers shared by the router panel test cases."""

    @test.create_stubs({
        api.neutron: ('router_get', 'port_list',
                      'network_get', 'is_extension_supported',
                      'list_l3_agent_hosting_router'),
    })
    def _get_detail(self, router, extraroute=True, lookup_l3=False):
        # Stub every neutron call the detail view performs, replay the
        # mocks, then GET the detail page and return the response.
        api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
            .MultipleTimes().AndReturn(extraroute)
        api.neutron.router_get(IsA(http.HttpRequest), router.id)\
            .AndReturn(router)
        api.neutron.port_list(IsA(http.HttpRequest),
                              device_id=router.id)\
            .AndReturn([self.ports.first()])
        self._mock_external_network_get(router)
        if lookup_l3:
            # The admin view also resolves the hosting L3 agent.
            agent = self.agents.list()[1]
            api.neutron.list_l3_agent_hosting_router(IsA(http.HttpRequest), router.id)\
                .AndReturn([agent])
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:%s'
                                      ':routers:detail' % self.DASHBOARD,
                                      args=[router.id]))
        return res

    def _mock_external_network_list(self, alter_ids=False):
        search_opts = {'router:external': True}
        ext_nets = [n for n in self.networks.list() if n['router:external']]
        if alter_ids:
            # Corrupt the ids so later lookups miss, simulating routers
            # whose external network no longer exists.
            for ext_net in ext_nets:
                ext_net.id += 'some extra garbage'
        api.neutron.network_list(
            IsA(http.HttpRequest),
            **search_opts).AndReturn(ext_nets)

    def _mock_external_network_get(self, router):
        # Resolve the router's gateway network id to a network fixture.
        ext_net_id = router.external_gateway_info['network_id']
        ext_net = self.networks.list()[2]
        api.neutron.network_get(IsA(http.HttpRequest), ext_net_id,
                                expand_subnet=False).AndReturn(ext_net)

    def _mock_network_list(self, tenant_id):
        # The form choices query tenant-owned and shared networks separately.
        api.neutron.network_list(
            IsA(http.HttpRequest),
            shared=False,
            tenant_id=tenant_id).AndReturn(self.networks.list())
        api.neutron.network_list(
            IsA(http.HttpRequest),
            shared=True).AndReturn([])
class RouterTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
routers = res.context['table'].data
self.assertItemsEqual(routers, self.routers.list())
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index_router_list_exception(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).MultipleTimes().AndRaise(
self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertEqual(len(res.context['table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_set_external_network_empty(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).MultipleTimes().AndReturn([router])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list(alter_ids=True)
self.mox.ReplayAll()
| res = self.client.get(self.INDEX_URL)
| table_data = res.context['table'].data
self.assertEqual(len(table_data), 1)
self.assertIn('(Not Found)',
table_data[0]['external_gateway_info']['network'])
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertMessageCount(res, error=1)
def test_router_detail(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
ports = res.context['interfaces_table'].data
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('router_get',)})
def test_router_detail_exception(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_delete(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn([])
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
|
sornars/urllib3 | test/test_response.py | Python | mit | 21,348 | 0.000843 | import unittest
from io import BytesIO, BufferedReader
try:
import http.client as httplib
except ImportError:
import httplib
from urllib3.response import HTTPResponse
from urllib3.exceptions import DecodeError, ResponseNotChunked
from base64 import b64decode
# A known random (i.e., not-too-compressible) payload generated with:
#    "".join(random.choice(string.printable) for i in xrange(512))
#      .encode("zlib").encode("base64")
# Randomness in tests == bad, and fixing a seed may not be sufficient.
# (The seventh line below was garbled by an extraction artifact; it has
# been restored so the payload decompresses to the original 512 bytes.)
ZLIB_PAYLOAD = b64decode(b"""\
eJwFweuaoQAAANDfineQhiKLUiaiCzvuTEmNNlJGiL5QhnGpZ99z8luQfe1AHoMioB+QSWHQu/L+
lzd7W5CipqYmeVTBjdgSATdg4l4Z2zhikbuF+EKn69Q0DTpdmNJz8S33odfJoVEexw/l2SS9nFdi
pis7KOwXzfSqarSo9uJYgbDGrs1VNnQpT9f8zAorhYCEZronZQF9DuDFfNK3Hecc+WHLnZLQptwk
nufw8S9I43sEwxsT71BiqedHo0QeIrFE01F/4atVFXuJs2yxIOak3bvtXjUKAA6OKnQJ/nNvDGKZ
Khe5TF36JbnKVjdcL1EUNpwrWVfQpFYJ/WWm2b74qNeSZeQv5/xBhRdOmKTJFYgO96PwrHBlsnLn
a3l0LwJsloWpMbzByU5WLbRE6X5INFqjQOtIwYz5BAlhkn+kVqJvWM5vBlfrwP42ifonM5yF4ciJ
auHVks62997mNGOsM7WXNG3P98dBHPo2NhbTvHleL0BI5dus2JY81MUOnK3SGWLH8HeWPa1t5KcW
S5moAj5HexY/g/F8TctpxwsvyZp38dXeLDjSQvEQIkF7XR3YXbeZgKk3V34KGCPOAeeuQDIgyVhV
nP4HF2uWHA==""")
class TestLegacyResponse(unittest.TestCase):
    """Exercise the httplib-compatibility accessors on HTTPResponse."""

    def test_getheaders(self):
        hdrs = {'host': 'example.com'}
        resp = HTTPResponse(headers=hdrs)
        self.assertEqual(resp.getheaders(), hdrs)

    def test_getheader(self):
        hdrs = {'host': 'example.com'}
        resp = HTTPResponse(headers=hdrs)
        self.assertEqual(resp.getheader('host'), 'example.com')
c | lass TestResponse(unittest.TestCase):
def test_cache_content(self):
r = HTTPResponse('foo')
self.assertEqual(r.data, 'foo')
self.assertEqual(r._body, 'foo')
def test_default(self):
r = HTTPResponse()
self.assertEqual(r.data, None)
def test_none(self):
r = HTTPResponse(None)
self.assertEqual(r.data, None)
def test_preload(self):
fp = BytesIO(b'foo')
r = HTTPResponse(fp, preload_content=True)
self.assertEqual(fp.tell(), len(b'foo'))
self.assertEqual(r.data, b'foo')
def test_no_preload(self):
fp = BytesIO(b'foo')
r = HTTPResponse(fp, preload_content=False)
self.assertEqual(fp.tell(), 0)
self.assertEqual(r.data, b'foo')
self.assertEqual(fp.tell(), len(b'foo'))
def test_decode_bad_data(self):
fp = BytesIO(b'\x00' * 10)
self.assertRaises(DecodeError, HTTPResponse, fp, headers={
'content-encoding': 'deflate'
})
def test_reference_read(self):
fp = BytesIO(b'foo')
r = HTTPResponse(fp, preload_content=False)
self.assertEqual(r.read(1), b'f')
self.assertEqual(r.read(2), b'oo')
self.assertEqual(r.read(), b'')
self.assertEqual(r.read(), b'')
def test_decode_deflate(self):
import zlib
data = zlib.compress(b'foo')
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'deflate'})
self.assertEqual(r.data, b'foo')
def test_decode_deflate_case_insensitve(self):
import zlib
data = zlib.compress(b'foo')
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'DeFlAtE'})
self.assertEqual(r.data, b'foo')
def test_chunked_decoding_deflate(self):
import zlib
data = zlib.compress(b'foo')
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
preload_content=False)
self.assertEqual(r.read(3), b'')
self.assertEqual(r.read(1), b'f')
self.assertEqual(r.read(2), b'oo')
self.assertEqual(r.read(), b'')
self.assertEqual(r.read(), b'')
def test_chunked_decoding_deflate2(self):
import zlib
compress = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
data = compress.compress(b'foo')
data += compress.flush()
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
preload_content=False)
self.assertEqual(r.read(1), b'')
self.assertEqual(r.read(1), b'f')
self.assertEqual(r.read(2), b'oo')
self.assertEqual(r.read(), b'')
self.assertEqual(r.read(), b'')
def test_chunked_decoding_gzip(self):
import zlib
compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
data = compress.compress(b'foo')
data += compress.flush()
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'gzip'},
preload_content=False)
self.assertEqual(r.read(11), b'')
self.assertEqual(r.read(1), b'f')
self.assertEqual(r.read(2), b'oo')
self.assertEqual(r.read(), b'')
self.assertEqual(r.read(), b'')
def test_body_blob(self):
resp = HTTPResponse(b'foo')
self.assertEqual(resp.data, b'foo')
self.assertTrue(resp.closed)
def test_io(self):
import socket
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
self.assertEqual(resp.closed, False)
self.assertEqual(resp.readable(), True)
self.assertEqual(resp.writable(), False)
self.assertRaises(IOError, resp.fileno)
resp.close()
self.assertEqual(resp.closed, True)
# Try closing with an `httplib.HTTPResponse`, because it has an
# `isclosed` method.
hlr = httplib.HTTPResponse(socket.socket())
resp2 = HTTPResponse(hlr, preload_content=False)
self.assertEqual(resp2.closed, False)
resp2.close()
self.assertEqual(resp2.closed, True)
#also try when only data is present.
resp3 = HTTPResponse('foodata')
self.assertRaises(IOError, resp3.fileno)
resp3._fp = 2
# A corner case where _fp is present but doesn't have `closed`,
# `isclosed`, or `fileno`. Unlikely, but possible.
self.assertEqual(resp3.closed, True)
self.assertRaises(IOError, resp3.fileno)
def test_io_bufferedreader(self):
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
br = BufferedReader(resp)
self.assertEqual(br.read(), b'foo')
br.close()
self.assertEqual(resp.closed, True)
b = b'fooandahalf'
fp = BytesIO(b)
resp = HTTPResponse(fp, preload_content=False)
br = BufferedReader(resp, 5)
br.read(1) # sets up the buffer, reading 5
self.assertEqual(len(fp.read()), len(b) - 5)
# This is necessary to make sure the "no bytes left" part of `readinto`
# gets tested.
while not br.closed:
br.read(5)
def test_io_readinto(self):
# This test is necessary because in py2.6, `readinto` doesn't get called
# in `test_io_bufferedreader` like it does for all the other python
# versions. Probably this is because the `io` module in py2.6 is an
# old version that has a different underlying implementation.
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
barr = bytearray(3)
assert resp.readinto(barr) == 3
assert b'foo' == barr
# The reader should already be empty, so this should read nothing.
assert resp.readinto(barr) == 0
assert b'foo' == barr
def test_streaming(self):
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
stream = resp.stream(2, decode_content=False)
self.assertEqual(next(stream), b'fo')
self.assertEqual(next(stream), b'o')
self.assertRaises(StopIteration, next, stream)
def test_streaming_tell(self):
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
stream = resp.stream(2, decode_content=False)
position = 0
position += len(next(stream))
self.assertEqual(2, position)
self.assertEqual(position, resp.tell())
position += l |
yotchang4s/cafebabepy | src/main/python/sndhdr.py | Python | bsd-3-clause | 7,088 | 0.003245 | """Routines to help recognizing sound files.
Function whathdr() recognizes various types of sound file headers.
It understands almost all headers that SOX can decode.
The return tuple contains the following items, in this order:
- file type (as SOX understands it)
- sampling rate (0 if unknown or hard to decode)
- number of channels (0 if unknown or hard to decode)
- number of frames in the file (-1 if unknown or hard to decode)
- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
If the file doesn't have a recognizable type, it returns None.
If the file can't be opened, OSError is raised.
To compute the total time, divide the number of frames by the
sampling rate (a frame contains a sample for each channel).
Function what() calls whathdr(). (It used to also use some
heuristics for raw data, but this doesn't work very well.)
Finally, the function test() is a simple main program that calls
what() for all files mentioned on the argument list. For directory
arguments it calls what() for all files in that directory. Default
argument is "." (testing all files in the current directory). The
option -r tells it to recurse down directories found inside
explicitly given directories.
"""
# The file structure is top-down except that the test program and its
# subroutine come last.
__all__ = ['what', 'whathdr']
from collections import namedtuple
SndHeaders | = namedtuple('SndHeaders',
'filetype framerate nchan | nels nframes sampwidth')
SndHeaders.filetype.__doc__ = ("""The value for type indicates the data type
and will be one of the strings 'aifc', 'aiff', 'au','hcom',
'sndr', 'sndt', 'voc', 'wav', '8svx', 'sb', 'ub', or 'ul'.""")
SndHeaders.framerate.__doc__ = ("""The sampling_rate will be either the actual
value or 0 if unknown or difficult to decode.""")
SndHeaders.nchannels.__doc__ = ("""The number of channels or 0 if it cannot be
determined or if the value is difficult to decode.""")
SndHeaders.nframes.__doc__ = ("""The value for frames will be either the number
of frames or -1.""")
SndHeaders.sampwidth.__doc__ = ("""Either the sample size in bits or
'A' for A-LAW or 'U' for u-LAW.""")
def what(filename):
    """Guess the type of a sound file; thin alias for whathdr()."""
    return whathdr(filename)
def whathdr(filename):
    """Recognize sound headers.

    Reads the first 512 bytes of *filename* and runs each registered
    probe over them; returns a SndHeaders namedtuple for the first
    match, or None if no probe recognizes the header.
    """
    with open(filename, 'rb') as stream:
        header = stream.read(512)
        for probe in tests:
            hit = probe(header, stream)
            if hit:
                return SndHeaders(*hit)
        return None
#-----------------------------------#
# Subroutines per sound header type #
#-----------------------------------#
tests = []
def test_aifc(h, f):
    """Identify an AIFF/AIFF-C file and parse it with the aifc module.

    NOTE(review): the aifc module is deprecated (PEP 594) and removed in
    Python 3.13; this probe needs rewriting for newer interpreters.
    """
    import aifc
    if not h.startswith(b'FORM'):
        return None
    # The IFF form type at offset 8 distinguishes compressed AIFC from AIFF.
    if h[8:12] == b'AIFC':
        fmt = 'aifc'
    elif h[8:12] == b'AIFF':
        fmt = 'aiff'
    else:
        return None
    f.seek(0)
    try:
        a = aifc.open(f, 'r')
    except (EOFError, aifc.Error):
        return None
    return (fmt, a.getframerate(), a.getnchannels(),
            a.getnframes(), 8 * a.getsampwidth())
tests.append(test_aifc)
def test_au(h, f):
    """Identify a Sun/NeXT audio file ('.snd' magic or little-endian variants)."""
    if h.startswith(b'.snd'):
        func = get_long_be
    elif h[:4] in (b'\0ds.', b'dns.'):
        # Byte-swapped magic: all header fields are little-endian.
        func = get_long_le
    else:
        return None
    filetype = 'au'
    hdr_size = func(h[4:8])
    data_size = func(h[8:12])
    encoding = func(h[12:16])
    rate = func(h[16:20])
    nchannels = func(h[20:24])
    sample_size = 1 # default
    if encoding == 1:
        # 8-bit u-law
        sample_bits = 'U'
    elif encoding == 2:
        # 8-bit linear PCM
        sample_bits = 8
    elif encoding == 3:
        # 16-bit linear PCM
        sample_bits = 16
        sample_size = 2
    else:
        sample_bits = '?'
    frame_size = sample_size * nchannels
    if frame_size:
        # NOTE(review): true division -- the frame count comes back as a
        # float on Python 3; callers should not assume an int here.
        nframe = data_size / frame_size
    else:
        nframe = -1
    return filetype, rate, nchannels, nframe, sample_bits
tests.append(test_au)
def test_hcom(h, f):
if h[65:69] != b'FSSD' or h[128:132] != b'HCOM':
return None
divisor = get_long_be(h[144:148])
if divisor:
rate = 22050 / divisor
else:
rate = 0
return 'hcom', rate, 1, -1, 8
tests.append(test_hcom)
def test_voc(h, f):
if not h.startswith(b'Creative Voice File\032'):
return None
sbseek = get_short_le(h[20:22])
rate = 0
if 0 <= sbseek < 500 and h[sbseek] == 1:
ratecode = 256 - h[sbseek+4]
if ratecode:
rate = int(1000000.0 / ratecode)
return 'voc', rate, 1, -1, 8
tests.append(test_voc)
def test_wav(h, f):
import wave
# 'RIFF' <len> 'WAVE' 'fmt ' <len>
if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ':
return None
f.seek(0)
try:
w = wave.openfp(f, 'r')
except (EOFError, wave.Error):
return None
return ('wav', w.getframerate(), w.getnchannels(),
w.getnframes(), 8*w.getsampwidth())
tests.append(test_wav)
def test_8svx(h, f):
if not h.startswith(b'FORM') or h[8:12] != b'8SVX':
return None
# Should decode it to get #channels -- assume always 1
return '8svx', 0, 1, 0, 8
tests.append(test_8svx)
def test_sndt(h, f):
if h.startswith(b'SOUND'):
nsamples = get_long_le(h[8:12])
rate = get_short_le(h[20:22])
return 'sndt', rate, 1, nsamples, 8
tests.append(test_sndt)
def test_sndr(h, f):
if h.startswith(b'\0\0'):
rate = get_short_le(h[2:4])
if 4000 <= rate <= 25000:
return 'sndr', rate, 1, -1, 8
tests.append(test_sndr)
#-------------------------------------------#
# Subroutines to extract numbers from bytes #
#-------------------------------------------#
def get_long_be(b):
    """Return the 32-bit big-endian integer stored in the first 4 bytes of b."""
    return b[3] | (b[2] << 8) | (b[1] << 16) | (b[0] << 24)
def get_long_le(b):
    """Return the 32-bit little-endian integer stored in the first 4 bytes of b."""
    return b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)
def get_short_be(b):
    """Return the 16-bit big-endian integer stored in the first 2 bytes of b."""
    return b[1] | (b[0] << 8)
def get_short_le(b):
    """Return the 16-bit little-endian integer stored in the first 2 bytes of b."""
    return b[0] | (b[1] << 8)
#--------------------#
# Small test program #
#--------------------#
def test():
    """Command-line entry point: print the detected type of each argument.

    With -r, recurse into subdirectories of any directory arguments;
    with no arguments, test every file in the current directory.
    """
    import sys
    recursive = 0
    if sys.argv[1:] and sys.argv[1] == '-r':
        del sys.argv[1:2]
        recursive = 1
    try:
        if sys.argv[1:]:
            testall(sys.argv[1:], recursive, 1)
        else:
            testall(['.'], recursive, 1)
    except KeyboardInterrupt:
        sys.stderr.write('\n[Interrupted]\n')
        sys.exit(1)
def testall(list, recursive, toplevel):
    """Print the sound type of every file in `list`.

    Directories are descended into when `recursive` or `toplevel` is
    true; otherwise they are reported and skipped.
    """
    import sys
    import os
    for filename in list:
        if os.path.isdir(filename):
            print(filename + '/:', end=' ')
            if recursive or toplevel:
                print('recursing down:')
                import glob
                names = glob.glob(os.path.join(filename, '*'))
                testall(names, recursive, 0)
            else:
                print('*** directory (use -r) ***')
        else:
            print(filename + ':', end=' ')
            # Flush so the name appears before a potentially slow what().
            sys.stdout.flush()
            try:
                print(what(filename))
            except OSError:
                print('*** not found ***')
if __name__ == '__main__':
test()
|
tongwang01/tensorflow | tensorflow/contrib/bayesflow/examples/reinforce_simple/reinforce_simple_example.py | Python | apache-2.0 | 5,012 | 0.009777 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple examples of the REINFORCE algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Shorthand aliases for the contrib modules used throughout this example.
# (The first assignment was garbled by an extraction artifact.)
distributions = tf.contrib.distributions
sg = tf.contrib.bayesflow.stochastic_graph
st = tf.contrib.bayesflow.stochastic_tensor
def split_apply_merge(inp, partitions, fns):
  """Split input according to partitions.  Pass results through fns and merge.

  Args:
    inp: the input vector
    partitions: tensor of same length as input vector, having values 0, 1
    fns: the two functions.

  Returns:
    the vector routed, where routed[i] = fns[partitions[i]](inp[i])
  """
  new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
  new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
  # Partition the original indices the same way, then stitch each
  # path's outputs back to the positions its elements came from.
  new_indices = tf.dynamic_partition(
      tf.range(0, inp.get_shape()[0]), partitions, len(fns))
  return tf.dynamic_stitch(new_indices, new_outputs)
def plus_1(inputs):
  """Elementwise x + 1.0; one of the two routing paths."""
  return 1.0 + inputs
def minus_1(inputs):
  """Elementwise x - 1.0; one of the two routing paths."""
  return inputs - 1.0
def build_split_apply_merge_model():
  """Build the Split-Apply-Merge Model.

  Route each value of input [-1, -1, 1, 1] through one of the
  functions, plus_1, minus_1.  The decision for routing is made by
  4 Bernoulli R.V.s whose parameters are determined by a neural network
  applied to the input.  REINFORCE is used to update the NN parameters.

  Returns:
    The 3-tuple (route_selection, routing_loss, final_loss), where:

      - route_selection is an int 4-vector
      - routing_loss is a float 4-vector
      - final_loss is a float scalar.
  """
  inputs = tf.constant([[-1.0], [-1.0], [1.0], [1.0]])
  targets = tf.constant([[0.0], [0.0], [0.0], [0.0]])
  paths = [plus_1, minus_1]
  # One logit per path for each scalar input: weights has shape (1, 2).
  weights = tf.get_variable("w", [1, 2])
  bias = tf.get_variable("b", [1, 1])
  logits = tf.matmul(inputs, weights) + bias

  # REINFORCE forward step
  route_selection = st.StochasticTensor(
      distributions.Categorical(logits=logits))

  # Accessing route_selection as a Tensor below forces a sample of
  # the Categorical distribution based on its logits.
  # This is equivalent to calling route_selection.value().
  #
  # route_selection.value() returns an int32 4-vector with random
  # values in {0, 1}
  # COPY+ROUTE+PASTE
  outputs = split_apply_merge(inputs, route_selection, paths)

  # flatten routing_loss to a row vector (from a column vector)
  routing_loss = tf.reshape(tf.square(outputs - targets), shape=[-1])

  # Total loss: score function loss + routing loss.
  # The score function loss (through `route_selection.loss(routing_loss)`)
  # returns:
  #  [stop_gradient(routing_loss) *
  #   route_selection.log_pmf(stop_gradient(route_selection.value()))],
  # where log_pmf has gradients going all the way back to weights and bias.
  # In this case, the routing_loss depends on the variables only through
  # "route_selection", which has a stop_gradient on it.  So the
  # gradient of the loss really come through the score function
  surrogate_loss = sg.surrogate_loss([routing_loss])
  final_loss = tf.reduce_sum(surrogate_loss)

  return (route_selection, routing_loss, final_loss)
class REINFORCESimpleExample(tf.test.TestCase):
  """End-to-end check that REINFORCE learns the correct routing."""

  def testSplitApplyMerge(self):
    # Repeatability.  SGD has a tendency to jump around, even here.
    tf.set_random_seed(1)

    with self.test_session() as sess:
      # Use sampling to train REINFORCE
      with st.value_type(st.SampleAndReshapeValue(n=1)):
        (route_selection,
         routing_loss,
         final_loss) = build_split_apply_merge_model()

        sgd = tf.train.GradientDescentOptimizer(1.0).minimize(final_loss)

      tf.initialize_all_variables().run()

      for i in range(10):
        # Run loss and inference step.  This toy problem converges VERY quickly.
        (routing_loss_v, final_loss_v, route_selection_v, _) = sess.run(
            [routing_loss, final_loss, tf.identity(route_selection), sgd])
        print(
            "Iteration %d, routing loss: %s, final_loss: %s, "
            "route selection: %s"
            % (i, routing_loss_v, final_loss_v, route_selection_v))

      # After training, [-1, -1] should route to plus_1 and [1, 1] to
      # minus_1, driving every output (and hence the loss) to zero.
      self.assertAllEqual([0, 0, 1, 1], route_selection_v)
      self.assertAllClose([0.0, 0.0, 0.0, 0.0], routing_loss_v)
      self.assertAllClose(0.0, final_loss_v)
if __name__ == "__main__":
tf.test.main()
|
lakewik/storj-gui-client | UI/qt_interfaces/logs_table_ui.py | Python | mit | 3,651 | 0.002465 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'logs_table.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Logs(object):
    """pyuic4-generated layout for the Storj GUI logs window.

    Two garbled spans from the original (a broken ``_translate`` call and a
    mangled 'font-weight' inside an HTML string) have been restored; the
    layout itself is unchanged.
    """

    def setupUi(self, Logs):
        Logs.setObjectName(_fromUtf8("Logs"))
        Logs.resize(930, 369)
        self.label = QtGui.QLabel(Logs)
        self.label.setGeometry(QtCore.QRect(10, 10, 911, 31))
        self.label.setObjectName(_fromUtf8("label"))
        self.line = QtGui.QFrame(Logs)
        self.line.setGeometry(QtCore.QRect(10, 40, 911, 21))
        self.line.setFrameShape(QtGui.QFrame.HLine)
        self.line.setFrameShadow(QtGui.QFrame.Sunken)
        self.line.setObjectName(_fromUtf8("line"))
        self.logs_table = QtGui.QTableWidget(Logs)
        self.logs_table.setGeometry(QtCore.QRect(10, 60, 911, 251))
        self.logs_table.setObjectName(_fromUtf8("logs_table"))
        self.logs_table.setColumnCount(0)
        self.logs_table.setRowCount(0)
        self.exit_bt = QtGui.QPushButton(Logs)
        self.exit_bt.setGeometry(QtCore.QRect(790, 330, 131, 26))
        self.exit_bt.setObjectName(_fromUtf8("exit_bt"))
        self.save_logs_as_bt = QtGui.QPushButton(Logs)
        self.save_logs_as_bt.setGeometry(QtCore.QRect(650, 330, 131, 26))
        self.save_logs_as_bt.setObjectName(_fromUtf8("save_logs_as_bt"))
        self.logs_settings_bt = QtGui.QPushButton(Logs)
        self.logs_settings_bt.setGeometry(QtCore.QRect(510, 330, 131, 26))
        self.logs_settings_bt.setObjectName(_fromUtf8("logs_settings_bt"))
        self.label_2 = QtGui.QLabel(Logs)
        self.label_2.setGeometry(QtCore.QRect(10, 320, 181, 41))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.total_log_positions = QtGui.QLabel(Logs)
        self.total_log_positions.setGeometry(QtCore.QRect(200, 320, 131, 41))
        self.total_log_positions.setObjectName(_fromUtf8("total_log_positions"))
        self.clear_logs_bt = QtGui.QPushButton(Logs)
        self.clear_logs_bt.setGeometry(QtCore.QRect(370, 330, 131, 26))
        self.clear_logs_bt.setObjectName(_fromUtf8("clear_logs_bt"))

        self.retranslateUi(Logs)
        QtCore.QMetaObject.connectSlotsByName(Logs)

    def retranslateUi(self, Logs):
        Logs.setWindowTitle(_translate("Logs", "Dialog", None))
        self.label.setText(_translate("Logs", "<html><head/><body><p align=\"center\"><span style=\" font-size:16pt; font-weight:600;\">Logs - Storj GUI Client</span></p></body></html>", None))
        self.exit_bt.setText(_translate("Logs", "Exit", None))
        self.save_logs_as_bt.setText(_translate("Logs", "Save logs as...", None))
        self.logs_settings_bt.setText(_translate("Logs", "Logs settings", None))
        self.label_2.setText(_translate("Logs", "<html><head/><body><p align=\"center\"><span style=\" font-size:14pt; font-weight:600;\">Total log positions:</span></p></body></html>", None))
        self.total_log_positions.setText(_translate("Logs", "<html><head/><body><p><span style=\" font-size:14pt; font-weight:600;\">0</span></p></body></html>", None))
        self.clear_logs_bt.setText(_translate("Logs", "Clear logs", None))
|
desihub/desisurvey | py/desisurvey/scripts/surveymovie.py | Python | bsd-3-clause | 24,365 | 0.001313 | """Script wrapper for creating a movie of survey progress.
To run this script from the command line, use the ``surveymovie`` entry point
that is created when this package is installed, and should be in your shell
command search path.
The optional matplotlib python package must be installed to use this script.
The external program ffmpeg must be installed to use this script.
At nersc, try ``module add ffmpeg``.
"""
from __future__ import print_function, division, absolute_import
import argparse
import os.path
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec
import matplotlib.animation
import matplotlib.colors
import matplotlib.animation
import astropy.time
import astropy.io.fits
import astropy.units as u
import desiutil.log
import desisurvey.ephem
import desisurvey.utils
import desisurvey.config
import desisurvey.tiles
import desisurvey.plots
def parse(options=None):
    """Parse command-line options for running survey planning.

    Parameters
    ----------
    options : list or None
        Argument strings to parse instead of ``sys.argv`` (useful for tests).

    Returns
    -------
    argparse.Namespace
        Parsed arguments with ``start``/``stop`` converted to date objects
        taken from the survey configuration when not given explicitly.

    Raises
    ------
    ValueError
        If start/stop cannot be parsed or start >= stop.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--verbose', action='store_true',
        help='display log messages with severity >= info')
    parser.add_argument('--debug', action='store_true',
        help='display log messages with severity >= debug (implies verbose)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
        help='interval for logging periodic info messages')
    parser.add_argument(
        '--exposures', default='exposures_surveysim.fits', metavar='FITS',
        help='name of FITS file with list of exposures taken')
    parser.add_argument(
        '--start', type=str, default=None, metavar='DATE',
        help='movie starts on the evening of this day, formatted as YYYY-MM-DD')
    parser.add_argument(
        '--stop', type=str, default=None, metavar='DATE',
        help='movie stops on the morning of this day, formatted as YYYY-MM-DD')
    parser.add_argument(
        '--expid', type=int, default=None, metavar='ID',
        help='index of single exposure to display')
    parser.add_argument(
        '--nightly', action='store_true',
        help='output one summary frame per night')
    # The scores option needs to be re-implemented after the refactor.
    ##parser.add_argument(
    ##    '--scores', action='store_true', help='display scheduler scores')
    parser.add_argument(
        '--save', type=str, default='surveymovie', metavar='NAME',
        help='base name (without extension) of output file to write')
    parser.add_argument(
        '--fps', type=float, default=10., metavar='FPS',
        help='frames per second to render')
    parser.add_argument(
        '--label', type=str, default='DESI', metavar='TEXT',
        help='label to display on each frame')
    parser.add_argument(
        '--output-path', default=None, metavar='PATH',
        help='path that desisurvey files are read from')
    parser.add_argument(
        '--tiles-file', default=None, metavar='TILES',
        help='name of tiles file to use instead of config.tiles_file')
    parser.add_argument(
        '--config-file', default='config.yaml', metavar='CONFIG',
        help='input configuration file')
    if options is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(options)

    # The scores option needs to be re-implemented after the refactor.
    args.scores = False
    # NOTE(review): args.scores is forced False just above, so this branch is
    # unreachable; 'log' is also not defined in this scope and would raise a
    # NameError if it ever ran. Revisit when --scores is re-enabled.
    if args.nightly and args.scores:
        log.warn('Cannot display scores in nightly summary.')
        args.scores = False

    # Validate start/stop date args and covert to datetime objects.
    # Unspecified values are taken from our config.
    config = desisurvey.config.Configuration(args.config_file)
    if args.start is None:
        args.start = config.first_day()
    else:
        try:
            args.start = desisurvey.utils.get_date(args.start)
        except ValueError as e:
            raise ValueError('Invalid start: {0}'.format(e))
    if args.stop is None:
        args.stop = config.last_day()
    else:
        try:
            args.stop = desisurvey.utils.get_date(args.stop)
        except ValueError as e:
            raise ValueError('Invalid stop: {0}'.format(e))
    if args.start >= args.stop:
        raise ValueError('Expected start < stop.')

    return args
def wrap(angle, offset=-60):
    """Wrap angle(s) in degrees into the half-open range [offset, offset + 360).

    Uses a true modulo (``np.mod``) instead of ``np.fmod`` so that arbitrarily
    large positive or negative inputs are handled correctly; the previous
    ``fmod(angle - offset + 360, 360)`` form could return values below
    ``offset`` for inputs more than one full period below it.

    Parameters
    ----------
    angle : float or numpy array
        Angle(s) in degrees to wrap.
    offset : float
        Lower edge of the target range (default -60).

    Returns
    -------
    float or numpy array
        ``angle`` reduced into [offset, offset + 360).
    """
    return np.mod(angle - offset, 360) + offset
class Animator(object):
"""Manage animation of survey progress.
"""
    def __init__(self, exposures_path, start, stop, label, show_scores):
        """Load exposure data and precompute everything needed per frame.

        Parameters
        ----------
        exposures_path : str
            FITS file with EXPOSURES and TILEDATA HDUs (surveysim output).
        start, stop : date-like
            Only exposures with local-noon MJD in [start, stop) are kept.
        label : str
            Text label displayed on each rendered frame.
        show_scores : bool
            Whether scheduler scores should be displayed.
        """
        self.log = desiutil.log.get_logger()
        self.config = desisurvey.config.Configuration()
        self.tiles = desisurvey.tiles.get_tiles()
        self.ephem = desisurvey.ephem.get_ephem()
        # Load exposures and associated tile data.
        self.exposures = astropy.table.Table.read(exposures_path, hdu='EXPOSURES')
        self.tiledata = astropy.table.Table.read(exposures_path, hdu='TILEDATA')
        self.label = label
        self.show_scores = show_scores
        # Tile coordinates; RA is wrapped into [-60, 300) for plotting.
        self.ra = wrap(self.tiles.tileRA)
        self.dec = self.tiles.tileDEC
        self.tileprogram = self.tiles.tileprogram
        self.tileid = self.tiles.tileID
        self.prognames = [p for p in self.tiles.programs]
        # if DARK and BRIGHT are names, put them first
        # (after this loop DARK ends up first, then BRIGHT, then the rest).
        for pname in ['BRIGHT', 'DARK']:
            if pname in self.prognames:
                self.prognames.remove(pname)
                self.prognames = [pname] + self.prognames
        # Per-program tile counts and boolean selection masks.
        self.tiles_per_program = {p: np.sum(self.tiles.program_mask[p])
                                  for p in self.prognames}
        self.psels = [self.tiles.program_mask[p] for p in self.prognames]
        self.start_date = self.config.first_day()
        self.survey_weeks = int(np.ceil((self.config.last_day() - self.start_date).days / 7))
        # Add some computed columns to the exposures table.
        self.exposures['EXPID'] = np.arange(len(self.exposures))
        self.exposures['INDEX'] = self.tiles.index(self.exposures['TILEID'])
        self.exposures['PROGRAM'] = self.tiles.tileprogram[self.exposures['INDEX']]
        # STATUS: 0 = no signal, 1 = in progress, 2 = reached min SNR^2 goal.
        self.exposures['STATUS'] = np.ones(len(self.exposures), np.int32)
        self.exposures['STATUS'][self.exposures['SNR2FRAC'] == 0] = 0
        self.exposures['STATUS'][
            self.exposures['SNR2FRAC'] >= self.config.min_snr2_fraction()] = 2
        # Convert tables to recarrays for much faster indexing.
        self.exposures = self.exposures.as_array()
        self.tiledata = self.tiledata.as_array()
        # Restrict the list of exposures to [start, stop].
        date_start = desisurvey.utils.get_date(start)
        date_stop = desisurvey.utils.get_date(stop)
        mjd_start = desisurvey.utils.local_noon_on_date(date_start).mjd
        mjd_stop = desisurvey.utils.local_noon_on_date(date_stop).mjd
        in_range = (self.exposures['MJD'] >= mjd_start) & (self.exposures['MJD'] < mjd_stop)
        self.exposures = self.exposures[in_range]
        self.num_exp = len(self.exposures)
        # Count nights with at least one exposure.
        day0 = desisurvey.utils.local_noon_on_date(date_start).mjd
        day_number = np.floor(self.exposures['MJD'] - day0)
        self.num_nights = len(np.unique(day_number))+1
        # Calculate each exposure's LST window.
        # NOTE(review): this adds the full EXPTIME (in days), not half of it,
        # so 'exp_midpt' may actually be the exposure end time — confirm.
        exp_midpt = astropy.time.Time(
            self.exposures['MJD'] + self.exposures['EXPTIME'] / 86400.,
            format='mjd', location=desisurvey.utils.get_location())
        lst_midpt = exp_midpt.sidereal_time('apparent').to(u.deg).value
        # convert from seconds to degrees.
        lst_len = self.exposures['EXPTIME'] / 240.
        self.lst = np.empty((self.num_exp, 2))
        self.lst[:, 0] = wrap(lst_midpt - 0.5 * lst_len)
        self.lst[:, 1] = wrap(lst_midpt + 0.5 * lst_len)
def init_figure(self, nightly, width=1920, height=1080, dpi=32):
"""Initialize matplot artists for drawing each frame.
"""
self.dpi = float(dpi)
# Initialize |
rwl/PyCIM | CIM15/IEC61970/Wires/ProtectedSwitch.py | Python | mit | 4,514 | 0.00288 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Wires.Switch import Switch
class ProtectedSwitch(Switch):
    """A ProtectedSwitch is a switching device that can be operated by
    ProtectionEquipment.

    Maintains bidirectional many-to-many links to ProtectionEquipment and
    one-to-many links to RecloseSequence.
    """

    def __init__(self, breakingCapacity=0.0, ProtectionEquipments=None, RecloseSequences=None, *args, **kw_args):
        """Initialises a new 'ProtectedSwitch' instance.

        @param breakingCapacity: The maximum fault current a breaking device can break safely under prescribed conditions of use.
        @param ProtectionEquipments: Protection equipments that operate this ProtectedSwitch.
        @param RecloseSequences: A breaker may have zero or more automatic reclosures after a trip occurs.
        """
        #: The maximum fault current a breaking device can break safely under prescribed conditions of use.
        self.breakingCapacity = breakingCapacity

        self._ProtectionEquipments = []
        self.ProtectionEquipments = [] if ProtectionEquipments is None else ProtectionEquipments

        self._RecloseSequences = []
        self.RecloseSequences = [] if RecloseSequences is None else RecloseSequences

        super(ProtectedSwitch, self).__init__(*args, **kw_args)

    _attrs = ["breakingCapacity"]
    _attr_types = {"breakingCapacity": float}
    _defaults = {"breakingCapacity": 0.0}
    _enums = {}
    _refs = ["ProtectionEquipments", "RecloseSequences"]
    _many_refs = ["ProtectionEquipments", "RecloseSequences"]

    def getProtectionEquipments(self):
        """Protection equipments that operate this ProtectedSwitch.
        """
        return self._ProtectionEquipments

    def setProtectionEquipments(self, value):
        # Detach this switch from every currently-linked equipment.
        # Fixed: the filtered list must be assigned back onto the equipment
        # object 'p'; the previous code assigned an attribute on the list
        # 'self._ProtectionEquipments' itself, which raised AttributeError.
        for p in self._ProtectionEquipments:
            filtered = [q for q in p.ProtectedSwitches if q != self]
            p._ProtectedSwitches = filtered
        # Attach this switch to every equipment in the new value.
        for r in value:
            if self not in r._ProtectedSwitches:
                r._ProtectedSwitches.append(self)
        self._ProtectionEquipments = value

    ProtectionEquipments = property(getProtectionEquipments, setProtectionEquipments)

    def addProtectionEquipments(self, *ProtectionEquipments):
        for obj in ProtectionEquipments:
            if self not in obj._ProtectedSwitches:
                obj._ProtectedSwitches.append(self)
            self._ProtectionEquipments.append(obj)

    def removeProtectionEquipments(self, *ProtectionEquipments):
        for obj in ProtectionEquipments:
            if self in obj._ProtectedSwitches:
                obj._ProtectedSwitches.remove(self)
            self._ProtectionEquipments.remove(obj)

    def getRecloseSequences(self):
        """A breaker may have zero or more automatic reclosures after a trip occurs.
        """
        return self._RecloseSequences

    def setRecloseSequences(self, value):
        # Unlink old sequences, then claim ownership of the new ones.
        for x in self._RecloseSequences:
            x.ProtectedSwitch = None
        for y in value:
            y._ProtectedSwitch = self
        self._RecloseSequences = value

    RecloseSequences = property(getRecloseSequences, setRecloseSequences)

    def addRecloseSequences(self, *RecloseSequences):
        for obj in RecloseSequences:
            obj.ProtectedSwitch = self

    def removeRecloseSequences(self, *RecloseSequences):
        for obj in RecloseSequences:
            obj.ProtectedSwitch = None
|
apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/anysex.py | Python | unlicense | 2,085 | 0.002398 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
)
class AnySexIE(InfoExtractor):
    """youtube-dl extractor for anysex.com video pages.

    Matches URLs of the form ``http://anysex.com/<numeric id>/`` and scrapes
    the direct MP4 URL plus metadata out of the page markup with regexes.
    """
    _VALID_URL = r'https?://(?:www\.)?anysex\.com/(?P<id>\d+)'
    # Fixture consumed by youtube-dl's integration test runner.
    _TEST = {
        'url': 'http://anysex.com/156592/',
        'md5': '023e9fbb7f7987f5529a394c34ad3d3d',
        'info_dict': {
            'id': '156592',
            'ext': 'mp4',
            'title': 'Busty and sexy blondie in her bikini strips for you',
            'description': 'md5:de9e418178e2931c10b62966474e1383',
            'categories': ['Erotic'],
            'duration': 270,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        # _VALID_URL already matched during dispatch, so mobj is never None.
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        # Direct stream URL embedded in an inline player config.
        video_url = self._html_search_regex(r"video_url\s*:\s*'([^']+)'", webpage, 'video URL')

        title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
        # The remaining fields are optional (fatal=False): absent markup
        # yields None rather than aborting the extraction.
        description = self._html_search_regex(
            r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
        thumbnail = self._html_search_regex(
            r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)

        categories = re.findall(
            r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)

        duration = parse_duration(self._search_regex(
            r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
        view_count = int_or_none(self._html_search_regex(
            r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'categories': categories,
            'duration': duration,
            'view_count': view_count,
            'age_limit': 18,
        }
|
wangy1931/tcollector | collectors/0/couchbase.py | Python | lgpl-3.0 | 4,853 | 0.013188 | #!/usr/bin/env python
"""
Couchbase collector
Refer to the following cbstats documentation for more details:
http://docs.couchbase.com/couchbase-manual-2.1/#cbstats-tool
"""
import os
import sys
import time
import subprocess
import re
from collectors.etc import couchbase_conf
from collectors.lib import utils
CONFIG = couchbase_conf.get_config()
COLLECTION_INTERVAL = CONFIG['collection_interval']
COUCHBASE_INITFILE = CONFIG['couchbase_initfile']
KEYS = frozenset( [
'bucket_active_conns',
'cas_hits',
'cas_misses',
'cmd_get',
'cmd_set',
'curr_connections',
'curr_conns_on_port_11209',
'curr_conns_on_port_11210',
'ep_queue_size',
'ep_num_value_ejects',
'ep_num_eject_failures',
'ep_oom_errors',
'ep_tmp_oom_errors',
'get_hits',
'get_misses',
'mem_used',
'total_connections',
'total_heap_bytes',
' | total_free_bytes',
'total_allocated_bytes',
'total_fragmentation_bytes',
'tcmalloc_current_thread_cache_bytes',
'tcmalloc_max_thread_cache_bytes',
'tcmalloc_unmapped_bytes',
] )
def find_couchbase_pid():
    """Return the pid of the running couchbase-server, or None.

    Follows the chain: init file -> init script -> pid file, giving up
    (returning None) at the first missing, unreadable, or unexpected link.
    """
    if not os.path.isfile(COUCHBASE_INITFILE):
        return

    init_script = None
    try:
        fd = open(COUCHBASE_INITFILE)
        for line in fd:
            if line.startswith("exec"):
                init_script = line.split()[1]
        fd.close()
    except IOError:
        utils.err("Check permission of file (%s)" % COUCHBASE_INITFILE)
        return
    if init_script is None:
        # No "exec" line found; previously this fell through to a NameError.
        return

    pid_file = None
    try:
        fd = open(init_script)
        for line in fd:
            if line.startswith("PIDFILE"):
                pid_file = line.split("=")[1].rsplit()[0]
        fd.close()
    except IOError:
        utils.err("Check permission of file (%s)" % init_script)
        return
    if pid_file is None:
        # No "PIDFILE=" line found; previously this fell through to a NameError.
        return

    try:
        fd = open(pid_file)
        pid = fd.read()
        fd.close()
    except IOError:
        utils.err("Couchbase-server is not running, since no pid file exists")
        return

    return pid.split()[0]
def find_conf_file(pid):
    """Return the couchbase-server config file path for *pid*, or None.

    Reads the process command line from /proc and extracts the value quoted
    after the "config_path" argument.
    """
    try:
        fd = open('/proc/%s/cmdline' % pid)
    except IOError as e:  # 'as' form works on Python 2.6+ and 3.x
        utils.err("Couchbase (pid %s) went away ? %s" % (pid, e))
        return
    try:
        cmdline = fd.read()
    finally:
        fd.close()
    if "config_path" not in cmdline:
        # Unexpected cmdline layout; previously this raised IndexError.
        return
    return cmdline.split("config_path")[1].split("\"")[1]
def find_bindir_path(config_file):
    """Return the bin directory path declared in *config_file*, or None.

    Scans for a line starting with "{path_config_bindir" and returns the
    quoted value after the first comma. Returns None when the file cannot
    be read or the key is absent.
    """
    try:
        fd = open(config_file)
    except IOError as e:  # 'as' form works on Python 2.6+ and 3.x
        utils.err("Error for Config file (%s): %s" % (config_file, e))
        return None
    try:
        for line in fd:
            if line.startswith("{path_config_bindir"):
                return line.split(",")[1].split("\"")[1]
    finally:
        fd.close()
    # Key not present in the config file.
    return None
def list_bucket(bin_dir):
    """Return the list of memcached or membase bucket names.

    Shells out to couchbase-cli; returns an empty list when the CLI is
    missing or exits non-zero.
    """
    buckets = []
    if not os.path.isfile("%s/couchbase-cli" % bin_dir):
        return buckets
    cli = ("%s/couchbase-cli" % bin_dir)
    try:
        buck = subprocess.check_output([cli, "bucket-list", "--cluster",
                                        "localhost:8091"])
    except subprocess.CalledProcessError:
        return buckets
    # Lines shaped like "key: value" are bucket attributes; anything else is
    # a bucket name. Raw string avoids invalid \s / \w escape warnings.
    regex = re.compile(r"[\s\w]+:[\s\w]+$")
    for i in buck.splitlines():
        if not regex.match(i):
            buckets.append(i)
    return buckets
def collect_stats(bin_dir, bucket):
    """Print stats for one bucket in tcollector's "metric ts value tags" format.

    Runs the cbstats CLI against the local memcached port and emits one line
    per metric listed in KEYS. Silently does nothing when the CLI is missing
    or fails.
    """
    if not os.path.isfile("%s/cbstats" % bin_dir):
        return
    cli = ("%s/cbstats" % bin_dir)
    try:
        ts = time.time()
        stats = subprocess.check_output([cli, "localhost:11211", "-b", bucket,
                                         "all"])
    except subprocess.CalledProcessError:
        return
    for stat in stats.splitlines():
        # Skip lines without a separator; previously these raised IndexError.
        if ":" not in stat:
            continue
        # Split on the first ':' only so values containing ':' stay intact.
        metric, value = stat.split(":", 1)
        metric = metric.lstrip(" ")
        value = value.lstrip(" \t")
        if metric in KEYS:
            print ("couchbase.%s %i %s bucket=%s" % (metric, ts, value, bucket))
def main():
    """tcollector entry point: locate couchbase, then poll bucket stats forever.

    Returns 13 when couchbase cannot be located (presumably tcollector's
    "don't respawn" exit code -- confirm against tcollector conventions).
    """
    utils.drop_privileges()
    pid = find_couchbase_pid()
    if not pid:
        utils.err("Error: Either couchbase-server is not running or file (%s)"
                  " doesn't exist" % COUCHBASE_INITFILE)
        return 13
    conf_file = find_conf_file(pid)
    if not conf_file:
        utils.err("Error: Can't find config file (%s)" % conf_file)
        return 13
    bin_dir = find_bindir_path(conf_file)
    if not bin_dir:
        utils.err("Error: Can't find bindir path in config file")
        return 13

    while True:
        # Listing bucket everytime so as to start collecting datapoints
        # of any new bucket.
        buckets = list_bucket(bin_dir)
        for b in buckets:
            collect_stats(bin_dir, b)
        time.sleep(COLLECTION_INTERVAL)
|
shackra/thomas-aquinas | tests/test_draw_line.py | Python | bsd-3-clause | 667 | 0.028486 | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, q"
tags = "Draw, Line"
import summa
from summa.director import director
from summa import draw
import pyglet, math
class TestLayer(summa.layer.Layer):
    """Layer that draws a single white line from (0, 0) to (100, 100)."""
    def __init__(self):
        super( TestLayer, self ).__init__()
        # RGBA white, fully opaque.
        line = draw.Line((0,0), (100,100), (255,255,255,255))
        self.add( line )
def main():
    """Initialise the director and run a scene containing a TestLayer."""
    director.init()
    test_layer = TestLayer ()
    main_scene = summa.scene.Scene (test_layer)
    director.run (main_scene)
main()
|
ArchiFleKs/magnum | contrib/drivers/k8s_opensuse_v1/version.py | Python | apache-2.0 | 683 | 0 | # Copyright 2016 - SUSE Linux GmbH
#
# Licensed under the Apache License, | Version 2.0 (the "License");
# you may not use this fil | e except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Release version of this Magnum driver.
version = '1.0.0'
# Identifier under which this driver is registered/selected.
driver = 'k8s_opensuse_v1'
# NOTE(review): presumably the Kubernetes/container image version this
# driver deploys -- confirm against the driver's templates.
container_version = '1.12.3'
|
transistorfet/nerve | nerve/base/controllers/files.py | Python | gpl-3.0 | 1,042 | 0.004798 | #!/usr/bin/py | thon3
# -*- coding: utf-8 -*-
import nerve
from nerve.http i | mport PyHTML
import os.path
import mimetypes
class FileController (nerve.Controller):
    """Serves files from a configured root directory, rendering .pyhtml
    files as PyHTML templates and everything else as plain files."""

    @classmethod
    def get_config_info(cls):
        """Declare the 'root' setting: the directory files are served from."""
        config_info = super().get_config_info()
        config_info.add_setting('root', "Root Directory", default='nerve/http/wwwdata')
        return config_info

    def do_request(self, request):
        """Resolve the request slug to a file under root and set the view.

        Raises a generic Exception for paths rejected by nerve.files.validate
        (presumably a path-traversal guard -- confirm in nerve.files) and
        nerve.NotFoundError for missing files. Directories fall back to their
        index.html.
        """
        filename = nerve.files.find_source(os.path.join(self.get_setting('root'), request.get_slug()))
        if not nerve.files.validate(filename):
            raise Exception("invalid path: " + repr(filename))
        if os.path.isdir(filename):
            filename = os.path.join(filename, "index.html")
        if not os.path.isfile(filename):
            raise nerve.NotFoundError("Error file not found: " + filename)
        (_, _, extension) = filename.rpartition('.')
        if extension == 'pyhtml':
            self.set_view(PyHTML(request, None, filename))
        else:
            self.load_file_view(filename)
|
subeax/grab | test/case/proxy.py | Python | mit | 3,521 | 0.000568 | # coding: utf-8
from unittest import TestCase
#import string
import json
import re
from grab import Grab, GrabMisuseError
from t | est.util import TMP_FILE, GRAB_TRANSPORT, get_temp_file
from t | est.server import SERVER
from grab.proxy import ProxyList
# Default two-proxy fixture ("host:port" per line) used by the proxy-list tests.
DEFAULT_PLIST_DATA = \
'1.1.1.1:8080\n'\
'1.1.1.2:8080\n'
class GrabProxyTestCase(TestCase):
    """Smoke tests for proxy support on the Grab object itself."""
    def setUp(self):
        SERVER.reset()

    def generate_plist_file(self, data=DEFAULT_PLIST_DATA):
        """Write *data* to a temp file and return its path.

        NOTE(review): duplicated in ProxyListTestCase below and unused by
        this class's single test.
        """
        path = get_temp_file()
        with open(path, 'w') as out:
            out.write(data)
        return path

    def test_basic(self):
        # A freshly constructed Grab starts with an empty proxy list.
        g = Grab(transport=GRAB_TRANSPORT)
        self.assertEqual(0, len(g.proxylist.proxy_list))
class ProxyListTestCase(TestCase):
    """Tests for grab.proxy.ProxyList loading, accumulation and rotation."""
    def setUp(self):
        SERVER.reset()

    def test_basic(self):
        # A fresh ProxyList is empty.
        pl = ProxyList()
        self.assertEqual(0, len(pl.proxy_list))

    def generate_plist_file(self, data=DEFAULT_PLIST_DATA):
        """Write *data* to a temp file and return its path."""
        path = get_temp_file()
        with open(path, 'w') as out:
            out.write(data)
        return path

    def test_file_source(self):
        pl = ProxyList()
        path = self.generate_plist_file()
        pl.set_source('file', location=path)
        self.assertEqual(2, len(pl.proxy_list))

    def test_remote_load(self):
        pl = ProxyList()
        SERVER.RESPONSE['get'] = DEFAULT_PLIST_DATA
        pl.set_source('url', url=SERVER.BASE_URL)
        self.assertEqual(2, len(pl.proxy_list))

    def test_accumulate_updates_disabled(self):
        """Loading works with accumulate_updates both off and on.

        Renamed from test_accumulate_updates_basic: the original name was
        duplicated by the method below, so this test was shadowed and
        never executed.
        """
        # test that all work with disabled accumulate_updates feature
        pl = ProxyList()
        path = self.generate_plist_file()
        pl.setup(accumulate_updates=False)
        pl.set_source('file', location=path)
        self.assertEqual(2, len(pl.proxy_list))

        # enable accumulate updates
        pl = ProxyList()
        pl.setup(accumulate_updates=True)
        path = self.generate_plist_file()
        pl.set_source('file', location=path)
        self.assertEqual(2, len(pl.proxy_list))

    def test_accumulate_updates_basic(self):
        """Reloading accumulates new proxies instead of replacing the list."""
        pl = ProxyList()
        pl.setup(accumulate_updates=True)

        # load initial list
        path = self.generate_plist_file('foo:1\nbar:1')
        pl.set_source('file', location=path)
        self.assertEqual(2, len(pl.proxy_list))

        # load list with one new and one old proxies
        with open(path, 'w') as out:
            out.write('foo:1\nbaz:1')
        pl.reload(force=True)
        self.assertEqual(3, len(pl.proxy_list))

    def test_get_next_proxy(self):
        """get_next_proxy cycles through the list; resetting the source restarts it."""
        pl = ProxyList()
        path = self.generate_plist_file('foo:1\nbar:1')
        pl.set_source('file', location=path)
        self.assertEqual(pl.get_next_proxy().server, 'foo')
        self.assertEqual(pl.get_next_proxy().server, 'bar')
        self.assertEqual(pl.get_next_proxy().server, 'foo')
        pl.set_source('file', location=path)
        self.assertEqual(pl.get_next_proxy().server, 'foo')

    def test_get_next_proxy_in_accumulate_mode(self):
        """Rotation continues seamlessly across accumulated updates."""
        pl = ProxyList()
        pl.setup(accumulate_updates=True)
        path = self.generate_plist_file('foo:1\nbar:1')
        pl.set_source('file', location=path)
        self.assertEqual(pl.get_next_proxy().server, 'foo')
        path = self.generate_plist_file('baz:1')
        pl.set_source('file', location=path)
        self.assertEqual(pl.get_next_proxy().server, 'bar')
        self.assertEqual(pl.get_next_proxy().server, 'baz')
        self.assertEqual(pl.get_next_proxy().server, 'foo')
|
xuerenlv/PaperWork | datamining_assignments/datamining_final/pandas_other.py | Python | apache-2.0 | 3,159 | 0.010763 | '''
Created on Jan 7, 2016
@author: nlp
'''
import pandas as pd
import numpy as np
import xgboost as xgb
from scipy.optimize import fmin_powell
from ml_metrics import quadratic_weighted_kappa
def eval_wrapper(yhat, y):
    """Quadratic-weighted-kappa score between predictions and labels.

    Labels are cast to int; predictions are rounded and clipped into the
    observed label range before scoring.
    """
    labels = np.array(y).astype(int)
    lo, hi = np.min(labels), np.max(labels)
    preds = np.clip(np.round(np.array(yhat)), lo, hi).astype(int)
    return quadratic_weighted_kappa(preds, labels)
def get_params():
    """Return the xgboost training parameters as a list of (name, value) pairs."""
    return [
        ("objective", "reg:linear"),
        ("eta", 0.1),
        ("min_child_weight", 80),
        ("subsample", 0.75),
        ("colsample_bytree", 0.30),
        ("silent", 1),
        ("max_depth", 9),
    ]
def apply_offset(data, bin_offset, sv, scorer=eval_wrapper):
    """Apply *bin_offset* to predictions whose rounded value equals *sv* and
    return the resulting score.

    NOTE: mutates ``data`` in place (row 1 is overwritten for the selected
    entries) -- callers rely on this accumulation across repeated calls.
    """
    # data has the format of pred=0, offset_pred=1, labels=2 in the first dim
    data[1, data[0].astype(int)==sv] = data[0, data[0].astype(int)==sv] + bin_offset
    score = scorer(data[1], data[2])
    return score
# global variables
columns_to_drop = ['Id', 'Response']
xgb_num_rounds = 250
num_classes = 8
print("Load the data using pandas")
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
# combine train and test
all_data = train.append(test)
# factorize categorical variables
all_data['Product_Info_2'] = pd.factorize(all_data['Product_Info_2'])[0]
print('Eliminate missing values')
# Use -1 for any others
all_data.fillna(-1, inplace=True)
# fix the dtype on the label column
all_data['Response'] = all_data['Response'].astype(int)
# Provide split column
all_data['Split'] = np.random.randint(5, size=all_data.shape[0])
# split train and test
train = all_data[all_data['Response']>0].copy()
test = all_data[all_data['Response']<1].copy()
# convert data to xgb data structure
xgtrain = xgb.DMatrix(train.drop(columns_to_drop, axis=1), train['Response'].values)
xgtest = xgb.DMatrix(test.drop(columns_to_drop, axis=1), label=test['Response'].values)
# get the parameters for xgboost
plst = get_params()
print(plst)
# train model
model = xgb.train(plst, xgtrain, xgb_num_rounds)
# get preds
train_preds = model.predict(xgtrain, ntree_limit=model.best_iteration)
print('Train score is:', eval_wrapper(train_preds, train['Response']))
test_preds = model.predict(xgtest, ntree_limit=model.best_iteration)
train_preds = np.clip(train_preds, -0.99, 8.99)
test_preds = np.clip(test_preds, -0.99, 8.99)
# train offsets
offsets = np.ones(num_classes) * -0.5
offset_train_preds = np.vstack((train_preds, train_preds, train['Response'].values))
for j in range(num_classes):
train_offset = lambda x: -apply_offset(offset_train_preds, x, j)
offsets[j] = fmin_powell(train_offset, offsets[j])
# apply offsets to test
data = np.vstack((test_preds, test_preds, test['Response'].values))
for j in range(num_classes):
data[1, data[0].astype(int)==j] = data[0, data[0].astype(int)==j] + offsets[j]
final_test_preds = np.round(np.clip(data[1], 1, 8)).astype(int)
preds_out = pd.DataFrame({"Id": test['Id'].values, "Response": final_test_preds})
preds_out = preds_out.set_index('Id')
preds_out.to_csv('xgb_offset_submission.csv')
|
ecolitan/fatics | venv/lib/python2.7/site-packages/netaddr/ip/iana.py | Python | agpl-3.0 | 14,149 | 0.002332 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
#
# DISCLAIMER
#
# netaddr is not sponsored nor endorsed by IANA.
#
# Use of data from IANA (Internet Assigned Numbers Authority) is subject to
# copyright and is provided with prior written permission.
#
# IANA data files included with netaddr are not modified in any way but are
# parsed and made available to end users through an API.
#
# See README file and source code for URLs to latest copies of the relevant
# files.
#
#-----------------------------------------------------------------------------
"""
Routines for accessing data published by IANA (Internet Assigned Numbers
Authority).
More details can be found at the following URLs :-
- IANA Home Page - http://www.iana.org/
- IEEE Protocols Information Home Page - http://www.iana.org/protocols/
"""
import os as _os
import os.path as _path
import sys as _sys
import re as _re
from xml.sax import make_parser, handler
from netaddr.core import Publisher, Subscriber, PrettyPrinter, dos2unix
from netaddr.ip import IPAddress, IPNetwork, IPRange, \
cidr_abbrev_to_verbose, iprange_to_cidrs
from netaddr.compat import _dict_items, _callable
#-----------------------------------------------------------------------------
#: Topic based lookup dictionary for IANA information.
#: Maps each topic to a dict of registry records; presumably populated from
#: the parsed IANA data files elsewhere in this module -- confirm.
IANA_INFO = {
    'IPv4' : {},
    'IPv6' : {},
    'multicast' : {},
}
#-----------------------------------------------------------------------------
class SaxRecordParser(handler.ContentHandler):
    """SAX handler that builds a dict per <record> element and passes it to a
    callback.

    Only children one level below <record> are captured: <xref> elements are
    folded into lists keyed by their ``type`` attribute; any other child tag
    becomes a string entry keyed by the tag name.
    """
    def __init__(self, callback=None):
        self._level = 0             # current element nesting depth
        self._is_active = False     # True while inside a <record> element
        self._record = None         # dict being assembled for current record
        self._tag_level = None      # depth at which the open <record> started
        self._tag_payload = None    # character-data buffer for current child
        self._tag_feeding = None    # True while characters() should collect
        self._callback = callback

    def startElement(self, name, attrs):
        self._level += 1

        if self._is_active is False:
            if name == 'record':
                self._is_active = True
                self._tag_level = self._level
                self._record = {}
                if 'date' in attrs:
                    self._record['date'] = attrs['date']
        elif self._level == self._tag_level + 1:
            if name == 'xref':
                # Cross-references carry their payload in attributes.
                if 'type' in attrs and 'data' in attrs:
                    l = self._record.setdefault(attrs['type'], [])
                    l.append(attrs['data'])
            else:
                # Plain child element: start collecting its text content.
                self._tag_payload = []
                self._tag_feeding = True
        else:
            # Deeper than one level below <record>: ignore text content.
            self._tag_feeding = False

    def endElement(self, name):
        if self._is_active is True:
            if name == 'record' and self._tag_level == self._level:
                # Record complete: hand it off and reset state.
                self._is_active = False
                self._tag_level = None
                if _callable(self._callback):
                    self._callback(self._record)
                self._record = None
            elif self._level == self._tag_level + 1:
                if name != 'xref':
                    self._record[name] = ''.join(self._tag_payload)
                    self._tag_payload = None
                    self._tag_feeding = False
        self._level -= 1

    def characters(self, content):
        if self._tag_feeding is True:
            self._tag_payload.append(content)
class XMLRecordParser(Publisher):
    """
    A configurable Parser that understands how to parse XML based records.
    """
    def __init__(self, fh, **kwargs):
        """
        Constructor.

        fh - a valid, open file handle to XML based record data.

        kwargs - any extra options; they become attributes on this instance.
        """
        super(XMLRecordParser, self).__init__()
        self.xmlparser = make_parser()
        # Route every parsed <record> through consume_record.
        self.xmlparser.setContentHandler(SaxRecordParser(self.consume_record))
        self.fh = fh
        self.__dict__.update(kwargs)

    def process_record(self, rec):
        """
        This is the callback method invoked for every record. It is usually
        over-ridden by base classes to provide specific record-based logic.

        Any record can be vetoed (not passed to registered Subscriber objects)
        by simply returning None.
        """
        return rec

    def consume_record(self, rec):
        # Normalise then publish; a None result vetoes the record.
        record = self.process_record(rec)
        if record is not None:
            self.notify(record)

    def parse(self):
        """
        Parse and normalises records, notifying registered subscribers with
        record data as it is encountered.
        """
        self.xmlparser.parse(self.fh)
#-----------------------------------------------------------------------------
class IPv4Parser(XMLRecordParser):
    """
    A XMLRecordParser that understands how to parse and retrieve data records
    from the IANA IPv4 address space file.

    It can be found online here :-

        - http://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xml
    """
    def __init__(self, fh, **kwargs):
        """
        Constructor.

        fh - a valid, open file handle to an IANA IPv4 address space file.

        kwargs - additional parser options.
        """
        # NOTE(review): kwargs are accepted but not forwarded to the base
        # class -- confirm whether parser options should propagate.
        super(IPv4Parser, self).__init__(fh)

    def process_record(self, rec):
        """
        Callback method invoked for every record.

        See base class method for more details.
        """
        record = {}
        # Keep only the known columns, coercing each value to a stripped str.
        for key in ('prefix', 'designation', 'date', 'whois', 'status'):
            record[key] = str(rec.get(key, '')).strip()

        # Strip leading zeros from octet.
        if '/' in record['prefix']:
            (octet, prefix) = record['prefix'].split('/')
            record['prefix'] = '%d/%d' % (int(octet), int(prefix))

        record['status'] = record['status'].capitalize()

        return record
#-----------------------------------------------------------------------------
class IPv6Parser(XMLRecordParser):
    """
    A XMLRecordParser that understands how to parse and retrieve data records
    from the IANA IPv6 address space file.

    It can be found online here :-

        - http://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xml
    """
    def __init__(self, fh, **kwargs):
        """
        Constructor.

        fh - a valid, open file handle to an IANA IPv6 address space file.

        kwargs - additional parser options.
        """
        # NOTE(review): kwargs are accepted but not forwarded to the base
        # class -- confirm whether parser options should propagate.
        super(IPv6Parser, self).__init__(fh)

    def process_record(self, rec):
        """
        Callback method invoked for every record.

        See base class method for more details.
        """
        # Normalise to the three fields consumers expect; the first RFC
        # reference (if any) becomes 'reference'.
        record = {
            'prefix': str(rec.get('prefix', '')).strip(),
            'allocation': str(rec.get('description', '')).strip(),
            'reference': str(rec.get('rfc', [''])[0]).strip(),
        }
        return record
#-----------------------------------------------------------------------------
class MulticastParser(XMLRecordParser):
"""
A XMLRecordParser that knows how to process the IANA IPv4 multicast address
allocation file.
It can be found online here :-
- http://www.iana.org/assignments/multicast-addresses/multicast-addresses.xml
"""
    def __init__(self, fh, **kwargs):
        """
        Constructor.

        fh - a valid, open file handle to an IANA IPv4 multicast address
        allocation file.

        kwargs - additional parser options.
        """
        # NOTE(review): kwargs are accepted but not forwarded to the base
        # class -- confirm whether parser options should propagate.
        super(MulticastParser, self).__init__(fh)
def normalise_addr(self, addr):
"""
Removes variations from address entries found in this particular file.
"""
if '-' in addr:
(a1, a2) = addr.split('-')
o1 = a1.strip().split('.')
o2 = a2.strip().split('.')
return '%s-%s' % ('.'.join([str(int(i)) for i in o1]),
'.'.join([str(int(i)) for i in o2]))
else:
o1 = addr.strip().split('.')
return '.'.join([str(int(i)) for i in o1])
def proce |
gorserg/openprocurement.tender.competitivedialogue | openprocurement/tender/competitivedialogue/views/stage1/complaint.py | Python | apache-2.0 | 1,110 | 0 | # -*- coding: utf-8 -*-
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.openeu.views.complaint import (
TenderEUComplaintResource
)
from openprocurement.tender.competitivedialogue.constants import (
CD_EU_TYPE, CD_UA_TYPE
)
@optendersresource(name='{}:Tender Complaints'.format(CD_EU_TYPE),
                   collection_path='/tenders/{tender_id}/complaints',
                   path='/tenders/{tender_id}/complaints/{complaint_id}',
                   procurementMethodType=CD_EU_TYPE,
                   description="Competitive Dialogue EU complaints")
class CompetitiveDialogueEUComplaintResource(TenderEUComplaintResource):
    # Inherits all complaint behaviour from the OpenEU resource; only the
    # route registration (procurementMethodType=CD_EU_TYPE) differs.
    pass
@optendersresource(name='{}:Tender Complaints'.format(CD_UA_TYPE),
                   collection_path='/tenders/{tender_id}/complaints',
                   path='/tenders/{tender_id}/complaints/{complaint_id}',
                   procurementMethodType=CD_UA_TYPE,
                   description="Competitive Dialogue UA complaints")
class CompetitiveDialogueUAComplaintResource(TenderEUComplaintResource):
    # Same handlers as the EU variant above; registered for the UA
    # procurement method type instead.
    pass
dhamaniasad/caniusepython3 | caniusepython3/test/test_command.py | Python | apache-2.0 | 1,792 | 0.001116 | # Copyright 2014 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 | (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the | License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from caniusepython3 import command
from caniusepython3.test import unittest, skip_pypi_timeouts
from distutils import dist
def make_command(requires):
    """Build a caniusepython3 Command around a Distribution made from *requires*."""
    distribution = dist.Distribution(requires)
    return command.Command(distribution)
class RequiresTests(unittest.TestCase):
    # Checks that the setuptools command collects dependencies from each of
    # the supported requirement keywords.

    def verify_cmd(self, requirements):
        # Build a distribution whose *requirements* keyword lists 'pip' and
        # assert that _dependencies() surfaces exactly that project.
        requires = {requirements: ['pip']}
        cmd = make_command(requires)
        got = cmd._dependencies()
        self.assertEqual(frozenset(got), frozenset(['pip']))
        return cmd

    def test_install_requires(self):
        self.verify_cmd('install_requires')

    def test_tests_require(self):
        self.verify_cmd('tests_require')

    def test_extras_require(self):
        # extras_require is nested one level deeper ({extra_name: [deps]}).
        cmd = make_command({'extras_require': {'testing': ['pip']}})
        got = frozenset(cmd._dependencies())
        self.assertEqual(got, frozenset(['pip']))
class OptionsTests(unittest.TestCase):
    def test_finalize_options(self):
        # finalize_options is a required distutils hook; it should be a no-op.
        # Don't expect anything to happen.
        make_command({}).finalize_options()
class NetworkTests(unittest.TestCase):
    # Exercises the full command against PyPI; skipped when PyPI times out.
    @skip_pypi_timeouts
    def test_run(self):
        make_command({'install_requires': ['pip']}).run()
|
chingchai/workshop | qgis-scripts/generate_stripmap_index.py | Python | mit | 2,746 | 0.006555 | #!python
# coding: utf-8
# edit by gistnu
# reference from lejedi76
# https://gis.stackexchange.com/questions/173127/generating-equal-sized-polygons-along-line-with-pyqgis
from qgis.core import QgsMapLayerRegistry, QgsGeometry, QgsField, QgsFeature, QgsPoint
from PyQt4.QtCore import QVariant
def getAllbbox(layer, width, height, srid, overlap):
    # For every selected LineString feature, create an in-memory polygon
    # layer of rotated, overlapping page rectangles (width x height, in the
    # CRS given by *srid*) laid along the line -- a strip-map atlas index.
    # *overlap* is the fraction (0..1) by which consecutive pages overlap.
    # Returns 0 on success, 2 if a selected feature is not a LineString.
    # Python 2 / QGIS 2 API (print statements, `<>` operator, PyQGIS classes).
    for feature in layer.selectedFeatures():
        geom = feature.geometry()
        if geom.type() <> QGis.Line:
            print "Geometry type should be a LineString"
            return 2
        # One throwaway memory layer per feature, named after the source layer.
        bbox = QgsVectorLayer("Polygon?crs=epsg:"+str(srid),
                              layer.name()+'_id_'+str(feature.id()),
                              "memory")
        gid = QgsField("gid", QVariant.Int, "int")
        angle = QgsField("angle", QVariant.Double, "double")
        attributes = [gid, angle]
        bbox.startEditing()
        bboxProvider = bbox.dataProvider()
        bboxProvider.addAttributes(attributes)
        # curs walks the line as a 0..1 fraction; step is one page length,
        # stepnudge advances by a page minus the requested overlap.
        curs = 0
        numbbox = geom.length()/(width)
        step = 1.0/numbbox
        stepnudge = (1.0-overlap) * step
        pageFeatures = []
        r = 1
        currangle = 0
        while curs <= 1:
            # print 'r =' + str(r)
            # print 'curs = ' + str(curs)
            startpoint = geom.interpolate(curs*geom.length())
            endpoint = geom.interpolate((curs+step)*geom.length())
            x_start = startpoint.asPoint().x()
            y_start = startpoint.asPoint().y()
            x_end = endpoint.asPoint().x()
            y_end = endpoint.asPoint().y()
            print 'x_start :' + str(x_start)
            print 'y_start :' + str(y_start)
            # NOTE(review): currline is built but never used afterwards.
            currline = QgsGeometry().fromWkt('LINESTRING({} {}, {} {})'.format(x_start, y_start, x_end, y_end))
            # Page template at the origin, shifted down by half a page so the
            # line runs through its vertical centre, then rotated into place.
            currpoly = QgsGeometry().fromWkt(
                'POLYGON((0 0, 0 {height},{width} {height}, {width} 0, 0 0))'.format(height=height, width=width))
            currpoly.translate(0,-height/2)
            # NOTE(review): azimuth is recomputed on the next line; this first
            # assignment is unused.
            azimuth = startpoint.asPoint().azimuth(endpoint.asPoint())
            currangle = (startpoint.asPoint().azimuth(endpoint.asPoint())+270)%360
            currpoly.rotate(currangle, QgsPoint(0,0))
            currpoly.translate(x_start, y_start)
            # NOTE(review): asPolygon() returns a value that is discarded here.
            currpoly.asPolygon()
            page = currpoly
            curs = curs + stepnudge
            feat = QgsFeature()
            feat.setAttributes([r, currangle])
            feat.setGeometry(page)
            pageFeatures.append(feat)
            r = r + 1
        bboxProvider.addFeatures(pageFeatures)
        bbox.commitChanges()
        QgsMapLayerRegistry.instance().addMapLayer(bbox)
    return 0
# Script entry point: operate on the layer currently active in QGIS.
layer = iface.activeLayer()
# 100 m x 200 m pages in EPSG:32647 (WGS 84 / UTM zone 47N), 20% overlap.
getAllbbox(layer, 100, 200, 32647, 0.2) #layer, width, height, crs, overlap
bnorthan/projects | Scripts/Jython/Grand_Challenge/DeconvolveChallengeImages_reflection.py | Python | gpl-2.0 | 1,894 | 0.016367 | # @StatusService status
# @DatasetService data
# @CommandService command
# @DisplayService display
# @IOService io
# this script deconvolves an image.
#
# It is assumed the images have allready been extended. See ExtendChallengeImages_reflection
rootImageDir="/home/bnorthan/Brian2014/Images/General/Deconvolution/Grand_Challenge/EvaluationData/"
inputDir=rootImageDir+"/Extended/reflection/"
outputDir=rootImageDir+"/Extended/reflection/"
baseName=2
inputName=inputDir+str(baseName)+".extended.ome.tif"
psfName=inputDir+str(baseName)+".psf.extended.ome.tif"
imageWindowX=192
imageWindowY=192
imageWindowZ=64
iterations=200
regularizationFactor=0.0005
algorithm="rltv_tn"
deconvolvedName=str(baseName)+str(algorithm)+"."+str(regularizationFactor)+"."+str(iterations)+".ome.tif"
finalName=str(baseName)+str(algorithm)+"."+str(regularizationFactor)+"."+str(iterations)+".final.ome.tif"
# open and display the input image
inputData=data.open(inputName)
display.createDisplay(inputData.getName(), inputData);
# open and disp | lay the psf
psf=data.open(psfName)
display.createDisplay(psf.getName(), psf);
# call RL with total variation
deconvolved = command.run("com.truenorth.commands.fft.TotalVariationRLCommand", True, "input", inputData, "psf", ps | f, "truth", None,"firstGuess", None, "iterations", iterations, "firstGuessType", "constant", "convolutionStrategy", "circulant", "regularizationFactor", regularizationFactor, "imageWindowX", 0, "imageWindowY", 0, "imageWindowZ", 0, "psfWindowX", 0, "psfWindowY", 0, "psfWindowZ", 0).get().getOutputs().get("output");
io.save(deconvolved, outputDir+deconvolvedName);
# crop back down to image window size
final = command.run("com.truenorth.commands.dim.CropCommand", True, "input", deconvolved, "xSize", imageWindowX, "ySize", imageWindowY, "zSize", imageWindowZ).get().getOutputs().get("output");
io.save(final, outputDir+finalName);
|
thombashi/typepy | test/converter/test_bool.py | Python | mit | 2,806 | 0.000356 | """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import pytest
import typepy
from ._common import convert_wrapper
class Test_Bool:
    # Exercises typepy.Bool conversion across the three strictness levels:
    #   0 -> loosest: bools, bool-ish strings and the ints 0/1 convert;
    #   1 -> strings still convert, plain ints are rejected by convert();
    #   2 -> strictest: only real bool instances convert().
    # "E" appears to be the sentinel convert_wrapper returns when the call
    # raises -- see test._common.convert_wrapper (not shown here).
    @pytest.mark.parametrize(
        ["method", "strict_level", "value", "expected"],
        [
            # convert(): raises for anything not acceptable at the level.
            ["convert", 0, True, True],
            ["convert", 0, False, False],
            ["convert", 0, "true", True],
            ["convert", 0, "FALSE", False],
            ["convert", 0, 1, True],
            ["convert", 0, 1.1, "E"],
            ["convert", 0, None, "E"],
            ["convert", 1, True, True],
            ["convert", 1, "true", True],
            ["convert", 1, "FALSE", False],
            ["convert", 1, 1, "E"],
            ["convert", 1, 1.1, "E"],
            ["convert", 1, None, "E"],
            ["convert", 2, True, True],
            ["convert", 2, "true", "E"],
            ["convert", 2, "FALSE", "E"],
            ["convert", 2, 1, "E"],
            ["convert", 2, 1.1, "E"],
            ["convert", 2, None, "E"],
            # try_convert(): returns None instead of raising.
            ["try_convert", 0, True, True],
            ["try_convert", 0, "true", True],
            ["try_convert", 0, "FALSE", False],
            ["try_convert", 0, 1, True],
            ["try_convert", 0, 1.1, None],
            ["try_convert", 0, None, None],
            ["try_convert", 1, True, True],
            ["try_convert", 1, "true", True],
            ["try_convert", 1, "FALSE", False],
            ["try_convert", 1, 1, None],
            ["try_convert", 1, 1.1, None],
            ["try_convert", 1, None, None],
            ["try_convert", 2, True, True],
            ["try_convert", 2, "true", None],
            ["try_convert", 2, "FALSE", None],
            ["try_convert", 2, 1, None],
            ["try_convert", 2, 1.1, None],
            ["try_convert", 2, None, None],
            # force_convert(): ignores the strictness level where possible.
            ["force_convert", 0, True, True],
            ["force_convert", 0, "true", True],
            ["force_convert", 0, "FALSE", False],
            ["force_convert", 0, 1, True],
            ["force_convert", 0, 1.1, "E"],
            ["force_convert", 0, None, "E"],
            ["force_convert", 1, True, True],
            ["force_convert", 1, "true", True],
            ["force_convert", 1, "FALSE", False],
            ["force_convert", 1, 1, True],
            ["force_convert", 1, 1.1, "E"],
            ["force_convert", 1, None, "E"],
            ["force_convert", 2, True, True],
            ["force_convert", 2, "true", True],
            ["force_convert", 2, "FALSE", False],
            ["force_convert", 2, 1, True],
            ["force_convert", 2, 1.1, "E"],
            ["force_convert", 2, None, "E"],
        ],
    )
    def test_normal(self, method, strict_level, value, expected):
        assert convert_wrapper(typepy.Bool(value, strict_level), method) == expected
|
cedricbonhomme/services | freshermeat/web/views/api/v1/language.py | Python | agpl-3.0 | 1,163 | 0 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Freshermeat - An open source software directory and release tracker.
# Copyright (C) 2017-2020 CΓ©dric Bonhomme - https://www.cedricbonhomme.org
#
# For more information: https://sr.ht/~cedric/freshermeat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this pro | gram. If not, s | ee <http://www.gnu.org/licenses/>.
from freshermeat.bootstrap import manager
from freshermeat.models import Language
from freshermeat.web.views.api.v1.common import url_prefix
# Read-only REST endpoint for Language resources, registered under
# ``url_prefix`` by the API manager (only GET is exposed).
blueprint_language = manager.create_api_blueprint(
    Language, url_prefix=url_prefix, methods=["GET"]
)
|
kelvinongtoronto/numbers | letters.py | Python | artistic-2.0 | 650 | 0.010769 | print " A BBBBBB CCCC DDDDD EEEEEEE FFFFFFF GGGG H H IIIII JJJJJJ"
# Prints the letters A-J as a 7-row ASCII-art banner (Python 2 print
# statements; each row is one horizontal slice through all ten glyphs).
print " A      BBBBBB   CCCC  DDDDD   EEEEEEE FFFFFFF  GGGG  H     H IIIII  JJJJJJ"
print " A A    B     B C    C D    D  E       F       G    G H     H   I        J"
print "A   A   B     B C      D     D E       F       G      H     H   I        J"
print "AAAAAAA BBBBBB  C      D     D EEEEE   FFFFF   G  GGG HHHHHHH   I        J"
print "A     A B     B C      D     D E       F       G    G H     H   I        J"
print "A     A B     B C    C D    D  E       F       G    G H     H   I   J   J"
print "A     A BBBBBB   CCCC  DDDDD   EEEEEEE F        GGGG  H     H IIIII  JJJJ"
tavisrudd/eventlet | eventlet/db_pool.py | Python | mit | 15,932 | 0.005649 | from collections import deque
import sys
import time
from eventlet.pools import Pool
from eventlet import timeout
from eventlet import greenthread
class ConnectTimeout(Exception):
    """Raised by the pool when creating a database connection exceeds
    *connect_timeout* (see BaseConnectionPool)."""
class BaseConnectionPool(Pool):
    def __init__(self, db_module,
                 min_size = 0, max_size = 4,
                 max_idle = 10, max_age = 30,
                 connect_timeout = 5,
                 *args, **kwargs):
        """
        Constructs a pool with at least *min_size* connections and at most
        *max_size* connections. Uses *db_module* to construct new connections.
        The *max_idle* parameter determines how long pooled connections can
        remain idle, in seconds. After *max_idle* seconds have elapsed
        without the connection being used, the pool closes the connection.
        *max_age* is how long any particular connection is allowed to live.
        Connections that have been open for longer than *max_age* seconds are
        closed, regardless of idle time. If *max_age* is 0, all connections are
        closed on return to the pool, reducing it to a concurrency limiter.
        *connect_timeout* is the duration in seconds that the pool will wait
        before timing out on connect() to the database. If triggered, the
        timeout will raise a ConnectTimeout from get().
        The remainder of the arguments are used as parameters to the
        *db_module*'s connection constructor.
        """
        # A DB-API module (or equivalent) is mandatory; fail fast if missing.
        assert(db_module)
        self._db_module = db_module
        # Extra positional/keyword args are forwarded verbatim to every
        # db_module connect call made by the pool.
        self._args = args
        self._kwargs = kwargs
        self.max_idle = max_idle
        self.max_age = max_age
        self.connect_timeout = connect_timeout
        # Handle of the currently scheduled expiration timer, if any.
        self._expiration_timer = None
        # order_as_stack=True: most-recently-returned connections are handed
        # out first, which keeps the tail of free_items the most idle.
        super(BaseConnectionPool, self).__init__(min_size=min_size,
                                                 max_size=max_size,
                                                 order_as_stack=True)
def _schedule_expiration(self):
""" Sets up a timer that will call _expire_old_connections when the
oldest connection currently in the free pool is ready to expire. This
is the earliest possible time that a connection could expire, thus, the
timer will be running as infrequently as possible without missing a
possible expiration.
If this function is called when a timer is already scheduled, it does
nothing.
If max_age or max_idle is 0, _schedule_expiration likewise does nothing.
"""
if self.max_age is 0 or self.max_idle is 0:
# expiration is unnecessary because all connections will be expired
# on put
return
if ( self._expiration_timer is not None
and not getattr(self._expiration_timer, 'called', False)
and not getattr(self._expiration_timer, 'cancelled', False) ):
# the next timer is already scheduled
return
try:
now = time.time()
self._expire_old_connections(now)
# the last item in the list, because of the stack ordering,
# is going to be the most-idle
idle_delay = (self.free_items[-1][0] - now) + self.max_idle
oldest = min([t[1] for t in self.free_items])
age_delay = (oldest - now) + self.max_age
next_delay = min(idle_delay, age_delay)
except (IndexError, ValueError):
# no free items, unschedule ourselves
self._expiration_timer = None
return
if next_delay > 0:
# set up a continuous self-calling loop
self._expiration_timer = greenthread.spawn_after(next_delay,
self._schedule_expiration)
    def _expire_old_connections(self, now):
        """ Iterates through the open connections contained in the pool, closing
        ones that have remained idle for longer than max_idle seconds, or have
        been in existence for longer than max_age seconds.
        *now* is the current time, as returned by time.time().
        """
        original_count = len(self.free_items)
        # First pass: collect and close everything past its idle/age limit.
        expired = [
            conn
            for last_used, created_at, conn in self.free_items
            if self._is_expired(now, last_used, created_at)]
        for conn in expired:
            self._safe_close(conn, quiet=True)
        # Second pass: rebuild free_items with only the survivors, preserving
        # the stack order maintained by the pool.
        new_free = [
            (last_used, created_at, conn)
            for last_used, created_at, conn in self.free_items
            if not self._is_expired(now, last_used, created_at)]
        self.free_items.clear()
        self.free_items.extend(new_free)
        # adjust the current size counter to account for expired
        # connections
        self.current_size -= original_count - len(self.free_items)
    def _is_expired(self, now, last_used, created_at):
        """ Return True if a connection with the given timestamps should be
        considered expired.  (Pure predicate: despite what an older docstring
        claimed, this does not close anything itself.)  A non-positive
        max_idle or max_age makes every connection expired immediately."""
        if ( self.max_idle <= 0
             or self.max_age <= 0
             or now - last_used > self.max_idle
             or now - created_at > self.max_age ):
            return True
        return False
    def _unwrap_connection(self, conn):
        """ If the connection was wrapped by a subclass of
        BaseConnectionWrapper and is still functional (as determined
        by the __nonzero__ method), returns the unwrapped connection.
        If anything goes wrong with this process, returns None.
        """
        base = None
        try:
            # A falsy wrapper means the consumer already closed it.
            if conn:
                base = conn._base
                conn._destroy()
            else:
                base = None
        except AttributeError:
            # conn was not a wrapper (no _base/_destroy); fall through to None.
            pass
        return base
    def _safe_close(self, conn, quiet = False):
        """ Closes the (already unwrapped) connection, squelching any
        exceptions.  With quiet=False, unexpected close() errors are
        reported on stdout (Python 2 print statement)."""
        try:
            conn.close()
        except (KeyboardInterrupt, SystemExit):
            # Never swallow interpreter-shutdown signals.
            raise
        except AttributeError:
            pass # conn is None, or junk
        except:
            if not quiet:
                print "Connection.close raised: %s" % (sys.exc_info()[1])
    def get(self):
        """Check out a connection, creating a fresh one when the pool hands
        back the None placeholder, and return it wrapped in a
        PooledConnectionWrapper so the consumer can call close() safely."""
        conn = super(BaseConnectionPool, self).get()
        # None is a flag value that means that put got called with
        # something it couldn't use
        if conn is None:
            try:
                conn = self.create()
            except Exception:
                # unconditionally increase the free pool because
                # even if there are waiters, doing a full put
                # would incur a greenlib switch and thus lose the
                # exception stack
                self.current_size -= 1
                raise
        # if the call to get() draws from the free pool, it will come
        # back as a tuple
        if isinstance(conn, tuple):
            _last_used, created_at, conn = conn
        else:
            created_at = time.time()
        # wrap the connection so the consumer can call close() safely
        wrapped = PooledConnectionWrapper(conn, self)
        # annotating the wrapper so that when it gets put in the pool
        # again, we'll know how old it is
        wrapped._db_pool_created_at = created_at
        return wrapped
def put(self, conn):
created_at = getattr(conn, '_db_pool_created_at', 0)
now = time.time()
conn = self._unwrap_connection(conn)
if self._is_expired(now, now, created_at):
self._safe_close(conn, quiet=False)
conn = None
else:
# rollback any uncommitted changes, so that the next client
# has a clean slate. This also pokes the connection to see if
# it's dead or None
try:
if conn:
conn.rollback()
except KeyboardInterrupt:
raise
except:
# we don't care what the exception was, we just know the
# connection is dead
print "WARNING: connection.rollback raised: %s" % (sys.exc_info()[1])
conn = None
if conn is not None:
super(BaseConn |
SophieBartmann/Faust-Bot | FaustBot/Modules/PrivMsgObserverPrototype.py | Python | gpl-3.0 | 744 | 0 | from FaustBot.Communication.Connection import Connection
from FaustBot.Modules.ModulePrototype import ModulePrototype
from FaustBot.Modules.ModuleType import ModuleType
class PrivMsgObserverPrototype(ModulePrototype):
    """
    Prototype for bot modules that react to every private/channel message.

    Subclasses must implement :meth:`cmd`, :meth:`help` and
    :meth:`update_on_priv_msg`.
    """
    @staticmethod
    def cmd():
        # The command string this module responds to.
        # (Message added for consistency with help(); typo "Needsto" fixed.)
        raise NotImplementedError("Needs to be implemented by subclasses!")
    @staticmethod
    def help():
        # A short usage/help text for this module.
        raise NotImplementedError("Needs to be implemented by subclasses!")
    @staticmethod
    def get_module_types():
        # This prototype hooks only the "on message" event.
        return [ModuleType.ON_MSG]
    def __init__(self):
        super().__init__()
    def update_on_priv_msg(self, data, connection: Connection):
        # Called for every incoming message with the parsed message data and
        # the active server connection.
        raise NotImplementedError("Some module doesn't do anything")
|
FinancialSentimentAnalysis-team/Finanical-annual-reports-analysis-code | luowang/Data_Processing/AllPdfToTxt.py | Python | apache-2.0 | 2,612 | 0.012251 | from pdf2txt import pdfTotxt1, pdfTotxt2
import os
def AllPdftoTxt(stock_dir, dir, root_txt_path):
    '''
    Translate every PDF report under *dir* to a text file under
    *root_txt_path*, mirroring the <stock>/<year>/<type> directory layout.

    stock_dir: iterable of stock-number directory names
    dir: the root directory holding all PDF reports (with trailing separator)
    root_txt_path: target root for the generated .txt files
    '''
    for stock in stock_dir:
        years_dir = os.listdir(dir + stock)
        for y in years_dir:
            type_dir = os.listdir(dir + stock + '/' + y)
            for t in type_dir:
                report_dir = os.listdir(dir + stock + '/' + y + '/' + t)
                target_dir = root_txt_path + stock + '/' + y + '/' + t
                # Skip report types that were already converted.  (Fixed:
                # the original tested `root_txt`, an undefined name.)
                if os.path.exists(target_dir):
                    continue
                # Create the mirrored output directory so the converters can
                # open their output files.
                os.makedirs(target_dir)
                for r in report_dir:
                    pdf_path = dir + stock + '/' + y + '/' + t + '/' + r
                    txt_path = target_dir + '/' + r + '.txt'
                    try:
                        pdfTotxt1(pdf_path, txt_path)
                    except Exception:
                        # Fall back to the alternate extractor on any failure
                        # (narrowed from a bare except).
                        pdfTotxt2(pdf_path, txt_path)
def MergeFile(root_txt_dir, target_txt_path):
    '''
    Append the stripped contents of every file in *root_txt_dir* to
    *target_txt_path*, one source file per line group, in sorted filename
    order (os.listdir order is arbitrary, so sorting makes the merged
    output deterministic).

    root_txt_dir: source directory containing the txt files
    target_txt_path: target file that accumulates all contents (opened in
        append mode, matching the original behaviour)
    '''
    file_list = sorted(os.listdir(root_txt_dir))
    with open(target_txt_path, 'a') as out:
        for name in file_list:
            # os.path.join also tolerates a root_txt_dir without a trailing
            # separator, which plain string concatenation did not.
            with open(os.path.join(root_txt_dir, name), 'r') as src:
                out.write(src.read().strip() + '\n')
if __name__=='__main__':
    root_txt_path = '/home/luowang/data/financial reports/demo_68_txt/'
    if not os.path.exists(root_txt_path):
        os.mkdir(root_txt_path)
    root_pdf_path = '/home/luowang/data/financial reports/demo_68_test/'
    #### translate pdf file into txt file
    if os.path.exists(root_pdf_path):
        stock_dir = os.listdir(root_pdf_path)
        AllPdftoTxt(stock_dir, root_pdf_path, root_txt_path)
    ### merge all txt files into one file per (stock, year, type)
    target_txt_dir = '/home/luowang/data/financial reports/demo_68_txt/txt/'
    if not os.path.exists(target_txt_dir):
        os.mkdir(target_txt_dir)
    for stock in os.listdir(root_txt_path):
        # The merge target lives inside root_txt_path; don't treat it as a
        # stock directory.
        if stock == 'txt':
            continue
        for year in os.listdir(root_txt_path + stock):
            # Fixed: the inner loops referenced an undefined name `y`
            # (NameError) instead of `year`; `type` renamed so it no longer
            # shadows the builtin.
            for report_type in os.listdir(root_txt_path + stock + '/' + year):
                # Trailing separator so MergeFile's path concatenation works.
                temp_root_dir = root_txt_path + stock + '/' + year + '/' + report_type + '/'
                target_txt_path = target_txt_dir + stock + '_' + year + '_' + report_type + '_chairman_statement.txt'
                MergeFile(temp_root_dir, target_txt_path)
mfem/PyMFEM | mfem/_ser/stable3d.py | Python | bsd-3-clause | 5,714 | 0.005425 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _stable3d
else:
import _stable3d
try:
import builtins as __builtin__
except ImportError:
import __builtin_ | _
_swig_new_instance_method = _stable3d.SWIG_PyInstanceMethod_New
_swig_new_static_method = _stable3d.SWIG_PyStaticMethod_New
def _swig_repr(self):
    # SWIG boilerplate: repr showing the proxy's class and its underlying
    # C++ object ("proxy of ..."), or empty when the pointer is gone.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    # SWIG boilerplate: wrap a __setattr__ so instances only accept writes to
    # 'this', 'thisown' and existing property-backed attributes.
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    # SWIG boilerplate: class-level analogue of the instance wrapper above;
    # forbids adding brand-new class attributes.
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    # Rebuilds the class through the given metaclass, preserving its name,
    # bases and attribute dict.
    def wrapper(cls):
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Delegates attribute writes through the guarded setter defined above.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._ser.element
import mfem._ser.globals
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.densemat
import mfem._ser.vector
import mfem._ser.operators
import mfem._ser.matrix
import mfem._ser.geom
import mfem._ser.intrules
import mfem._ser.table
import mfem._ser.hash
# SWIG-generated proxy; regenerate via SWIG rather than editing by hand.
class STable3DNode(object):
    r"""Proxy of C++ mfem::STable3DNode class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    Prev = property(_stable3d.STable3DNode_Prev_get, _stable3d.STable3DNode_Prev_set, doc=r"""Prev : p.mfem::STable3DNode""")
    Column = property(_stable3d.STable3DNode_Column_get, _stable3d.STable3DNode_Column_set, doc=r"""Column : int""")
    Floor = property(_stable3d.STable3DNode_Floor_get, _stable3d.STable3DNode_Floor_set, doc=r"""Floor : int""")
    Number = property(_stable3d.STable3DNode_Number_get, _stable3d.STable3DNode_Number_set, doc=r"""Number : int""")
    def __init__(self):
        r"""__init__(STable3DNode self) -> STable3DNode"""
        _stable3d.STable3DNode_swiginit(self, _stable3d.new_STable3DNode())
    __swig_destroy__ = _stable3d.delete_STable3DNode
# Register STable3DNode in _stable3d:
_stable3d.STable3DNode_swigregister(STable3DNode)
# SWIG-generated proxy; regenerate via SWIG rather than editing by hand.
# Appears to wrap mfem's symmetric 3D table (face lookup keyed by vertex
# triples/quadruples) -- confirm semantics against the mfem C++ docs.
class STable3D(object):
    r"""Proxy of C++ mfem::STable3D class."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, nr):
        r"""__init__(STable3D self, int nr) -> STable3D"""
        _stable3d.STable3D_swiginit(self, _stable3d.new_STable3D(nr))
    def Push(self, r, c, f):
        r"""Push(STable3D self, int r, int c, int f) -> int"""
        return _stable3d.STable3D_Push(self, r, c, f)
    Push = _swig_new_instance_method(_stable3d.STable3D_Push)
    def Index(self, r, c, f):
        r"""Index(STable3D self, int r, int c, int f) -> int"""
        return _stable3d.STable3D_Index(self, r, c, f)
    Index = _swig_new_instance_method(_stable3d.STable3D_Index)
    def Push4(self, r, c, f, t):
        r"""Push4(STable3D self, int r, int c, int f, int t) -> int"""
        return _stable3d.STable3D_Push4(self, r, c, f, t)
    Push4 = _swig_new_instance_method(_stable3d.STable3D_Push4)
    def __call__(self, *args):
        r"""
        __call__(STable3D self, int r, int c, int f) -> int
        __call__(STable3D self, int r, int c, int f, int t) -> int
        """
        return _stable3d.STable3D___call__(self, *args)
    __call__ = _swig_new_instance_method(_stable3d.STable3D___call__)
    def NumberOfElements(self):
        r"""NumberOfElements(STable3D self) -> int"""
        return _stable3d.STable3D_NumberOfElements(self)
    NumberOfElements = _swig_new_instance_method(_stable3d.STable3D_NumberOfElements)
    __swig_destroy__ = _stable3d.delete_STable3D
    def Print(self, *args):
        r"""
        Print(STable3D self, std::ostream & out=out)
        Print(STable3D self, char const * file, int precision=16)
        """
        return _stable3d.STable3D_Print(self, *args)
    Print = _swig_new_instance_method(_stable3d.STable3D_Print)
    def PrintGZ(self, file, precision=16):
        r"""PrintGZ(STable3D self, char const * file, int precision=16)"""
        return _stable3d.STable3D_PrintGZ(self, file, precision)
    PrintGZ = _swig_new_instance_method(_stable3d.STable3D_PrintGZ)
# Register STable3D in _stable3d:
_stable3d.STable3D_swigregister(STable3D)
|
laijingtao/landlab | landlab/components/overland_flow/generate_overland_flow_kinwave.py | Python | mit | 7,019 | 0.004132 | # -*- coding: utf-8 -*-
"""
Landlab component for overland flow using the kinematic-wave approximation.
Created on Fri May 27 14:26:13 2016
@author: gtucker
"""
from landlab import Component
import numpy as np
class KinwaveOverlandFlowModel(Component):
    """
    Calculate water flow over topography.
    Landlab component that implements a two-dimensional
    kinematic wave model. This is an extremely simple, unsophisticated
    model, originally built simply to demonstrate the component creation
    process. Limitations to the present version include: infiltration is
    handled very crudely, the caller is responsible for picking a stable
    time step size (no adaptive time stepping is used in the `run_one_step`
    method), precipitation rate is constant for a given duration (then zero),
    and all parameters are uniform in space. Also, the terrain is assumed
    to be stable over time. Caveat emptor!
    Construction:
        KinwaveOverlandFlowModel(grid, precip_rate=1.0,
                                 precip_duration=1.0,
                                 infilt_rate=0.0,
                                 roughness=0.01, **kwds)
    Parameters
    ----------
    grid : ModelGrid
        A Landlab grid object.
    precip_rate : float, optional (defaults to 1 mm/hr)
        Precipitation rate, mm/hr
    precip_duration : float, optional (defaults to 1 hour)
        Duration of precipitation, hours
    infilt_rate : float, optional (defaults to 0)
        Maximum rate of infiltration, mm/hr
    roughness : float, defaults to 0.01
        Manning roughness coefficient, s/m^1/3
    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> rg = RasterModelGrid((4, 5), 10.0)
    >>> kw = KinwaveOverlandFlowModel(rg)
    >>> kw.vel_coef
    100.0
    >>> rg.at_node['surface_water__depth']
    array([ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.])
    """
    _name = 'KinwaveOverlandFlowModel'
    _input_var_names = (
        'topographic__elevation',
        'topographic__gradient',
    )
    _output_var_names = (
        'surface_water__depth',
        'water__velocity',
        'water__specific_discharge',
    )
    _var_units = {
        # Fixed: key was 'topographic__slope', which did not match the
        # 'topographic__gradient' name used everywhere else in this class.
        'topographic__elevation' : 'm',
        'topographic__gradient' : 'm/m',
        'surface_water__depth' : 'm',
        'water__velocity' : 'm/s',
        'water__specific_discharge' : 'm2/s',
    }
    _var_mapping = {
        'topographic__elevation' : 'node',
        'topographic__gradient' : 'link',
        'surface_water__depth' : 'node',
        'water__velocity' : 'link',
        'water__specific_discharge' : 'link',
    }
    _var_doc = {
        'topographic__elevation':
            'elevation of the ground surface relative to some datum',
        'topographic__gradient':
            'gradient of the ground surface',
        'surface_water__depth':
            'depth of water',
        'water__velocity':
            'flow velocity component in the direction of the link',
        'water__specific_discharge':
            'flow discharge component in the direction of the link',
    }
    def __init__(self, grid, precip_rate=1.0, precip_duration=1.0,
                 infilt_rate=0.0, roughness=0.01, **kwds):
        """Initialize the KinwaveOverlandFlowModel.
        Parameters
        ----------
        grid : ModelGrid
            Landlab ModelGrid object
        precip_rate : float, optional (defaults to 1 mm/hr)
            Precipitation rate, mm/hr
        precip_duration : float, optional (defaults to 1 hour)
            Duration of precipitation, hours
        infilt_rate : float, optional (defaults to 0)
            Maximum rate of infiltration, mm/hr
        roughness : float, defaults to 0.01
            Manning roughness coefficient, s/m^1/3
        """
        # Store grid and parameters and do unit conversion
        self._grid = grid
        self.precip = precip_rate / 3600000.0 # convert to m/s
        self.precip_duration = precip_duration * 3600.0 # h->s
        self.infilt = infilt_rate / 3600000.0 # convert to m/s
        self.vel_coef = 1.0 / roughness # do division now to save time
        # Create fields...
        # Elevation
        if 'topographic__elevation' in grid.at_node:
            self.elev = grid.at_node['topographic__elevation']
        else:
            self.elev = grid.add_zeros('node',
                                       'topographic__elevation')
        # Water depth
        if 'surface_water__depth' in grid.at_node:
            self.depth = grid.at_node['surface_water__depth']
        else:
            self.depth = grid.add_zeros('node', 'surface_water__depth')
        # Slope
        if 'topographic__gradient' in grid.at_link:
            self.slope = grid.at_link['topographic__gradient']
        else:
            self.slope = grid.add_zeros('link', 'topographic__gradient')
        # Velocity
        if 'water__velocity' in grid.at_link:
            self.vel = grid.at_link['water__velocity']
        else:
            self.vel = grid.add_zeros('link', 'water__velocity')
        # Discharge
        if 'water__specific_discharge' in grid.at_link:
            self.disch = grid.at_link['water__specific_discharge']
        else:
            self.disch = grid.add_zeros('link',
                                        'water__specific_discharge')
        # Calculate the ground-surface slope (assume it won't change)
        self.slope[self._grid.active_links] = \
            self._grid.calc_grad_at_link(self.elev)[self._grid.active_links]
        # Fixed: take sqrt of the slope magnitude. sqrt of a negative
        # gradient (downhill in the link direction) produced NaN; the flow
        # direction is carried separately by sign_slope.
        self.sqrt_slope = np.sqrt(np.abs(self.slope))
        self.sign_slope = np.sign( self.slope )
    def run_one_step(self, dt, current_time=0.0, **kwds):
        """Calculate water flow for a time period `dt`.
        """
        # Calculate water depth at links. This implements an "upwind" scheme
        # in which water depth at the links is the depth at the higher of the
        # two nodes.
        H_link = self._grid.map_value_at_max_node_to_link(
            'topographic__elevation', 'surface_water__depth')
        # Calculate velocity using the Manning equation.  Assign in place
        # ([:]) so the 'water__velocity' grid field stays bound to this
        # array -- plain assignment silently detached the field before.
        self.vel[:] = -self.sign_slope * self.vel_coef * H_link**0.66667 \
                    * self.sqrt_slope
        # Calculate discharge (in place, for the same reason)
        self.disch[:] = H_link * self.vel
        # Flux divergence
        dqda = self._grid.calc_flux_div_at_node(self.disch)
        # Rate of change of water depth
        if current_time < self.precip_duration:
            ppt = self.precip
        else:
            ppt = 0.0
        dHdt = ppt - self.infilt - dqda
        # Update water depth: simple forward Euler scheme
        self.depth[self._grid.core_nodes] += dHdt[self._grid.core_nodes] * dt
        # Very crude numerical hack: prevent negative water depth
        self.depth[np.where(self.depth < 0.0)[0]] = 0.0
if __name__ == '__main__':
    # Run the doctests embedded in the class docstring above.
    import doctest
    doctest.testmod()
|
LockScreen/Backend | venv/bin/rst2xetex.py | Python | mit | 826 | 0.001211 | #!/Users/Varun/Documents/GitHub/LockScreen/venv/bin/python
# $Id: rst2xetex.py 7038 2011-05-19 09:12:02Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing XeLaTeX source code.
"""
try:
import lo | cale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates XeLaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publi | sh_cmdline(writer_name='xetex', description=description)
|
jonathanslenders/python-prompt-toolkit | examples/print-text/print-frame.py | Python | bsd-3-clause | 331 | 0 | #!/usr/bin/env python
""" |
Example usage of 'print_container', a tool to print
any layout in a non-interactive way.
"""
from prompt_toolkit.shortcuts import print_container
from prompt_toolkit.widgets import Frame, TextArea
# Build the framed text area first, then render it non-interactively.
frame = Frame(
    TextArea(text="Hello world!\n"),
    title="Stage: parse",
)
print_container(frame)
| |
eduherraiz/naniano | src/apconf/mixins/database.py | Python | gpl-2.0 | 800 | 0 | # -*- coding: utf-8 -*-
from apconf import Options
opts = Options()
def get(value, d | efault):
    # Shorthand: read *value* from the [Database] section of the app config,
    # falling back to *default*.
    # NOTE(review): the signature line is garbled by the dataset column split.
    return opts.get(value, default, section='Database')
class DatabasesMixin(object):
    def DATABASES(self):
        """Build the Django ``DATABASES`` setting from the [Database] section.

        Short engine names (e.g. 'sqlite3') are expanded to the full
        'django.db.backends.*' dotted path; defaults target a local
        'db.sqlite' file.
        """
        engine = get('DATABASE_ENGINE', 'sqlite3')
        if 'django.db.backends' in engine:
            ENGINE = engine
        else:
            ENGINE = 'django.db.backends.' + engine
        re | turn {
            'default': {
                'ENGINE': ENGINE,
                'NAME': get('DATABASE_NAME', 'db.sqlite'),
                'USER': get('DATABASE_USER', None),
                'PASSWORD': get('DATABASE_PASSWORD', ''),
                'HOST': get('DATABASE_HOST', ''),
                'PORT': get('DATABASE_PORT', ''),
                'OPTIONS': {},
            }
        }
|
kevinmcain/graal | mxtool/mx.py | Python | gpl-2.0 | 148,173 | 0.005851 | #!/usr/bin/python
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
#
r"""
mx is a command line tool inspired by mvn (http://maven.apache.org/)
and hg (http://mercurial.selenic.com/). It includes a mechanism for
managing the dependencies between a set of projects (like Maven)
as well as making it simple to run commands
(like hg is the interface to the Mercurial commands).
The organizing principle of mx is a project suite. A suite is a directory
containing one or more projects. It's not coincidental that this closely
matches the layout of one or more projects in a Mercurial repository.
The configuration information for a suite lives in an 'mx' sub-directory
at the top level of the suite. A suite is given a name by a 'suite=name'
property in the 'mx/projects' file (if omitted the name is suite directory).
An 'mx' subdirectory can be named as plain 'mx' or 'mxbasename', where
'basename' is the os.path.basename of the suite directory.
The latter is useful to avoid clashes in IDE project names.
When launched, mx treats the current working directory as a suite.
This is the primary suite. All other suites are called included suites.
The configuration files (i.e. in the 'mx' sub-directory) of a suite are:
projects
Defines the projects and libraries in the suite and the
dependencies between them.
commands.py
Suite specific extensions to the commands available to mx.
includes
Other suites to be loaded. This is recursive. Each
line in an includes file is a path to a suite directory.
env
A set of environment variable definitions. These override any
existing environment variables. Common properties set here
include JAVA_HOME and IGNORED_PROJECTS.
The includes and env files are typically not put under version control
as they usually contain local file-system paths.
The projects file is like the pom.xml file from Maven except that
it is a properties file (not XML). Each non-comment line
in the file specifies an attribute of a project or library. The main
difference between a project and a library is that the former contains
source code built by the mx tool where as the latter is an external
dependency. The format of the projects file is
Library specification format:
library@<name>@<prop>=<value>
Built-in library properties (* = required):
*path
The file system path for the library to appear on a class path.
urls
A comma separated list of URLs from which the library (jar) can
be downloaded and saved in the location specified by 'path'.
optional
If "true" then this library will be omitted from a class path
if it doesn't exist on the file system and no URLs are specified.
sourcePath
The file system path for a jar file containing the library sources.
sourceUrls
A comma separated list of URLs from which the library source jar can
be downloaded and saved in the location specified by 'sourcePath'.
Project specification format:
project@<name>@<prop>=<value>
The name of a project also denotes the directory it is in.
Built-in project properties (* = required):
subDir
The sub-directory of the suite in which the project directory is
contained. If not specified, the project directory is directly
under the suite directory.
*sourceDirs
A comma separated list of source directory names (relative to
the project directory).
dependencies
A comma separated list of the libraries and project the project
depends upon (transitive dependencies should be omitted).
checkstyle
The project whose Checkstyle configuration
(i.e. <project>/.checkstyle_checks.xml) is used.
native
"true" if the project is native.
javaCompliance
The minimum JDK version (format: x.y) to which the project's
sources comply (required for non-native projects).
workingSets
A comma separated list of working set names. The project belongs
to the given working sets, for which the eclipseinit command
will generate Eclipse configurations.
Other properties can be specified for projects and libraries for use
by extension commands.
Property values can use environment variables with Bash syntax (e.g. ${HOME}).
"""
import sys, os, errno, time, subprocess, shlex, types, urllib2, contextlib, StringIO, zipfile, signal, xml.sax.saxutils, tempfile, fnmatch
import textwrap
import xml.parsers.expat
import shutil, re, xml.dom.minidom
from collections import Callable
from threading import Thread
from argparse import ArgumentParser, REMAINDER
from os.path import join, basename, dirname, exists, getmtime, isabs, expandvars, isdir, isfile
DEFAULT_JAVA_ARGS = '-ea -Xss2m -Xmx1g'
_projects = dict()
_libs = dict()
_dists = dict()
_suites = dict()
_annotationProcessors = None
_mainSuite = None
_opts = None
_java = None
"""
A distribution is a jar or zip file containing the output from one or more Java projects.
"""
class Distribution:
def __init__(self, suite, name, path, deps):
self.suite = suite
self.name = name
self.path = path.replace('/', os.sep)
if not isabs(self.path):
self.path = join(suite.dir, self.path)
self.deps = deps
self.update_listeners = set()
def __str__(self):
return self.name
def add_update_listener(self, listener):
self.update_listeners.add(listener)
def notify_updated(self):
for l in self.update_listeners:
l(self) |
"""
A dependency is a library or project specified in a suite.
"""
class Dependency:
def __init__(self, suite, name):
self.name = name
self.suite = suite
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return self.name != other.name
def __hash__(self):
return hash(self.name)
| def isLibrary(self):
return isinstance(self, Library)
def isProject(self):
return isinstance(self, Project)
class Project(Dependency):
def __init__(self, suite, name, srcDirs, deps, javaCompliance, workingSets, d):
Dependency.__init__(self, suite, name)
self.srcDirs = srcDirs
self.deps = deps
self.checkstyleProj = name
self.javaCompliance = JavaCompliance(javaCompliance) if javaCompliance is not None else None
self.native = False
self.workingSets = workingSets
self.dir = d
# Create directories for projects that don't yet exist
if not exists(d):
os.mkdir(d)
for s in self.source_dirs():
if not exists(s):
os.mkdir(s)
def all_deps(self, deps, includeLibs, includeSelf=True, includeAnnotationProcessors=False):
"""
Add the transitive set of dependencies for this projec |
bokeh/bokeh | tests/unit/bokeh/client/test_states.py | Python | bsd-3-clause | 3,726 | 0.008588 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any
# Bokeh imports
from bokeh.protocol.message import Message
# Module under test
import bokeh.client.states as bcs # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
class MockConnection:
    """Connection double: every hook simply records that it was invoked."""

    # ``state`` holds the name (or ``(name, arg)`` payload) of the last hook run.
    state: Any

    def __init__(self, to_pop: Message[Any] | None = None) -> None:
        self.state = None
        # Message handed back by ``_pop_message`` (None means "no message").
        self._queued = to_pop

    async def _connect_async(self):
        self.state = "_connect_async"

    async def _wait_for_ack(self):
        self.state = "_wait_for_ack"

    async def _handle_messages(self):
        self.state = "_handle_messages"

    async def _transition(self, arg: Any):
        self.state = ("_transition", arg)

    async def _transition_to_disconnected(self, arg: Any):
        self.state = "_transition_to_disconnected"

    async def _next(self):
        self.state = "_next"

    async def _pop_message(self):
        return self._queued
class MockMessage:
    """Message double: only the request-id header is ever consulted."""

    header = {'reqid': 'reqid'}
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
async def test_NOT_YET_CONNECTED() -> None:
    """NOT_YET_CONNECTED.run must kick off the async connect."""
    state = bcs.NOT_YET_CONNECTED()
    conn = MockConnection()
    await state.run(conn)
    assert conn.state == "_connect_async"
async def test_CONNECTED_BEFORE_ACK() -> None:
    """CONNECTED_BEFORE_ACK.run must wait for the server ACK."""
    state = bcs.CONNECTED_BEFORE_ACK()
    conn = MockConnection()
    await state.run(conn)
    assert conn.state == "_wait_for_ack"
async def test_CONNECTED_AFTER_ACK() -> None:
    """CONNECTED_AFTER_ACK.run must enter the message-handling loop."""
    state = bcs.CONNECTED_AFTER_ACK()
    conn = MockConnection()
    await state.run(conn)
    assert conn.state == "_handle_messages"
async def test_DISCONNECTED() -> None:
    """DISCONNECTED.run is terminal: it must invoke nothing on the connection."""
    state = bcs.DISCONNECTED()
    conn = MockConnection()
    await state.run(conn)
    assert conn.state is None
async def test_W | AITING_FOR_REPLY() -> None:
s = bcs.WAITING_FOR_REPLY("reqid")
assert s.reply == None
assert s.reqid == "reqid"
c = MockConnection()
await s.run(c)
assert c.state == "_transition_to_disconnected"
assert s.reply is None
m = MockMessage()
c = MockConnection(to_pop=m)
await s.run(c)
assert c.state[0] == "_transition"
assert isinstance(c.state[1], bcs.CONNECTED_AFTER_ACK)
assert s.reply is m
s._reqid = "nomatch"
c = MockConnec | tion(to_pop=m)
await s.run(c)
assert c.state == "_next"
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
tardyp/buildbot | master/buildbot/db/steps.py | Python | gpl-2.0 | 7,224 | 0.000277 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import json
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
from buildbot.util import epoch2datetime
class StepsConnectorComponent(base.DBConnectorComponent):
# Documentation is in developer/db.rst
url_lock = None
@defer.inlineCallbacks
def getStep(self, stepid=None, buildid=None, number=None, name=None):
tbl = self.db.model.steps
if stepid is not None:
wc = (tbl.c.id == stepid)
else:
if buildid is None:
raise RuntimeError('must supply either stepid or buildid')
if number is not None:
wc = (tbl.c.number == number)
elif name is not None:
wc = (tbl.c.name == name)
else:
raise RuntimeError('must supply either number or name')
wc = wc & (tbl.c.buildid == buildid)
def thd(conn):
q = self.db.model.steps.select(whereclause=wc)
res = conn.execute(q)
row = res.fetchone()
rv = None
if row:
rv = self._stepdictFromRow(row)
res.close()
return rv
return (yield self.db.pool.do(thd))
# returns a Deferred that returns a value
def getSteps(self, buildid):
def thd(conn):
tbl = self.db.model.steps
q = tbl.select()
q = q.where(tbl.c.buildid == buildid)
q = q.order_by(tbl.c.number)
res = conn.execute(q)
return [self._stepdictFromRow(row) for row in res.fetchall()]
return self.db.pool.do(thd)
# returns a Deferred that returns a value
def addStep(self, buildid, name, state_string):
def thd(conn):
tbl = self.db.model.steps
# get the highest current number
r = conn.execute(sa.select([sa.func.max(tbl.c.number)],
whereclause=(tbl.c.buildid == buildid)))
number = r.scalar()
number = 0 if number is None else number + 1
# note that there is no chance for a race condition here,
# since only one master is inserting steps. If there is a
# conflict, then the name is likely already taken.
insert_row = dict(buildid=buildid, number=number,
started_at=None, complete_at=None,
state_string=state_string,
urls_json='[]', name=name)
try:
r = conn.execute(self.db.model.steps.insert(), insert_row)
got_id = r.inserted_primary_key[0]
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
got_id = None
if got_id:
return (got_id, number, name)
# we didn't get an id, so calculate a unique name and use that
# instead. Because names are truncated at the right to fit in a
# 50-character identifier, this isn't a simple query.
res = conn.execute(sa.select([tbl.c.name],
whereclause=((tbl.c.buildid == buildid))))
names = {row[0] for row in res}
num = 1
while True:
numstr = '_%d' % num
newname = name[:50 - len(numstr)] + numstr
if newname not in names:
break
num += 1
insert_row['name'] = newname
r = conn.execute(self.db.model.steps.insert(), insert_row)
got_id = r.inserted_primary_key[0]
return (got_id, number, newname)
return self.db.pool.do(thd)
@defer.inlineCallbacks
def startStep(self, stepid):
started_at = int(self.master.reactor.seconds())
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q, started_at=started_at)
yield self.db.pool.do(thd)
# returns a Deferred that returns None
def setStepStateString(self, stepid, state_string):
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q, state_string=state_string)
return self.db.pool.do(thd)
def addURL(self, stepid, name, url, _racehook=None):
# This methods adds an URL to the db
# This is a read modify write and thus there is a possibility
# that several urls are added at the same time (e.g with a deferredlist
# at the end of a step)
# this race condition is only inside the same master, as only one master
# is supposed to add urls to | a buildstep.
# so threading.lock is used, as we are in the thread pool
if self.url_lock is None:
# this runs in reactor thread, so no race here..
self.url_lock = defer.DeferredLock()
def thd(conn):
tbl = self.db.model.steps
wc = (tbl.c.id == stepid)
q = sa.select([tbl.c.urls_json],
whereclause=wc)
res = conn.execute(q)
row = res.fetcho | ne()
if _racehook is not None:
_racehook()
urls = json.loads(row.urls_json)
url_item = dict(name=name, url=url)
if url_item not in urls:
urls.append(url_item)
q = tbl.update(whereclause=wc)
conn.execute(q, urls_json=json.dumps(urls))
return self.url_lock.run(lambda: self.db.pool.do(thd))
# returns a Deferred that returns None
def finishStep(self, stepid, results, hidden):
def thd(conn):
tbl = self.db.model.steps
q = tbl.update(whereclause=(tbl.c.id == stepid))
conn.execute(q,
complete_at=int(self.master.reactor.seconds()),
results=results,
hidden=1 if hidden else 0)
return self.db.pool.do(thd)
def _stepdictFromRow(self, row):
return dict(
id=row.id,
number=row.number,
name=row.name,
buildid=row.buildid,
started_at=epoch2datetime(row.started_at),
complete_at=epoch2datetime(row.complete_at),
state_string=row.state_string,
results=row.results,
urls=json.loads(row.urls_json),
hidden=bool(row.hidden))
|
johnnygreco/hugs | scripts/runner.py | Python | mit | 6,693 | 0.004333 | """
Run hugs pipeline.
"""
from __future__ import division, print_function
import os, shutil
from time import time
import mpi4py.MPI as MPI
import schwimmbad
from hugs.pipeline import next_gen_search, find_lsbgs
from hugs.utils import PatchMeta
import hugs
def ingest_data(args):
    """Persist one worker's results to the database (runs on the master only)."""
    t_start = time()
    success, sources, meta_data, synth_ids = args
    run_name, tract, patch, patch_meta = meta_data
    ingest = hugs.database.HugsIngest(session, run_name)
    if not success:
        # The patch failed: still record the tract/patch bookkeeping rows.
        ingest.add_tract(tract)
        ingest.add_patch(patch, patch_meta)
    else:
        ingest.add_all(tract, patch, patch_meta, sources)
        if synth_ids is not None:
            ingest.add_injected_synths(synth_ids)
    elapsed = time() - t_start
    hugs.log.logger.info('time to ingest = {:.2f} seconds'.format(elapsed))
def worker(p):
"""
Workers initialize pipe configuration and run pipeline.
"""
rank = MPI.COMM_WORLD.Get_rank()
if p['seed'] is None:
tract, p1, p2 = p['tract'], int(p['patch'][0]), int(p['patch'][-1])
seed = [int(time()), tract, p1, p2, rank]
else:
seed = p['seed']
config = hugs.PipeConfig(run_name=p['run_name'],
config_fn=p['config_fn'],
random_state=seed,
log_fn=p['log_fn'],
rerun_path=p['rerun_path'])
config.set_patch_id(p['tract'], p['patch'])
config.logger.info('random seed set to {}'.format(seed))
if p['use_old_pipeline']:
results = find_lsbgs.run(config)
else:
results = next_gen_search.run(config, False)
pm = results.hugs_exp.patch_meta
if (results.synths is not None) and results.success:
if len(results.synths) > 0:
synth_ids = results.synths.to_pandas().loc[:, ['synth_id']]
for plane in config.synth_check_masks:
masked = hugs.synths.find_masked_synths(results.synths,
results.exp_clean,
planes=plane)
synth_ids['mask_' + plane.lower()] = masked
| else:
synth_ids | = None
else:
synth_ids = None
patch_meta = PatchMeta(
x0 = pm.x0,
y0 = pm.y0,
small_frac = pm.small_frac,
cleaned_frac = pm.cleaned_frac,
bright_obj_frac = pm.bright_obj_frac,
good_data_frac = pm.good_data_frac
)
meta_data = [
config.run_name,
config.tract,
config.patch,
patch_meta,
]
if results.success:
df = results.sources.to_pandas()
df['flags'] = df['flags'].astype(int)
else:
df = None
config.reset_mask_planes()
config.logger.info('writing results to database')
return results.success, df, meta_data, synth_ids
if __name__=='__main__':
from argparse import ArgumentParser
from astropy.table import Table
rank = MPI.COMM_WORLD.Get_rank()
# parse command-line arguments
parser = ArgumentParser('Run hugs pipeline')
parser.add_argument('-t', '--tract', type=int, help='HSC tract')
parser.add_argument('-p', '--patch', type=str, help='HSC patch')
parser.add_argument('-c', '--config_fn', help='hugs config file',
default=hugs.utils.default_config_fn)
parser.add_argument('--patches_fn', help='patches file')
parser.add_argument('--use-old-pipeline', action="store_true")
parser.add_argument('-r', '--run_name', type=str, default='hugs-pipe-run')
parser.add_argument('--seed', help='rng seed', default=None)
parser.add_argument('--rerun_path', help='full rerun path', default=None)
parser.add_argument('--overwrite', type=bool,
help='overwrite database', default=True)
group = parser.add_mutually_exclusive_group()
group.add_argument('--ncores', default=1, type=int,
help='Number of processes (uses multiprocessing).')
group.add_argument('--mpi', default=False, action="store_true",
help="Run with MPI.")
args = parser.parse_args()
config_params = hugs.utils.read_config(args.config_fn)
outdir = config_params['hugs_io']
#######################################################################
# run on a single patch
#######################################################################
if args.tract is not None:
assert args.patch is not None
tract, patch = args.tract, args.patch
patches = Table([[tract], [patch]], names=['tract', 'patch'])
run_dir_name = '{}-{}-{}'.format(args.run_name, tract, patch)
outdir = os.path.join(outdir, run_dir_name)
hugs.utils.mkdir_if_needed(outdir)
log_fn = os.path.join(outdir, 'hugs-pipe.log')
patches['outdir'] = outdir
patches['log_fn'] = log_fn
#######################################################################
# OR run on all patches in file
#######################################################################
elif args.patches_fn is not None:
patches = Table.read(args.patches_fn)
if rank==0:
time_label = hugs.utils.get_time_label()
outdir = os.path.join(
outdir, '{}-{}'.format(args.run_name, time_label))
hugs.utils.mkdir_if_needed(outdir)
log_dir = os.path.join(outdir, 'log')
hugs.utils.mkdir_if_needed(log_dir)
log_fn = []
for tract, patch in patches['tract', 'patch']:
fn = os.path.join(log_dir, '{}-{}.log'.format(tract, patch))
log_fn.append(fn)
patches['outdir'] = outdir
patches['log_fn'] = log_fn
else:
print('\n**** must give tract and patch --or-- a patch file ****\n')
parser.print_help()
exit()
patches['rerun_path'] = args.rerun_path
patches['seed'] = args.seed
patches['config_fn'] = args.config_fn
patches['run_name'] = args.run_name
patches['use_old_pipeline'] = args.use_old_pipeline
if rank==0:
# open database session with master process
db_fn = os.path.join(outdir, args.run_name+'.db')
engine = hugs.database.connect(db_fn, args.overwrite)
session = hugs.database.Session()
shutil.copyfile(args.config_fn, os.path.join(outdir, 'config.yml'))
pool = schwimmbad.choose_pool(mpi=args.mpi, processes=args.ncores)
list(pool.map(worker, patches, callback=ingest_data))
pool.close()
|
MGautier/Programacion | Python/salida_estandar.py | Python | mit | 1,308 | 0.009174 | #!/usr/bin/env python
print "Hola\n\n\tmundo"
#Para que la impresion se realizara en la misma linea tendriamos
# que colocar una coma al final de la sentencia
for i in range(3):
print i,
print "\n"
for i in range(3):
print i
#Diferencias entre , y el + en las cadenas: al utilizar las comas
#print introduce automaticamente un espacio para separar cada una
# de las cadenas. Este no es el caso al utilizar el operador +,
# ya que lo que le llega a print es un solo argumento: una cadena
# ya concatenada. Ademas, al utilizar el operador + tendriamos que
# convertir antes cada argumento en una cadena de no serlo ya,
# ya que no es posible concatenar cadenas y otros tipos, mientras
# que al usar el primer metodo no es necesaria la conversion
print "Cuesta", 3, "euros"
#print "Cuesta" + 3 + "euros" #Esto saldria error
# Formateo de salida estandar
print "Hola %s" % "mundo"
print "%s %s" % ("Hola", "mundo")
print "%10s mundo" % "Hola" #Numero de caracteres de esa cadena parametro
print "%- | 10s mundo" % "Hola" #Numero de caracteres de esa cadena
# en este caso | al ser de 4 establecera una separacion de 6 caracteres
# a mundo
from math import pi
print "%.4f" % pi # el .4f especifica el numero de decimales
print "%.4s" % "hola mundo" #especifica el tam maximo de la cadena parametro
|
eJRF/ejrf | questionnaire/features/locations_steps.py | Python | bsd-3-clause | 2,468 | 0.004457 | from lettuce import step, world
from questionnaire.features.pages.locations import ListRegionsPage, ListCountriesPage
from questionnaire.features.pages.step_utils import create_user_with_no_permissions, assign
from questionnaire.features.pages.users import LoginPage
from questionnaire.models.locations import Region, Country, Organization
@step(u'Given I am logged in as a data submitter')
def given_i_am_logged_in(step):
world.user, world.uganda, world.region = create_user_with_no_permissions()
world.user = assign('can_submit_responses', world.user)
world.page = LoginPage(world.browser)
world.page.visit()
world.page.login(world.user, 'pass')
@step(u'And I have two regions')
def given_i_have_two_regions(step):
    """Create a WHO organization holding the AFRO and PAHO regions."""
    org = Organization.objects.create(name="WHO")
    world.org = org
    world.afro = Region.objects.create(name="AFRO", organization=org)
    world.paho = Region.objects.create(name="PAHO", organization=org)
@step(u'And I visit the list regions page')
def and_i_visit_the_list_regions_page(step):
world.page = ListRegionsPage(world.browser)
world.page.visit()
@step(u'Then I should see the list of regions')
def then_i_should_see_the_list_of_regions(step):
world.page.validate_region_list([world.afro, world.paho])
@step(u'Given I have two countries in a region')
def given_i_have_two_countries_in_a_region(step):
world.org = Organization.objects.create(name="WHO")
world.afro = Region.objects.create( | name="AFRO", organization=world.org)
world.uganda = Country.objects.create(name="Uganda", code="UGX")
world.uganda.regions.add(world.afro)
world.kenya = Country.objects.create(name="Kenya", code="KSX")
world.kenya.regions.add(world.afro)
@step(u | 'And I visit the list countries page in that region')
def and_i_visit_the_list_countries_page_in_that_region(step):
world.page = ListCountriesPage(world.browser, world.afro)
world.page.visit()
@step(u'Then I should see the list of countries in that region')
def then_i_should_see_the_list_of_countries_in_that_region(step):
world.page.validate_country_list([world.uganda, world.kenya])
@step(u'When I click on the first region name')
def when_i_click_on_the_first_region_name(step):
world.page.click_link_by_text(world.afro.name)
@step(u'Then I should see the list country page')
def then_i_should_see_the_list_country_page(step):
world.page = ListCountriesPage(world.browser, world.afro)
world.page.validate_url() |
oliverlee/antlia | python/antlia/dtc.py | Python | bsd-2-clause | 2,572 | 0.000778 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluste | r import KMeans
def di | st(a, b=None):
if b is None:
# assume 'a' is a pair
assert len(a) == 2
b = a[1]
a = a[0]
ax, ay = a
bx, by = b
return np.sqrt((ax - bx)**2 + (ay - by)**2)
def bcp(cluster_a, cluster_b):
    """Return the closest (a, b) pair across the two clusters, or None.

    Brute-force O(len(a) * len(b)) scan; ties keep the first pair found,
    and None is returned when either cluster is empty.
    """
    best = None
    shortest = np.inf
    for candidate in ((a, b) for a in cluster_a for b in cluster_b):
        # ``dist`` accepts a single (a, b) pair as well as two points.
        separation = dist(candidate)
        if separation < shortest:
            shortest, best = separation, candidate
    return best
def cluster(x, y):
    """Split the 2-D points (x, y) into two groups via k-means (k=2).

    Masked arrays are compressed (masked entries dropped) before fitting.
    The returned pair is ordered so that cluster A's first point has the
    smaller x value (i.e. A is "on the left" of B).

    Returns a pair of masked arrays of (x, y) points, one per cluster.
    """
    try:
        # Drop masked entries if x/y are numpy masked arrays.
        x = x.compressed()
        y = y.compressed()
    except AttributeError:
        pass
    x = np.reshape(x, (-1,))
    y = np.reshape(y, (-1,))
    # Stack into the (n_samples, 2) layout expected by scikit-learn.
    X = np.vstack((x, y)).transpose()
    kmeans = KMeans(2).fit(X)
    index0 = kmeans.labels_ == 0
    index1 = kmeans.labels_ == 1
    # set indices such that cluster A is on the left
    # (comparison uses each cluster's first point, not the centroid)
    if x[index0][0] >= x[index1][0]:
        index0 = kmeans.labels_ == 1
        index1 = kmeans.labels_ == 0
    cluster_a = np.ma.array(list(zip(x[index0], y[index0])))
    cluster_b = np.ma.array(list(zip(x[index1], y[index1])))
    return cluster_a, cluster_b
def plot_closest_pair(cluster_a, cluster_b,
pair=None, colors=None, ax=None, **kwargs):
if colors is None:
colors = sns.color_palette('Paired', 10)[1::2]
return_pair = False
if pair is None:
# need to calculate pair
pair = bcp(cluster_a, cluster_b)
return_pair = True
if ax is None:
_, ax = plt.subplots(**kwargs)
# plot clusters
ax.plot(*cluster_a.T,
linestyle='None',
marker='.',
color=colors[0],
label='cluster A')
ax.plot(*cluster_b.T, marker='.',
linestyle='None',
color=colors[1],
label='cluster B')
# plot closest pair with different markers
ax.plot(*pair[0],
linestyle='None',
marker='o', markersize=10,
markerfacecolor='None',
color=colors[2],
label='closest pair A')
ax.plot(*pair[1],
linestyle='None',
marker='o', markersize=10,
markerfacecolor='None',
color=colors[2],
label='closest pair B')
# plot line connecting closest pair
ax.plot(*np.vstack(pair).T,
color=colors[2],
label='closest pair line')
ax.legend()
if return_pair:
return ax, pair
return ax
|
CroissanceCommune/autonomie | autonomie/alembic/versions/1_7_client_to_custom_3f746e901aa6.py | Python | gpl-3.0 | 3,301 | 0.003029 | """1.7 : Client to Customer
Revision ID: 3f746e901aa6
Revises: 2b29f533fdfc
Create Date: 2010-10-14 14:47:39.964827
"""
# revision identifiers, used by Alembic.
revision = '3f746e901aa6'
down_revision = '29299007fe7d'
from alembic import op
import sqlalchemy as sa
foreign_key_names = (
("invoice", "invoice_ibfk_4",),
("estimation", "estimation_ibfk_3",),
("cancelin | voice", "cancelinvoice_ibfk_4",),
("manualinv", "manualinv_ibfk_2",),
)
def remove_foreign_key(table, key):
q | uery = "alter table %s drop foreign key %s;" % (table, key)
try:
op.execute(query)
has_key = True
except:
import traceback
traceback.print_exc()
print "An error occured dropping foreign key"
has_key = False
return has_key
def upgrade():
from autonomie.alembic.utils import force_rename_table
from autonomie.alembic.utils import rename_column
has_fkey = {
'estimation':False,
"invoice": False,
"cancelinvoice": False,
"manualinv": False
}
# Remove foreign keys to be able to rename columns
for table, fkey in foreign_key_names:
has_fkey[table] = remove_foreign_key(table, fkey)
# Rename columns
for table in (
'estimation',
'invoice',
'cancelinvoice',
'manualinv',
):
rename_column(table, 'client_id', 'customer_id')
op.execute("delete from %s where customer_id=0;" % table)
# Add the foreign key constraint again
for table, fkey in foreign_key_names:
if has_fkey[table]:
op.create_foreign_key(
fkey,
table,
'customer',
['customer_id'],
['id'])
remove_foreign_key("project_client", "project_client_ibfk_2")
# Rename the project client
force_rename_table('project_client', 'project_customer')
# Rename the column
rename_column('project_customer', 'client_id', 'customer_id')
op.create_foreign_key(
"project_customer_ibfk_2",
'project_customer',
'customer',
['customer_id'],
['id'])
def downgrade():
    """Revert the client -> customer rename: columns, foreign keys, link table."""
    from autonomie.alembic.utils import force_rename_table
    from autonomie.alembic.utils import rename_column
    # Drop the customer-side foreign keys so the columns can be renamed back.
    for table, key in foreign_key_names:
        remove_foreign_key(table, key)
    for table in ('estimation', 'invoice', 'cancelinvoice', 'manualinvoice'):
        fkey = "%s_ibfk_1" % table
        query = "alter table %s drop foreign key %s;" % (table, fkey)
        op.execute(query)
        rename_column(table, 'customer_id', 'client_id')
    # Recreate each table's foreign key pointing at the renamed column.
    # Bug fix: the original passed the stale ``fkey`` left over from the
    # previous loop for every table instead of this table's own ``key``.
    for table, key in foreign_key_names:
        op.create_foreign_key(
            key,
            table,
            'customer',
            ['client_id'],
            ['id'])
    force_rename_table('project_customer', 'project_client')
    # Rename the column and restore the link-table foreign key.
    remove_foreign_key("project_client", "project_customer_ibfk_2")
    rename_column('project_client', 'customer_id', 'client_id')
    op.create_foreign_key(
        "project_client_ibfk_2",
        'project_client',
        'customer',
        ['client_id'],
        ['id'])
|
jimboatarm/workload-automation | wlauto/workloads/applaunch/__init__.py | Python | apache-2.0 | 7,678 | 0.003126 | # Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
import os
from wlauto import Workload, AndroidBenchmark, AndroidUxPerfWorkload, UiAutomatorWorkload
from wlauto import Parameter
from wlauto import ExtensionLoader
from wlauto import File
from wlauto import settings
from wlauto.exceptions import ConfigError
from wlauto.exceptions import ResourceError
from wlauto.utils.android import ApkInfo
from wlauto.utils.uxperf import UxPerfParser
import wlauto.common.android.resources
class Applaunch(AndroidUxPerfWorkload):
    # Workload that wraps another uxperf workload and measures how long its
    # application takes to launch.  The wrapped workload is resolved by name
    # at runtime via the extension loader.
    name = 'applaunch'
    description = '''
    This workload launches and measures the launch time of applications for supporting workloads.
    Currently supported workloads are the ones that implement ``ApplaunchInterface``. For any
    workload to support this workload, it should implement the ``ApplaunchInterface``.
    The corresponding java file of the workload associated with the application being measured
    is executed during the run. The application that needs to be
    measured is passed as a parametre ``workload_name``. The parameters required for that workload
    have to be passed as a dictionary which is captured by the parametre ``workload_params``.
    This information can be obtained by inspecting the workload details of the specific workload.
    The workload allows to run multiple iterations of an application
    launch in two modes:
    1. Launch from background
    2. Launch from long-idle
    These modes are captured as a parameter applaunch_type.
    ``launch_from_background``
        Launches an application after the application is sent to background by
        pressing Home button.
    ``launch_from_long-idle``
        Launches an application after killing an application process and
        clearing all the caches.
    **Test Description:**
    - During the initialization and setup, the application being launched is launched
      for the first time. The jar file of the workload of the application
      is moved to device at the location ``workdir`` which further implements the methods
      needed to measure the application launch time.
    - Run phase calls the UiAutomator of the applaunch which runs in two subphases.
        A. Applaunch Setup Run:
            During this phase, welcome screens and dialogues during the first launch
            of the instrumented application are cleared.
        B. Applaunch Metric Run:
            During this phase, the application is launched multiple times determined by
            the iteration number specified by the parametre ``applaunch_iterations``.
            Each of these iterations are instrumented to capture the launch time taken
            and the values are recorded as UXPERF marker values in logfile.
    '''
    supported_platforms = ['android']
    parameters = [
        Parameter('workload_name', kind=str,
                  description='Name of the uxperf workload to launch',
                  default='gmail'),
        Parameter('workload_params', kind=dict, default={},
                  description="""
                  parameters of the uxperf workload whose application launch
                  time is measured
                  """),
        Parameter('applaunch_type', kind=str, default='launch_from_background',
                  allowed_values=['launch_from_background', 'launch_from_long-idle'],
                  description="""
                  Choose launch_from_long-idle for measuring launch time
                  from long-idle. These two types are described in the class
                  description.
                  """),
        Parameter('applaunch_iterations', kind=int, default=1,
                  description="""
                  Number of iterations of the application launch
                  """),
        Parameter('report_results', kind=bool, default=True,
                  description="""
                  Choose to report results of the application launch time.
                  """),
    ]
    def __init__(self, device, **kwargs):
        super(Applaunch, self).__init__(device, **kwargs)
    def init_resources(self, context):
        """Instantiate the wrapped workload and resolve its resources."""
        super(Applaunch, self).init_resources(context)
        loader = ExtensionLoader(packages=settings.extension_packages, paths=settings.extension_paths)
        # Markers must be enabled on the wrapped workload so launch times can
        # be read back from the UXPERF log markers.
        self.workload_params['markers_enabled'] = True
        self.workload = loader.get_workload(self.workload_name, self.device,
                                            **self.workload_params)
        # This workload's uiauto apk will not be installed -- automation will be loaded directly form a path
        # so do not uninstall during teardown
        self.workload.uninstall_uiauto_apk = False
        self.init_workload_resources(context)
        self.package = self.workload.package
    def init_workload_resources(self, context):
        """Locate the wrapped workload's UI automation APK and its on-device path."""
        self.workload.uiauto_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self.workload, uiauto=True))
        if not self.workload.uiauto_file:
            raise ResourceError('No UI automation Uiauto APK file found for workload {}.'.format(self.workload.name))
        self.workload.device_uiauto_file = self.device.path.join(self.device.working_directory, os.path.basename(self.workload.uiauto_file))
        if not self.workload.uiauto_package:
            self.workload.uiauto_package = os.path.splitext(os.path.basename(self.workload.uiauto_file))[0]
    def validate(self):
        """Validate both this workload and the wrapped one, then forward parameters."""
        super(Applaunch, self).validate()
        self.workload.validate()
        self.pass_parameters()
    def pass_parameters(self):
        """Copy the wrapped workload's identifiers and our settings into uiauto_params."""
        self.uiauto_params['workload'] = self.workload.name
        self.uiauto_params['package_name'] = self.workload.package
        self.uiauto_params.update(self.workload.uiauto_params)
        if self.workload.activity:
            self.uiauto_params['launch_activity'] = self.workload.activity
        else:
            # The automation side expects the literal string "None" when no
            # explicit launch activity is available.
            self.uiauto_params['launch_activity'] = "None"
        self.uiauto_params['applaunch_type'] = self.applaunch_type
        self.uiauto_params['applaunch_iterations'] = self.applaunch_iterations
    def setup(self, context):
        """Install/prepare the wrapped app, then stage the automation jar on device."""
        # Order matters: the APK must be set up before UI automation is staged.
        AndroidBenchmark.setup(self.workload, context)
        if not self.workload.launch_main:
            self.workload.launch_app()
        UiAutomatorWorkload.setup(self, context)
        self.workload.device.push_file(self.workload.uiauto_file, self.workload.device_uiauto_file)
    def run(self, context):
        """Execute the UI automation (setup run + instrumented metric runs)."""
        UiAutomatorWorkload.run(self, context)
    def update_result(self, context):
        """Optionally parse launch-time UXPERF markers out of the logcat dump."""
        super(Applaunch, self).update_result(context)
        if self.report_results:
            parser = UxPerfParser(context, prefix='applaunch_')
            logfile = os.path.join(context.output_directory, 'logcat.log')
            parser.parse(logfile)
            parser.add_action_timings()
    def teardown(self, context):
        """Tear down both this workload and the wrapped application."""
        super(Applaunch, self).teardown(context)
        AndroidBenchmark.teardown(self.workload, context)
        # NOTE(review): unlike setup(), this passes self.workload (not self) to
        # UiAutomatorWorkload.teardown -- looks intentional (the staged uiauto
        # file belongs to the wrapped workload) but confirm against upstream.
        UiAutomatorWorkload.teardown(self.workload, context)
|
gaursagar/cclib | src/cclib/parser/logfileparser.py | Python | bsd-3-clause | 18,434 | 0.003743 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Generic output file parser and related tools"""
import bz2
import fileinput
import gzip
import inspect
import io
import logging
import os
import random
import sys
import zipfile
import numpy
from . import utils
from .data import ccData
from .data import ccData_optdone_bool
# This seems to avoid a problem with Avogadro.
# NOTE(review): this disables collection of multiprocessing info in log
# records (standard `logging` module flag); presumably a workaround for
# Avogadro's embedded interpreter -- confirm before removing.
logging.logMultiprocessing = 0
class myBZ2File(bz2.BZ2File):
    """BZ2File whose line iteration yields str instead of bytes.

    Undecodable bytes are replaced rather than raising, since log files may
    contain stray non-ASCII characters.
    """
    def __next__(self):
        raw = super(bz2.BZ2File, self).__next__()
        return raw.decode("ascii", "replace")
    # Python 2 style alias so callers may also use .next().
    next = __next__
class myGzipFile(gzip.GzipFile):
    """GzipFile whose line iteration yields str instead of bytes.

    Undecodable bytes are replaced rather than raising.
    """
    def __next__(self):
        parent = super(gzip.GzipFile, self)
        # Different gzip versions expose either next() or __next__();
        # use whichever the base class provides.
        if hasattr(parent, 'next'):
            raw = parent.next()
        else:
            raw = parent.__next__()
        return raw.decode("ascii", "replace")
    # Python 2 style alias so callers may also use .next().
    next = __next__
class myFileinputFile(fileinput.FileInput):
    """FileInput subclass that also offers a Python 2 style next() method."""
    def next(self):
        # Delegate to the iterator protocol implemented by FileInput.
        return self.__next__()
class FileWrapper(object):
    """Wrap a file-like object or stream with some custom tweaks"""
    def __init__(self, source, pos=0):
        """Wrap `source`, probing for its size and seeking to `pos` if possible.

        `source` may be a real file or a stream without seek/tell (e.g. a
        urllib response); in the latter case size is taken from HTTP headers
        when available, otherwise it defaults to `pos`.
        """
        self.src = source
        # Most file-like objects have seek and tell methods, but streams returned
        # by urllib.urlopen in Python2 do not, which will raise an AttributeError
        # in this code. On the other hand, in Python3 these methods do exist since
        # urllib uses the stream class in the io library, but they raise a different
        # error, namely io.UnsupportedOperation. That is why it is hard to be more
        # specific with except block here.
        try:
            self.src.seek(0, 2)
            self.size = self.src.tell()
            self.src.seek(pos, 0)
        except (AttributeError, IOError, io.UnsupportedOperation):
            # Stream returned by urllib should have size information.
            if hasattr(self.src, 'headers') and 'content-length' in self.src.headers:
                self.size = int(self.src.headers['content-length'])
            else:
                self.size = pos
        # Assume the position is what was passed to the constructor.
        self.pos = pos
    def next(self):
        # Advance one line, keeping our own byte position in sync so that
        # seek/tell-less streams still report progress via self.pos.
        line = next(self.src)
        self.pos += len(line)
        return line
    def __next__(self):
        return self.next()
    def __iter__(self):
        return self
    def close(self):
        # Close the underlying source stream.
        self.src.close()
    def seek(self, pos, ref):
        """Seek the underlying stream, emulating seek-to-end for raw streams.

        `ref` follows the usual whence convention: 0 absolute, 1 relative,
        2 from end. self.pos is updated to mirror the requested position.
        """
        # If we are seeking to end, we can emulate it usually. As explained above,
        # we cannot be too specific with the except clause due to differences
        # between Python2 and 3. Yet another reason to drop Python 2 soon!
        try:
            self.src.seek(pos, ref)
        except:
            if ref == 2:
                # Unseekable stream: consume the remainder to reach the end.
                self.src.read()
            else:
                raise
        if ref == 0:
            self.pos = pos
        if ref == 1:
            self.pos += pos
        if ref == 2 and hasattr(self, 'size'):
            self.pos = self.size
def openlogfile(filename, object=None):
    """Return a file object given a filename or if object specified decompresses it
    if needed and wrap it up.

    Given the filename or file object of a log file or a gzipped, zipped, or bzipped
    log file, this function returns a file-like object.

    Given a list of filenames, this function returns a FileInput object,
    which can be used for seamless iteration without concatenation.
    """
    # A single string argument is a path; the extension decides decompression.
    # (was `type(filename) in [str, str]` -- a Python 2 [str, unicode] leftover)
    if isinstance(filename, str):
        extension = os.path.splitext(filename)[1]
        if extension == ".gz":
            fileobject = myGzipFile(filename, "r", fileobj=object)
        elif extension == ".zip":
            zip_file = zipfile.ZipFile(object, "r") if object else zipfile.ZipFile(filename, "r")
            assert len(zip_file.namelist()) == 1, "ERROR: Zip file contains more than 1 file"
            fileobject = io.StringIO(zip_file.read(zip_file.namelist()[0]).decode("ascii", "ignore"))
        elif extension in ['.bz', '.bz2']:
            # Module 'bz2' is not always importable.
            assert bz2 is not None, "ERROR: module bz2 cannot be imported"
            fileobject = myBZ2File(object, "r") if object else myBZ2File(filename, "r")
        else:
            # Assuming that object is text file encoded in utf-8
            fileobject = io.StringIO(object.decode('utf-8')) if object \
                else FileWrapper(io.open(filename, "r", errors='ignore'))
        return fileobject
    elif hasattr(filename, "__iter__"):
        # This is needed, because fileinput will assume stdin when filename is empty.
        if len(filename) == 0:
            return None
        # Compression (gzip and bzip) is supported as of Python 2.5.
        # (was `version_info[0] >= 2 and version_info[1] >= 5`, which wrongly
        # failed for Python 3.0-3.4; compare the tuple instead)
        if sys.version_info >= (2, 5):
            fileobject = fileinput.input(filename, openhook=fileinput.hook_compressed)
        else:
            fileobject = myFileinputFile(filename)
        return fileobject
class Logfile(object):
"""Abstract class for logfile objects.
Subclasses defined by cclib:
ADF, DALTON, GAMESS, GAMESSUK, Gaussian, Jaguar, Molpro, MOPAC,
NWChem, ORCA, Psi, Q-Chem
"""
def __init__(self, source, loglevel=logging.INFO, logname="Log",
logstream=sys.stdout, datatype=ccData_optdone_bool, **kwds):
"""Initialise the Logfile object.
This should be called by a subclass in its own __init__ method.
Inputs:
source - a logfile, list of logfiles, or stream with at least a read method
loglevel - integer corresponding to a log level from the logging module
logname - name of the source logfile passed to this constructor
logstream - where to output the logging information
datatype - class to use for gathering data attributes
"""
# Set the filename to source if it is a string or a list of strings, which are
# assumed to be filenames. Otherwise, assume the source is a file-like object
# if it has a read method, and we will try to use it like a stream.
if isinstance(source, str):
self.filename = source
self.isstream = False
elif isinstance(source, list) and all([isinstance(s, str) for s in source]):
self.filename = source
self.isstream = False
elif hasattr(source, "read"):
self.filename = "stream %s" % str(type(source))
self.isstream = True
self.stream = source
else:
raise ValueError
# Set up the logger.
# Note that calling logging.getLogger() with one name always returns the same instance.
# Presently in cclib, all parser instances of the same class use the same logger,
# which means that care needs to be taken not to duplicate handlers.
self.loglevel = loglevel
self.logname = logname
self.logger = logging.getLogger('%s %s' % (self.logname, self.filename))
self.logger.setLevel(self.loglevel)
if len(self.logger.handlers) == 0:
handler = logging.StreamHandler(logstream)
handler.setFormatter(logging.Formatter("[%(name)s %(levelname)s] %(message)s"))
self.logger.addHandler(handler)
# Set up the metadata.
if not hasattr(self, "metadata"):
self.metadata = {}
self.metadata["package"] = self.logname
self.metadata["methods"] = []
# Periodic table of elements.
self.table = utils.PeriodicTable()
# This is the class that will be used in the data object returned by parse(), and shou |
lumidify/fahrenheit451 | EditorClasses.py | Python | gpl-2.0 | 37,759 | 0.005058 | import os
import sys
import math
import pygame
import loader
import importlib
import tkinter as tk
from QuadTree import *
from tkinter import ttk
from CONSTANTS import *
from pygame.locals import *
# On-screen size in pixels of one isometric tile (2:1 diamond).
# NOTE(review): CONSTANTS is star-imported above; if it also defines these
# names, the values below override them -- confirm which is intended.
TILEWIDTH = 128
TILEHEIGHT = 64
def load_module(path):
    """Load and execute a Python module from an arbitrary file path.

    Returns the executed module object (it is not registered in sys.modules).
    """
    # Explicitly import the submodule: a bare `import importlib` (as done at
    # the top of this file) does not guarantee importlib.util is available.
    import importlib.util
    spec = importlib.util.spec_from_file_location("module.name", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def calculate_rect(grid_pos, borders):
    """Build the pixel-space Rect for `grid_pos` expanded by `borders`.

    `borders` is (left, right, top, bottom) in grid units; negative values
    extend the rect, and the total size is the sum of their magnitudes.
    """
    left = (grid_pos[0] + borders[0]) * WIDTH
    top = (grid_pos[1] + borders[2]) * HEIGHT
    rect_width = (abs(borders[0]) + abs(borders[1])) * WIDTH
    rect_height = (abs(borders[2]) + abs(borders[3])) * HEIGHT
    return Rect(left, top, rect_width, rect_height)
class Floor():
    """Layered isometric tile map: loading, editing, saving and drawing."""
    def __init__(self, screen, tiles):
        # screen: pygame surface to draw on; tiles: tile-id -> tile dict table.
        self.screen = screen
        self.tiles = tiles
        # List of layers; each layer is a 2D list of tile ids (-1 == empty).
        self.layers = []
        self.default_tile = pygame.image.load("grid.png").convert_alpha()
        # Map size as [columns, rows].
        self.size = [0, 0]
    def populate_listbox(self, listbox):
        """Rewrite the Tk listbox so it shows one entry per layer."""
        listbox.delete(0, "end")
        for x in range(len(self.layers)):
            listbox.insert("end", "Layer " + str(x))
    def insert_layer(self, index, listbox):
        """Insert an empty layer at `index` and refresh the listbox."""
        self.layers.insert(index, [[-1 for x in range(self.size[0])] for y in range(self.size[1])])
        self.populate_listbox(listbox)
    def remove_layer(self, index, listbox):
        """Delete the layer at `index` and refresh the listbox."""
        self.layers.pop(index)
        self.populate_listbox(listbox)
    def load_tilemap(self, tilemap_path, size):
        """Load layers from a tilemap module file (whitespace-separated ids)."""
        self.layers = []
        self.size = size
        temp = load_module(tilemap_path)
        for layer in temp.layers:
            self.layers.append([])
            for line in layer.strip().splitlines():
                self.layers[-1].append([int(x) for x in line.strip().split()])
        # Normalise all layers to self.size (pad with -1 / truncate).
        self.change_size()
    def replace_tile(self, tile_id, tile_pos, layer):
        """Set the tile at (x, y) == tile_pos on the given layer."""
        self.layers[layer][tile_pos[1]][tile_pos[0]] = tile_id
    def create_map(self, size):
        """Start a fresh map of `size` with a single empty layer."""
        self.size = size
        self.layers = [[]]
        self.change_size()
    def change_size(self):
        """Grow/shrink every layer in place to match self.size."""
        if self.size[0] == 0 or self.size[1] == 0:
            self.layers = [[] for x in self.layers]
        for layer in self.layers:
            for line in layer:
                # Slice-assign so the row object is mutated in place
                # (other references to the row stay valid).
                if len(line) > self.size[0]:
                    line[:] = line[:self.size[0]]
                elif len(line) < self.size[0]:
                    line += [-1 for x in range(self.size[0] - len(line))]
            if len(layer) > self.size[1]:
                layer[:] = layer[:self.size[1]]
            elif len(layer) < self.size[1]:
                layer += [[-1 for x in range(self.size[0])] for y in range(self.size[1]- len(layer))]
    def save(self, path):
        """Write the map as a Python module defining `layers`.

        Each layer is serialized as one string; ids are right-justified per
        layer so columns line up when re-read.
        """
        temp_layers = []
        for layer in self.layers:
            temp_layers.append("")
            max_width = 0
            for line in layer:
                line = [str(x) for x in line]
                line_max = max([len(x) for x in line])
                if line_max > max_width:
                    max_width = line_max
            for line in layer:
                temp_layers[-1] = temp_layers[-1] + " ".join([str(x).rjust(max_width) for x in line]) + "\n"
        with open(path, "w") as f:
            f.write("layers = " + repr(temp_layers))
    def draw_cursor(self, tile_id, pos, screen_offset):
        """Draw the tile under the mouse cursor at grid position `pos`."""
        if tile_id != -1:
            tile_dict = self.tiles[tile_id]
            # Animated tiles store frames under "images"; use the first frame.
            if not tile_dict.get("image", None):
                tile_dict = tile_dict["images"][0]
            # Grid -> isometric screen coordinates (2:1 diamond projection).
            isox = (pos[0] - pos[1]) * (TILEWIDTH // 2) + tile_dict["offset"][0]
            isoy = (pos[0] + pos[1]) * (TILEHEIGHT // 2) + tile_dict["offset"][1] + TILEHEIGHT // 2
            self.screen.blit(tile_dict["image"], (isox + screen_offset[0], isoy + screen_offset[1]), tile_dict["region"])
    def draw(self, screen_offset):
        """Blit every layer bottom-up, then overlay the editing grid."""
        if self.layers:
            for layer in self.layers:
                for line_index, line in enumerate(layer):
                    for tile_index, tile in enumerate(line):
                        if tile != -1:
                            tile_dict = self.tiles[tile]
                            if not tile_dict.get("image", None):
                                tile_dict = tile_dict["images"][0]
                            isox = (tile_index - line_index) * (TILEWIDTH // 2) + tile_dict["offset"][0]
                            isoy = (tile_index + line_index) * (TILEHEIGHT // 2) + tile_dict["offset"][1] + TILEHEIGHT // 2
                            self.screen.blit(tile_dict["image"], (isox + screen_offset[0], isoy + screen_offset[1]), tile_dict["region"])
            # Grid overlay sized after the first layer's dimensions.
            for y in range(len(self.layers[0])):
                for x in range(len(self.layers[0][0])):
                    isox = (x - y) * (TILEWIDTH // 2) - 64
                    isoy = (x + y) * (TILEHEIGHT // 2)
                    self.screen.blit(self.default_tile, (isox + screen_offset[0], isoy + screen_offset[1]))
class BasicObstacle():
    """Axis-aligned rectangular obstacle on the isometric grid.

    Keeps both a pixel-space collision rect and the four isometric corner
    points used for outline drawing.
    """
    def __init__(self, screen, **kwargs):
        self.screen = screen
        for attr, default in (("x", 0), ("y", 0), ("width", 1),
                              ("height", 1), ("type", "RECT")):
            setattr(self, attr, kwargs.get(attr, default))
        self.identifier = kwargs.get("id", None)
        self.selected = False
        self.generate_points()
    def set_values(self, **kwargs):
        """Update position/size/id from keyword args and recompute geometry."""
        for attr in ("x", "y", "width", "height"):
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.identifier = kwargs.get("id", self.identifier)
        self.generate_points()
    def select(self):
        self.selected = True
    def deselect(self):
        self.selected = False
    def generate_points(self):
        """Recompute the collision rect and the four isometric corners."""
        half_w = TILEWIDTH // 2
        half_h = TILEHEIGHT // 2
        x0, y0 = self.x, self.y
        x1, y1 = self.x + self.width, self.y + self.height
        self.rect = Rect(x0 * WIDTH, y0 * HEIGHT, self.width * WIDTH, self.height * HEIGHT)
        # Grid -> isometric projection: screen_x ~ (gx - gy), screen_y ~ (gx + gy).
        self.topleft = ((x0 - y0) * half_w, (x0 + y0) * half_h)
        self.bottomleft = ((x0 - y1) * half_w, (x0 + y1) * half_h)
        self.topright = ((x1 - y0) * half_w, (x1 + y0) * half_h)
        self.bottomright = ((x1 - y1) * half_w, (x1 + y1) * half_h)
    def resize(self, height, width):
        """Change size (in grid units) and recompute geometry."""
        self.height = height
        self.width = width
        self.generate_points()
    def move(self, x, y):
        """Move to grid position (x, y) and recompute geometry."""
        self.x = x
        self.y = y
        self.generate_points()
    def get_rect(self, rect_type):
        # Only the pixel-space rect is exposed; anything else yields None.
        if rect_type == "rect":
            return self.rect
    def get_dict(self):
        """Serializable description of this obstacle."""
        return dict(x=self.x, y=self.y, width=self.width,
                    height=self.height, type=self.type)
    def draw(self, screen_offset):
        """Outline the obstacle; red when selected, white otherwise."""
        color = (255, 0, 0) if self.selected else (255, 255, 255)
        corners = [
            [px + screen_offset[0], py + screen_offset[1]]
            for px, py in (self.topleft, self.topright,
                           self.bottomright, self.bottomleft)
        ]
        pygame.draw.lines(self.screen, color, True, corners, 5)
class Trigger(BasicObstacle):
def __init__(self, screen, **kwargs):
super().__init__(screen, **kwargs)
self.trigger = kwargs.get("trigger", None)
self.deactivate_after_use = kwargs.get("deactivate_after_use", False)
self.active = kwargs.get("active", True)
def set_values(self, **kwargs):
super().set_values(**kwargs)
self.trigger = kwargs.get("trigger", self.trigger)
self.deactivate_after_use = kwargs.get("deactivate_after_use", self.deactivate_after_use)
self.active = kwargs.get("active", self.active)
def get_dict(self):
temp = {"x": self.x, "y": self.y, "width": self.width, "height": self.height, "trigger": self.trigger}
if self.deactivate_after_use:
temp.update({"deactivate_after_use": self.deactivate_after_use})
if not self.active:
temp.update({"active": self.active})
if self.identifier:
temp.update({"id": self.identifi |
tensorflow/privacy | tensorflow_privacy/privacy/dp_query/tree_range_query_test.py | Python | apache-2.0 | 7,555 | 0.006089 | # Copyright 2021, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `tree_range_query`."""
import math
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_privacy.privacy.dp_query import tree_range_query
class BuildTreeTest(tf.test.TestCase, parameterized.TestCase):
  # Checks `_build_tree_from_leaf` over several leaf counts, arities and
  # dtypes. The parameterization defines the generated test ids, so the
  # decorator arguments must stay as-is.
  @parameterized.product(
      leaf_nodes_size=[1, 2, 3, 4, 5],
      arity=[2, 3],
      dtype=[tf.int32, tf.float32],
  )
  def test_build_tree_from_leaf(self, leaf_nodes_size, arity, dtype):
    """Test whether `_build_tree_from_leaf` will output the correct tree."""
    leaf_nodes = tf.cast(tf.range(leaf_nodes_size), dtype)
    # Expected depth of a complete `arity`-ary tree over the leaves.
    depth = math.ceil(math.log(leaf_nodes_size, arity)) + 1
    tree = tree_range_query._build_tree_from_leaf(leaf_nodes, arity)
    self.assertEqual(depth, tree.shape[0])
    # Every internal node must equal the sum of the leaves it spans.
    for layer in range(depth):
      reverse_depth = tree.shape[0] - layer - 1
      span_size = arity**reverse_depth
      for idx in range(arity**layer):
        left = idx * span_size
        right = (idx + 1) * span_size
        expected_value = sum(leaf_nodes[left:right])
        self.assertEqual(tree[layer][idx], expected_value)
class TreeRangeSumQueryTest(tf.test.TestCase, parameterized.TestCase):
  # End-to-end checks for TreeRangeSumQuery with both the central Gaussian
  # and the distributed discrete Gaussian inner queries. Parameterization
  # defines the generated test ids, so decorator arguments must stay as-is.
  @parameterized.product(
      inner_query=['central', 'distributed'],
      params=[(0., 1., 2), (1., -1., 2), (1., 1., 1)],
  )
  def test_raises_error(self, inner_query, params):
    # Invalid clip norm (0), stddev (<0) or arity (<2) must be rejected.
    clip_norm, stddev, arity = params
    with self.assertRaises(ValueError):
      if inner_query == 'central':
        tree_range_query.TreeRangeSumQuery.build_central_gaussian_query(
            clip_norm, stddev, arity)
      elif inner_query == 'distributed':
        tree_range_query.TreeRangeSumQuery.build_distributed_discrete_gaussian_query(
            clip_norm, stddev, arity)
  @parameterized.product(
      inner_query=['central', 'distributed'],
      clip_norm=[0.1, 1.0, 10.0],
      stddev=[0.1, 1.0, 10.0])
  def test_initial_global_state_type(self, inner_query, clip_norm, stddev):
    # The initial state must be the query's own GlobalState namedtuple.
    if inner_query == 'central':
      query = tree_range_query.TreeRangeSumQuery.build_central_gaussian_query(
          clip_norm, stddev)
    elif inner_query == 'distributed':
      query = tree_range_query.TreeRangeSumQuery.build_distributed_discrete_gaussian_query(
          clip_norm, stddev)
    global_state = query.initial_global_state()
    self.assertIsInstance(global_state,
                          tree_range_query.TreeRangeSumQuery.GlobalState)
  @parameterized.product(
      inner_query=['central', 'distributed'],
      clip_norm=[0.1, 1.0, 10.0],
      stddev=[0.1, 1.0, 10.0],
      arity=[2, 3, 4])
  def test_derive_sample_params(self, inner_query, clip_norm, stddev, arity):
    # Sample params are (arity, inner-query params); check both halves.
    if inner_query == 'central':
      query = tree_range_query.TreeRangeSumQuery.build_central_gaussian_query(
          clip_norm, stddev, arity)
    elif inner_query == 'distributed':
      query = tree_range_query.TreeRangeSumQuery.build_distributed_discrete_gaussian_query(
          clip_norm, stddev, arity)
    global_state = query.initial_global_state()
    derived_arity, inner_query_state = query.derive_sample_params(global_state)
    self.assertAllClose(derived_arity, arity)
    if inner_query == 'central':
      self.assertAllClose(inner_query_state, clip_norm)
    elif inner_query == 'distributed':
      self.assertAllClose(inner_query_state.l2_norm_bound, clip_norm)
      self.assertAllClose(inner_query_state.local_stddev, stddev)
  @parameterized.product(
      (dict(arity=2, expected_tree=[1, 1, 0, 1, 0, 0, 0]),
       dict(arity=3, expected_tree=[1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])),
      inner_query=['central', 'distributed'],
  )
  def test_preprocess_record(self, inner_query, arity, expected_tree):
    # With zero noise, preprocessing a one-hot leaf record must yield the
    # flattened prefix-sum tree above it.
    if inner_query == 'central':
      query = tree_range_query.TreeRangeSumQuery.build_central_gaussian_query(
          10., 0., arity)
      record = tf.constant([1, 0, 0, 0], dtype=tf.float32)
      expected_tree = tf.cast(expected_tree, tf.float32)
    elif inner_query == 'distributed':
      query = tree_range_query.TreeRangeSumQuery.build_distributed_discrete_gaussian_query(
          10., 0., arity)
      record = tf.constant([1, 0, 0, 0], dtype=tf.int32)
    global_state = query.initial_global_state()
    params = query.derive_sample_params(global_state)
    preprocessed_record = query.preprocess_record(params, record)
    self.assertAllClose(preprocessed_record, expected_tree)
  @parameterized.named_parameters(
      ('stddev_1', 1, tf.constant([1, 0], dtype=tf.int32), [1, 1, 0]),
      ('stddev_0_1', 4, tf.constant([1, 0], dtype=tf.int32), [1, 1, 0]),
  )
  def test_distributed_preprocess_record_with_noise(self, local_stddev, record,
                                                    expected_tree):
    # With local noise the result is only checked to within 10 stddevs.
    query = tree_range_query.TreeRangeSumQuery.build_distributed_discrete_gaussian_query(
        10., local_stddev)
    global_state = query.initial_global_state()
    params = query.derive_sample_params(global_state)
    preprocessed_record = query.preprocess_record(params, record)
    self.assertAllClose(
        preprocessed_record, expected_tree, atol=10 * local_stddev)
  @parameterized.product(
      (dict(
          arity=2,
          expected_tree=tf.ragged.constant([[1], [1, 0], [1, 0, 0, 0]])),
       dict(
           arity=3,
           expected_tree=tf.ragged.constant([[1], [1, 0, 0],
                                             [1, 0, 0, 0, 0, 0, 0, 0, 0]]))),
      inner_query=['central', 'distributed'],
  )
  def test_get_noised_result(self, inner_query, arity, expected_tree):
    # Noiseless end-to-end run: the noised result must be the ragged tree
    # (one row per tree level) built from the one-hot record.
    if inner_query == 'central':
      query = tree_range_query.TreeRangeSumQuery.build_central_gaussian_query(
          10., 0., arity)
      record = tf.constant([1, 0, 0, 0], dtype=tf.float32)
      expected_tree = tf.cast(expected_tree, tf.float32)
    elif inner_query == 'distributed':
      query = tree_range_query.TreeRangeSumQuery.build_distributed_discrete_gaussian_query(
          10., 0., arity)
      record = tf.constant([1, 0, 0, 0], dtype=tf.int32)
    global_state = query.initial_global_state()
    params = query.derive_sample_params(global_state)
    preprocessed_record = query.preprocess_record(params, record)
    sample_state, global_state, _ = query.get_noised_result(
        preprocessed_record, global_state)
    self.assertAllClose(sample_state, expected_tree)
  @parameterized.product(stddev=[0.1, 1.0, 10.0])
  def test_central_get_noised_result_with_noise(self, stddev):
    # With central noise the result is only checked to within 10 stddevs.
    query = tree_range_query.TreeRangeSumQuery.build_central_gaussian_query(
        10., stddev)
    global_state = query.initial_global_state()
    params = query.derive_sample_params(global_state)
    preprocessed_record = query.preprocess_record(params, tf.constant([1., 0.]))
    sample_state, global_state, _ = query.get_noised_result(
        preprocessed_record, global_state)
    self.assertAllClose(
        sample_state, tf.ragged.constant([[1.], [1., 0.]]), atol=10 * stddev)
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner when executed directly.
  tf.test.main()
|
unitecoin-org/Unitecoin | contrib/bitrpc/bitrpc.py | Python | mit | 7,840 | 0.038138 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Unitecoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Unitecoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimite | d, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == | "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
Sookhaal/auri_maya_rigging_scripts | general/center_of_gravity.py | Python | mit | 5,456 | 0.002933 | """
:created: 2017-09
:author: Alex BROSSARD <abrossard@artfx.fr>
"""
from PySide2 import QtWidgets, QtCore
from pymel import core as pmc
from auri.auri_lib import AuriScriptView, AuriScriptController, AuriScriptModel, grpbox
from auri.scripts.Maya_Scripts import rig_lib
from auri.scripts.Maya_Scripts.rig_lib import RigController
reload(rig_lib)
class View(AuriScriptView):
    """Qt view for the center-of-gravity rig module.

    Exposes two combo boxes to choose the parent module and its output,
    plus buttons to refresh that list and to run the controller's prebuild.
    """

    def __init__(self, *args, **kwargs):
        # Create the widgets before calling the base __init__, which
        # presumably invokes setup_ui() -- TODO confirm in AuriScriptView.
        self.modules_cbbox = QtWidgets.QComboBox()
        self.outputs_cbbox = QtWidgets.QComboBox()
        self.refresh_btn = QtWidgets.QPushButton("Refresh")
        self.prebuild_btn = QtWidgets.QPushButton("Prebuild")
        super(View, self).__init__(*args, **kwargs)

    def set_controller(self):
        # MVC wiring: bind this view and its model to a new controller.
        self.ctrl = Controller(self.model, self)

    def set_model(self):
        self.model = Model()

    def refresh_view(self):
        # Re-scan the scene for candidate parent modules/outputs.
        self.ctrl.look_for_parent()

    def setup_ui(self):
        """Hook up the combo boxes and buttons and build the layout."""
        self.modules_cbbox.setModel(self.ctrl.modules_with_output)
        self.modules_cbbox.currentTextChanged.connect(self.ctrl.on_modules_cbbox_changed)
        self.outputs_cbbox.setModel(self.ctrl.outputs_model)
        self.outputs_cbbox.currentTextChanged.connect(self.ctrl.on_outputs_cbbox_changed)
        self.refresh_btn.clicked.connect(self.ctrl.look_for_parent)
        self.prebuild_btn.clicked.connect(self.ctrl.prebuild)
        # Layout: a "Select parent" group (combo boxes + refresh button)
        # stacked above the prebuild button.
        main_layout = QtWidgets.QVBoxLayout()
        select_parent_layout = QtWidgets.QVBoxLayout()
        select_parent_grp = grpbox("Select parent", select_parent_layout)
        cbbox_layout = QtWidgets.QHBoxLayout()
        cbbox_layout.addWidget(self.modules_cbbox)
        cbbox_layout.addWidget(self.outputs_cbbox)
        select_parent_layout.addLayout(cbbox_layout)
        select_parent_layout.addWidget(self.refresh_btn)
        main_layout.addWidget(select_parent_grp)
        main_layout.addWidget(self.prebuild_btn)
        self.setLayout(main_layout)
class Controller(RigController):
    def __init__(self, model, view):
        """
        Args:
            model (Model):
            view (View):
        """
        # Guide locator (and its group) created/reused by prebuild().
        self.guides_grp = None
        self.guide = None
        self.guide_name = "None"
        RigController.__init__(self, model, view)

    def prebuild(self):
        """Create (or reuse) the placement guide locator for the COG.

        If a guide with the expected name already exists in the scene it is
        reused and simply made visible; otherwise a new space locator is
        created at a default height and grouped.
        """
        self.create_temporary_outputs(["OUTPUT"])
        self.guide_name = "{0}_GUIDE".format(self.model.module_name)
        if self.guide_check(self.guide_name):
            # Reuse the existing guide and unhide its group.
            self.guide = pmc.ls(self.guide_name)
            self.guides_grp = pmc.ls("{0}_guides".format(self.model.module_name))[0]
            self.guides_grp.setAttr("visibility", 1)
            self.view.refresh_view()
            pmc.select(cl=1)
            return
        # No guide yet: create one at a default position above the origin.
        self.guide = pmc.spaceLocator(p=(0, 0, 0), n=self.guide_name)
        self.guide.setAttr("translate", (0, 7.5, 0))
        self.guides_grp = self.group_guides(self.guide)
        self.view.refresh_view()
        pmc.select(cl=1)

    def execute(self):
        """Build the center-of-gravity control at the guide's position."""
        self.prebuild()

        self.delete_existing_objects()
        self.connect_to_parent()

        # Build the COG control and offset group at the guide position.
        cog_shape = rig_lib.large_box_curve("{0}_CTRL_shape".format(self.model.module_name))
        cog_ctrl = rig_lib.create_jnttype_ctrl(name="{0}_CTRL".format(self.model.module_name), shape=cog_shape,
                                               drawstyle=2)

        cog_ofs = pmc.group(cog_ctrl, n="{0}_ctrl_OFS".format(self.model.module_name))
        cog_ofs.setAttr("translate", pmc.xform(self.guide, q=1, ws=1, translation=1))

        pmc.parent(cog_ofs, self.ctrl_input_grp)

        rig_lib.create_output(name="{0}_OUTPUT".format(self.model.module_name), parent=cog_ctrl)
        # Lock/hide scale channels on the control.
        rig_lib.clean_ctrl(cog_ctrl, 20, trs="s")

        # Hide the rig-internal groups now that the build is done.
        self.jnt_input_grp.setAttr("visibility", 0)
        self.parts_grp.setAttr("visibility", 0)
        self.guides_grp.setAttr("visibility", 0)

        # INFO curve: hidden, locked carrier for module metadata attributes.
        info_crv = rig_lib.signature_shape_curve("{0}_INFO".format(self.model.module_name))
        info_crv.getShape().setAttr("visibility", 0)
        info_crv.setAttr("hiddenInOutliner", 1)
        info_crv.setAttr("translateX", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("translateY", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("translateZ", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("rotateX", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("rotateY", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("rotateZ", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("scaleX", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("scaleY", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("scaleZ", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("visibility", lock=True, keyable=False, channelBox=False)
        info_crv.setAttr("overrideEnabled", 1)
        info_crv.setAttr("overrideDisplayType", 2)
        pmc.parent(info_crv, self.parts_grp)

        # Store module identity and parenting choices as extra attributes.
        rig_lib.add_parameter_as_extra_attr(info_crv, "Module", "cog")
        rig_lib.add_parameter_as_extra_attr(info_crv, "parent_Module", self.model.selected_module)
        rig_lib.add_parameter_as_extra_attr(info_crv, "parent_output", self.model.selected_output)

        pmc.select(cl=1)
class Model(AuriScriptModel):
    """Persisted settings of the center-of-gravity module."""

    def __init__(self):
        AuriScriptModel.__init__(self)
        # Parent module and output chosen in the view's combo boxes.
        self.selected_module = None
        self.selected_output = None
|
yashchandak/GNN | Sample_Run/path_attn/Config.py | Python | mit | 4,900 | 0.008163 | import tensorflow as tf
import sys, os, shutil
class Config(object):
    """Experiment configuration built from parsed command-line arguments.

    Groups filesystem paths, training hyper-parameters and three
    sub-configurations (solver, data sets, RNN architecture) that are
    defined as classes *inside* __init__ and instantiated from ``args``.
    """

    def __init__(self, args):
        self.codebase_root_path = args.path
        # Make the codebase importable from anywhere.
        sys.path.insert(0, self.codebase_root_path)

        #### Directory paths ####
        # Folder name and project name is the same
        self.project_name = args.project
        self.dataset_name = args.dataset
        self.train_percent = args.percent
        self.train_fold = args.folds

        # Relative sub-directory names used by create() below.
        self.logs_d = '/Logs/'
        self.ckpt_d = '/Checkpoints/'
        self.embed_d = '/Embeddings/'
        self.result_d = '/Results/'

        # Retrain: when True, existing output folders are kept (see
        # check_n_create()).
        self.retrain = args.retrain

        # Debug with small dataset
        self.debug = args.debug
        # Batch size
        self.batch_size = args.batch_size
        # maximum depth for trajecory from NOI
        self.max_depth = args.max_depth

        # Number of steps to run trainer
        self.max_outer_epochs = args.max_outer
        self.max_inner_epochs = args.max_inner
        self.boot_epochs = args.boot_epochs
        self.boot_reset = args.boot_reset

        # Validation frequence
        self.val_epochs_freq = args.val_freq  #1
        # Model save frequency
        self.save_epochs_after = args.save_after  #0

        # earlystopping hyperparametrs
        self.patience = args.pat  # look as this many epochs regardless
        self.patience_increase = args.pat_inc  # wait this much longer when a new best is found
        self.improvement_threshold = args.pat_improve  # a relative improvement of this much is considered significant

        # Evaluation metrics computed during validation/testing.
        self.metrics = ['coverage', 'average_precision', 'ranking_loss', 'micro_f1', 'macro_f1', 'micro_precision',
                        'macro_precision', 'micro_recall', 'macro_recall', 'p@1', 'p@3', 'p@5', 'hamming_loss',
                        'bae', 'cross-entropy', 'accuracy']

        class Solver(object):
            # Optimizer and loss-related settings.
            def __init__(self, args):
                # Initial learning rate
                self.learning_rate = args.lr
                self.label_update_rate = args.lu

                # optimizer
                if args.opt == 'adam': self.opt = tf.train.AdamOptimizer
                elif args.opt == 'rmsprop': self.opt = tf.train.RMSPropOptimizer
                elif args.opt == 'sgd': self.opt= tf.train.GradientDescentOptimizer
                else: raise ValueError('Undefined type of optmizer')

                self._optimizer = self.opt(self.learning_rate)
                self._curr_label_loss = True
                self._L2loss = args.l2
                self.wce = args.wce  # weighted cross-entropy flag
                self.gradients = args.gradients

        class Data_sets(object):
            # Input-data options.
            def __init__(self, args):
                self.reduced_dims = args.reduce
                self.binary_label_updates = args.bin_upd
                self.label_type = args.labels

        class RNNArchitecture(object):
            # Recurrent-network architecture options.
            def __init__(self, args):
                self._hidden_size = args.hidden
                # Stored as keep-probabilities (1 - dropout rate).
                self._keep_prob_in = 1 - args.drop_in
                self._keep_prob_out = 1 - args.drop_out
                self.cell = args.cell
                self.concat = args.concat
                self.attention = args.attention

        self.solver = Solver(args)
        self.data_sets = Data_sets(args)
        self.mRNN = RNNArchitecture(args)
        self.init2()

    def init2(self):
        """Derive dataset/label/feature paths from the base settings."""
        self.walks_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/walks/walks_80.txt'
        self.label_fold_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/'+self.data_sets.label_type+'/'+ str(self.train_percent) + '/' + str(self.train_fold) + '/'
        self.label_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/labels.npy'
        self.features_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/features.npy'

        #Logs and checkpoints to be stored in the code directory
        self.project_prefix_path = self.codebase_root_path+ self.project_name+'/'

    def check_n_create(self, path):
        """Create ``path``; when not retraining, wipe and recreate it."""
        if not os.path.exists(path):
            #if the path doesn't exists, create it
            os.mkdir(path)
        else:
            if not self.retrain:
                #path exists but if retrain in False, then replace previous folder with new folder
                shutil.rmtree(path)
                os.mkdir(path)

    def create(self, ext_path =""):
        """Create the logs/checkpoints/results folder tree under ext_path."""
        #create directories
        ext_path = './'+ext_path
        self.logs_dir = ext_path + self.logs_d
        self.ckpt_dir = ext_path + self.ckpt_d
        #self.embed_dir= ext_path + self.embed_d
        self.results_folder = ext_path+self.result_d

        self.check_n_create(ext_path)
        self.check_n_create(self.logs_dir)
        self.check_n_create(self.ckpt_dir)
        #self.check_n_create(self.embed_dir)
        self.check_n_create(self.results_folder)
|
kerneltask/micropython | tests/basics/async_with.py | Python | mit | 750 | 0.005333 | # test simple async with execution
class AContext:
    # Async context manager that prints markers so the test output records
    # the order of enter/body/exit events.
    async def __aenter__(self):
        print('enter')
        return 1
    async def __aexit__(self, exc_type, exc, tb):
        # Returning None (falsy) lets any exception propagate to the caller.
        print('exit', exc_type, exc)

async def f():
    async with AContext():
        print('body')

# Drive the coroutine by hand; it completes in one step.
o = f()
try:
    o.send(None)
except StopIteration:
    print('finished')

async def g():
    async with AContext() as ac:
        # `ac` is the value returned by __aenter__ (1).
        print(ac)
        raise ValueError('error')

# The exception raised in the body propagates out after __aexit__ runs.
o = g()
try:
    o.send(None)
except ValueError:
    print('ValueError')

# test raising BaseException to make sure it is handled by the async-with
async def h():
    async with AContext():
        raise BaseException

o = h()
try:
    o.send(None)
except BaseException:
    print('BaseException')
|
nop33/indico | indico/web/fields/base.py | Python | gpl-3.0 | 5,600 | 0.00125 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from copy import deepcopy
from wtforms.fields import BooleanField, StringField, TextAreaField
from wtforms.validators import DataRequired, Optional
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm
from indico.web.forms.widgets import SwitchWidget
class FieldConfigForm(IndicoForm):
    """Configuration form with the settings shared by every field type."""

    title = StringField(_('Title'), [DataRequired()], description=_("The title of the field"))
    description = TextAreaField(_('Description'), description=_("The description of the field"))
    is_required = BooleanField(_('Required'), widget=SwitchWidget(),
                               description=_("Whether the user has to fill out the field"))
class BaseField(object):
    """Base class for a custom field.

    To create a new field, subclass this class and register
    it using the `get_fields` signal.

    :param obj: The object associated with the field.
    """

    #: unique name of the field type
    name = None
    #: plugin containing this field type - assigned automatically
    plugin = None
    #: displayed name of the field type
    friendly_name = None
    #: wtform field class for this field
    wtf_field_class = None
    #: the base class for the config form
    config_form_base = FieldConfigForm
    #: the WTForm used to configure the field. this must not be an
    #: actual `Form` subclass but just a regular object containing
    #: fields. it will be mixed into `config_form_base` to create
    #: the actual form.
    config_form = None
    #: the validator to use if the field is required
    required_validator = DataRequired
    #: the validator to use if the field is not required
    not_required_validator = Optional
    #: the common settings stored on the object itself instead of
    #: the `field_data` json structure. this also specifies the
    #: order of those fields in the config form.
    common_settings = ('title', 'description', 'is_required')

    def __init__(self, obj):
        self.object = obj

    @property
    def validators(self):
        """Return a list of validators for this field"""
        return None

    @property
    def wtf_field_kwargs(self):
        """Return a dict of kwargs for this field's wtforms field"""
        return {}

    def create_wtf_field(self):
        """Return a WTForms field for this field"""
        extra_kwargs = self.wtf_field_kwargs
        return self._make_wtforms_field(self.wtf_field_class, self.validators,
                                        **extra_kwargs)

    @classmethod
    def create_config_form(cls, *args, **kwargs):
        """Create the WTForm to configure this field"""
        if cls.config_form is None:
            bases = (cls.config_form_base,)
        else:
            bases = (cls.config_form_base, cls.config_form)
        # Mix the per-type config fields into the common base form.
        form = type(b'_FieldConfigForm', bases, {})(*args, **kwargs)
        form._common_fields = cls.common_settings
        return form

    def copy_field_data(self):
        """Return a copy of the field's configuration data"""
        return deepcopy(self.object.field_data)

    def is_value_empty(self, value):
        """Check whether the stored value is considered empty.

        :param value: The database object containing the value of
                      the field
        """
        return value.data is None

    def update_object(self, data):
        """Update the object containing this field.

        :param data: A dict containing already validated data from
                     form submission.
        """
        for setting in self.common_settings:
            setattr(self.object, setting, data[setting])
        self.object.field_type = self.name
        # Everything that is not a common setting goes into field_data.
        skipped = set(self.common_settings) | {'csrf_token'}
        self.object.field_data = {name: value
                                  for name, value in data.iteritems()
                                  if name not in skipped}

    def get_friendly_value(self, value):
        """Return the human-friendly version of the field's value

        :param value: The database object containing the value of
                      the field
        """
        return '' if value is None else value

    def _make_wtforms_field(self, field_cls, validators=None, **kwargs):
        """Util to instantiate a WTForms field.

        This creates a field with the proper title, description and
        if applicable a DataRequired validator.

        :param field_cls: A WTForms field class
        :param validators: A list of additional validators
        :param kwargs: kwargs passed to the field constructor
        """
        all_validators = [] if validators is None else list(validators)
        if self.object.is_required:
            all_validators.append(self.required_validator())
        elif self.not_required_validator:
            all_validators.append(self.not_required_validator())
        return field_cls(self.object.title, all_validators,
                         description=self.object.description, **kwargs)
|
angdraug/nova | nova/api/auth.py | Python | apache-2.0 | 6,074 | 0 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
from oslo.config import cfg
from oslo.middleware import request_id
from oslo.serialization import jsonutils
import webob.dec
import webob.exc
from nova import context
from nova.i18n import _
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova import wsgi
auth_opts = [
cfg.BoolOpt('api_rate_limit',
default=False,
help=('Whether to use per-user rate limiting for the api. '
'This option is only used by v2 api. Rate limiting '
'is removed from v3 api.')),
cfg.StrOpt('auth_strategy',
default='keystone',
help='The strategy to use for auth: noauth or key | stone.'),
cfg.BoolOpt('use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
LOG = logging.getLogger(__name__)
def _load_pipeline(loader, pipeline):
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[- | 1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    pipeline = local_conf[CONF.auth_strategy]
    if not CONF.api_rate_limit:
        # Prefer the '<strategy>_nolimit' pipeline variant when rate
        # limiting is disabled, falling back to the default one.
        pipeline = local_conf.get(CONF.auth_strategy + '_nolimit', pipeline)
    pipeline = pipeline.split()
    # NOTE (Alex Xu): This is just for configuration file compatibility.
    # If the configuration file still contains 'ratelimit_v3', just ignore it.
    # We will remove this code at next release (J)
    if 'ratelimit_v3' in pipeline:
        LOG.warn(_LW('ratelimit_v3 is removed from v3 api.'))
        pipeline.remove('ratelimit_v3')
    return _load_pipeline(loader, pipeline)
def pipeline_factory_v21(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    names = local_conf[CONF.auth_strategy].split()
    return _load_pipeline(loader, names)
# NOTE(oomichi): This pipeline_factory_v3 is for passing check-grenade-dsvm.
# (Kept as a backward-compatible alias of pipeline_factory_v21.)
pipeline_factory_v3 = pipeline_factory_v21
class InjectContext(wsgi.Middleware):
    """Add a 'nova.context' to WSGI environ."""

    def __init__(self, context, *args, **kwargs):
        # The fixed RequestContext injected into every request.
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Expose the context to downstream WSGI applications.
        req.environ['nova.context'] = self.context
        return self.application
class NovaKeystoneContext(wsgi.Middleware):
    """Make a request context from keystone headers."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # User id may arrive as legacy X_USER or newer X_USER_ID; the
        # latter takes precedence when both are present.
        user_id = req.headers.get('X_USER')
        user_id = req.headers.get('X_USER_ID', user_id)
        if user_id is None:
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()

        roles = self._get_roles(req)

        if 'X_TENANT_ID' in req.headers:
            # This is the new header since Keystone went to ID/Name
            project_id = req.headers['X_TENANT_ID']
        else:
            # This is for legacy compatibility
            project_id = req.headers['X_TENANT']
        project_name = req.headers.get('X_TENANT_NAME')
        user_name = req.headers.get('X_USER_NAME')

        # Request id set by the request_id middleware, if present.
        req_id = req.environ.get(request_id.ENV_REQUEST_ID)

        # Get the auth token
        auth_token = req.headers.get('X_AUTH_TOKEN',
                                     req.headers.get('X_STORAGE_TOKEN'))

        # Build a context, including the auth_token...
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            # Only trustworthy behind a sanitizing proxy (see option help).
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        service_catalog = None
        if req.headers.get('X_SERVICE_CATALOG') is not None:
            try:
                catalog_header = req.headers.get('X_SERVICE_CATALOG')
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                # Malformed catalog JSON is a server-side error, not 401.
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))

        ctx = context.RequestContext(user_id,
                                     project_id,
                                     user_name=user_name,
                                     project_name=project_name,
                                     roles=roles,
                                     auth_token=auth_token,
                                     remote_address=remote_address,
                                     service_catalog=service_catalog,
                                     request_id=req_id)

        req.environ['nova.context'] = ctx
        return self.application

    def _get_roles(self, req):
        """Get the list of roles."""

        if 'X_ROLES' in req.headers:
            roles = req.headers.get('X_ROLES', '')
        else:
            # Fallback to deprecated role header:
            roles = req.headers.get('X_ROLE', '')
            if roles:
                LOG.warn(_LW("Sourcing roles from deprecated X-Role HTTP "
                             "header"))
        return [r.strip() for r in roles.split(',')]
|
dsibournemouth/autoweka | scripts/launch_default_experiments.py | Python | gpl-3.0 | 782 | 0.001279 | import argparse
import os
from config import *
def main():
    """Submit one qsub job per (dataset, method) pair.

    NOTE(review): load_config() injects names such as `datasets` and
    `methods` into globals(); they are not defined in this module.
    """
    parser = argparse.ArgumentParser(prog=os.path.basename(__file__))
    globals().update(load_config(parser))

    parser.add_argument('--dataset', choices=datasets, required=False)
    args = parser.parse_args()

    # override default values
    if args.dataset:
        selected_datasets = [args.dataset]
    else:
        selected_datasets = datasets

    for d in selected_datasets:
        for m in methods:
            experiment_name = '%s.%s' % (d, m)
            # NOTE(review): shell command built by string interpolation;
            # inputs are constrained by argparse choices/config here.
            command = "qsub -N %s -l q=compute %s/scripts/default_experiment.sh %s %s" % (
                experiment_name, os.environ['AUTOWEKA_PATH'], d, m)
            print(command)
            os.system(command)
if __name__ == "__main__":
main()
|
AlexisEidelman/Til | til/tests/test_liam2.py | Python | gpl-3.0 | 1,074 | 0.007449 | # -*- coding: ut | f-8 -*-
import os
import pkg_resources
from liam2.simulation import Simulation
def test_liam2_demo_files():
    """Run every working demo .yml example shipped with liam2."""
    # Locate the examples directory relative to the installed liam2 package.
    liam2_demo_directory = os.path.join(
        pkg_resources.get_distribution('liam2').location,
        '..',
        'tests',
        'examples'
        )
    excluded_files = [
        'demo_import.yml',  # non working example
        'demo02.yml',  # TODO: pb with figures
        ]
    # All .yml files in the directory except the excluded ones.
    yaml_files = [os.path.join(liam2_demo_directory, _file) for _file in os.listdir(liam2_demo_directory)
                  if os.path.isfile(os.path.join(liam2_demo_directory, _file))
                  and _file.endswith('.yml')
                  and _file not in excluded_files]
    print yaml_files
    for yaml_file in yaml_files:
        print yaml_file
        simulation = Simulation.from_yaml(
            yaml_file,
            input_dir = os.path.join(liam2_demo_directory),
            # input_file = input_file,
            output_dir = os.path.join(os.path.dirname(__file__), 'output'),
            # output_file = output_file,
            )
        simulation.run(False)
|
apitrace/apitrace | specs/wglenum.py | Python | mit | 9,086 | 0.00011 | ##########################################################################
#
# Copyright 2008-2011 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPL | IED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""WGL enum description"""
from .stdapi import *
WGLenum = FakeEnum(Int, [
"WGL_GPU_VENDOR_AMD", # 0x1F00
"WGL_GPU_RENDERER_STRING_AMD", # 0x1F01
"WGL_GPU_OPENGL_VERSION_STRING_AMD", # 0x1F02
"WGL_NUMBER_PIXEL_FORMATS_ARB", # 0x2000
"WGL_DRAW_TO_WINDOW_ARB", # 0x2001
"WGL_DRAW_TO_BITMAP_ARB", # 0x2002
"WGL_ACCELERATION_ARB", # 0x2003
"WGL_NEED_PALETTE_ARB", # 0x2004
"WGL_NEED_SYSTEM_PALETTE_ARB", # 0x2005
"WGL_SWAP_LAYER_BUFFERS_ARB", # 0x2006
"WGL_SWAP_METHOD_ARB", # 0x2007
"WGL_NUMBER_OVERLAYS_ARB", # 0x2008
"WGL_NUMBER_UNDERLAYS_ARB", # 0x2009
"WGL_TRANSPARENT_ARB", # 0x200A
"WGL_TRANSPARENT_VALUE_EXT", # 0x200B
"WGL_SHARE_DEPTH_ARB", # 0x200C
"WGL_SHARE_STENCIL_ARB", # 0x200D
"WGL_SHARE_ACCUM_ARB", # 0x200E
"WGL_SUPPORT_GDI_ARB", # 0x200F
"WGL_SUPPORT_OPENGL_ARB", # 0x2010
"WGL_DOUBLE_BUFFER_ARB", # 0x2011
"WGL_STEREO_ARB", # 0x2012
"WGL_PIXEL_TYPE_ARB", # 0x2013
"WGL_COLOR_BITS_ARB", # 0x2014
"WGL_RED_BITS_ARB", # 0x2015
"WGL_RED_SHIFT_ARB", # 0x2016
"WGL_GREEN_BITS_ARB", # 0x2017
"WGL_GREEN_SHIFT_ARB", # 0x2018
"WGL_BLUE_BITS_ARB", # 0x2019
"WGL_BLUE_SHIFT_ARB", # 0x201A
"WGL_ALPHA_BITS_ARB", # 0x201B
"WGL_ALPHA_SHIFT_ARB", # 0x201C
"WGL_ACCUM_BITS_ARB", # 0x201D
"WGL_ACCUM_RED_BITS_ARB", # 0x201E
"WGL_ACCUM_GREEN_BITS_ARB", # 0x201F
"WGL_ACCUM_BLUE_BITS_ARB", # 0x2020
"WGL_ACCUM_ALPHA_BITS_ARB", # 0x2021
"WGL_DEPTH_BITS_ARB", # 0x2022
"WGL_STENCIL_BITS_ARB", # 0x2023
"WGL_AUX_BUFFERS_ARB", # 0x2024
"WGL_NO_ACCELERATION_ARB", # 0x2025
"WGL_GENERIC_ACCELERATION_ARB", # 0x2026
"WGL_FULL_ACCELERATION_ARB", # 0x2027
"WGL_SWAP_EXCHANGE_ARB", # 0x2028
"WGL_SWAP_COPY_ARB", # 0x2029
"WGL_SWAP_UNDEFINED_ARB", # 0x202A
"WGL_TYPE_RGBA_ARB", # 0x202B
"WGL_TYPE_COLORINDEX_ARB", # 0x202C
"WGL_DRAW_TO_PBUFFER_ARB", # 0x202D
"WGL_MAX_PBUFFER_PIXELS_ARB", # 0x202E
"WGL_MAX_PBUFFER_WIDTH_ARB", # 0x202F
"WGL_MAX_PBUFFER_HEIGHT_ARB", # 0x2030
"WGL_OPTIMAL_PBUFFER_WIDTH_EXT", # 0x2031
"WGL_OPTIMAL_PBUFFER_HEIGHT_EXT", # 0x2032
"WGL_PBUFFER_LARGEST_ARB", # 0x2033
"WGL_PBUFFER_WIDTH_ARB", # 0x2034
"WGL_PBUFFER_HEIGHT_ARB", # 0x2035
"WGL_PBUFFER_LOST_ARB", # 0x2036
"WGL_TRANSPARENT_RED_VALUE_ARB", # 0x2037
"WGL_TRANSPARENT_GREEN_VALUE_ARB", # 0x2038
"WGL_TRANSPARENT_BLUE_VALUE_ARB", # 0x2039
"WGL_TRANSPARENT_ALPHA_VALUE_ARB", # 0x203A
"WGL_TRANSPARENT_INDEX_VALUE_ARB", # 0x203B
"WGL_DEPTH_FLOAT_EXT", # 0x2040
"WGL_SAMPLE_BUFFERS_ARB", # 0x2041
"WGL_SAMPLES_ARB", # 0x2042
"ERROR_INVALID_PIXEL_TYPE_ARB", # 0x2043
"WGL_GENLOCK_SOURCE_MULTIVIEW_I3D", # 0x2044
"WGL_GENLOCK_SOURCE_EXTERNAL_SYNC_I3D", # 0x2045
"WGL_GENLOCK_SOURCE_EXTERNAL_FIELD_I3D", # 0x2046
"WGL_GENLOCK_SOURCE_EXTERNAL_TTL_I3D", # 0x2047
"WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D", # 0x2048
"WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D", # 0x2049
"WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D", # 0x204A
"WGL_GENLOCK_SOURCE_EDGE_RISING_I3D", # 0x204B
"WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D", # 0x204C
"WGL_GAMMA_TABLE_SIZE_I3D", # 0x204E
"WGL_GAMMA_EXCLUDE_DESKTOP_I3D", # 0x204F
"WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D", # 0x2050
"WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D", # 0x2051
"WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D", # 0x2052
"WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D", # 0x2053
"ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB", # 0x2054
"WGL_STEREO_EMITTER_ENABLE_3DL", # 0x2055
"WGL_STEREO_EMITTER_DISABLE_3DL", # 0x2056
"WGL_STEREO_POLARITY_NORMAL_3DL", # 0x2057
"WGL_STEREO_POLARITY_INVERT_3DL", # 0x2058
"WGL_SAMPLE_BUFFERS_3DFX", # 0x2060
"WGL_SAMPLES_3DFX", # 0x2061
"WGL_BIND_TO_TEXTURE_RGB_ARB", # 0x2070
"WGL_BIND_TO_TEXTURE_RGBA_ARB", # 0x2071
"WGL_TEXTURE_FORMAT_ARB", # 0x2072
"WGL_TEXTURE_TARGET_ARB", # 0x2073
"WGL_MIPMAP_TEXTURE_ARB", # 0x2074
"WGL_TEXTURE_RGB_ARB", # 0x2075
"WGL_TEXTURE_RGBA_ARB", # 0x2076
"WGL_NO_TEXTURE_ARB", # 0x2077
"WGL_TEXTURE_CUBE_MAP_ARB", # 0x2078
"WGL_TEXTURE_1D_ARB", # 0x2079
"WGL_TEXTURE_2D_ARB", # 0x207A
"WGL_MIPMAP_LEVEL_ARB", # 0x207B
"WGL_CUBE_MAP_FACE_ARB", # 0x207C
"WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB", # 0x207D
"WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB", # 0x207E
"WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB", # 0x207F
"WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB", # 0x2080
"WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB", # 0x2081
"WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB", # 0x2082
"WGL_FRONT_LEFT_ARB", # 0x2083
"WGL_FRONT_RIGHT_ARB", # 0x2084
"WGL_BACK_LEFT_ARB", # 0x2085
"WGL_BACK_RIGHT_ARB", # 0x2086
"WGL_AUX0_ARB", # 0x2087
"WGL_AUX1_ARB", # 0x2088
"WGL_AUX2_ARB", # 0x2089
"WGL_AUX3_ARB", # 0x208A
"WGL_AUX4_ARB", # 0x208B
"WGL_AUX5_ARB", # 0x208C
"WGL_AUX6_ARB", # 0x208D
"WGL_AUX7_ARB", # 0x208E
"WGL_AUX8_ARB", # 0x208F
"WGL_AUX9_ARB", # 0x2090
"WGL_CONTEXT_MAJOR_VERSION_ARB", # 0x2091
"WGL_CONTEXT_MINOR_VERSION_ARB", # 0x2092
"WGL_CONTEXT_LAYER_PLANE_ARB", # 0x2093
"WGL_CONTEXT_FLAGS_ARB", # 0x2094
"ERROR_INVALID_VERSION_ARB", # 0x2095
"ERROR_INVALID_PROFILE_ARB", # 0x2096
"WGL_CONTEXT_RELEASE_BEHAVIOR_ARB", # 0x2097
"WGL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_ARB", # 0x2098
"WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV", # 0x20A0
"WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV", # 0x20A1
"WGL_TEXTURE_RECTANGLE_NV", # 0x20A2
"WGL_BIND_TO_TEXTURE_DEPTH_NV", # 0x20A3
"WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV", # 0x20A4
"WGL_DEPTH_TEXTURE_FORMAT_NV", # 0x20A5
"WGL_TEXTURE_DEPTH_COMPONENT_NV", # 0x20A6
"WGL_DEPTH_COMPONENT_NV", # 0x20A7
"WGL_TYPE_RGBA_UNSIGNED_FLOAT_EXT", # 0x20A8
"WGL_FRAMEBUFFER_SRGB_CAPABLE_ARB", # 0x20A9
"WGL_FLOAT_COMPONENTS_NV", # 0x20B0
"WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV", # 0x20B1
"WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV", # 0x20B2
"WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV", # 0x20B3
"WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV", # 0x20B4
"WGL_TEXTURE_FLOAT_R_NV", # 0x20B5
"WGL_TEXTURE_FLOAT_RG_NV", # 0x20B6
"WGL_TEXTURE_FLOAT_RGB_NV", # 0x20B7
"WGL_TEXTURE_FLOAT_RGBA_NV", # 0x20B8
"WGL_COLOR_SAMPLES_NV", # 0x20B9
"WGL_BIND_TO_VIDEO_RGB_NV", # 0x20C0
"WGL_BIND_TO_VIDEO_RGBA_NV", # 0x20C1
"WGL_BIND_TO_VIDEO_RGB_AND_DEPTH_NV", # 0x20C2
"WGL_VIDEO_OUT_COLOR_NV", # 0x20C3
"WGL_VIDEO_OUT_ALPHA_NV", # 0x20C4
"WGL_VIDEO_OUT_DEPTH_NV", # 0x20C5
"WGL_VIDEO_OUT_COLOR_AND_ALPHA_NV", # 0x20C6
"WGL_VIDEO_OUT_COLOR_AND_DEPTH_NV", # 0x20C7
"WGL_VIDEO_OUT_FRAME", # 0x20C8
"WGL |
endlessm/chromium-browser | native_client/src/trusted/service_runtime/linux/ld_bfd.py | Python | bsd-3-clause | 2,039 | 0.013242 | #!/usr/bin/python
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper for invoking the BFD loader
A simple script to invoke the bfd loader instead of gold.
This script is in a filename "ld" so it can be invoked from gcc
via the -B flag.
"""
# TODO(bradchen): Delete this script when Gold supports linker scripts properly.
from __future__ import print_function
import os
import subprocess
i | mport sys
def PathTo(fname):
  """Resolve fname against $PATH; absolute paths are returned unchanged.

  If no matching file is found on PATH, fname is returned as-is so the
  eventual subprocess invocation fails with a sensible error.
  """
  # Fix: the original compared fname[0] against os.pathsep (the ':' PATH
  # *list* separator), which never matches an absolute path; use isabs().
  if os.path.isabs(fname):
    return fname
  for p in os.environ["PATH"].split(os.pathsep):
    fpath = os.path.join(p, fname)
    if os.path.exists(fpath):
      return fpath
  return fname
def GccPrintName(cxx_bin, what, switch, defresult):
  """Ask the compiler driver to print a tool or file name.

  Runs "<cxx_bin> <switch>" in a shell and returns its stripped stdout.
  On a non-zero exit status, prints a diagnostic naming `what` and
  returns `defresult` instead.
  """
  # universal_newlines=True makes communicate() return str (not bytes)
  # under Python 3, so callers can concatenate the result with other
  # strings (e.g. FindLDBFD does `ld + ".bfd"`).
  popen = subprocess.Popen(cxx_bin + ' ' + switch,
                           shell=True,
                           universal_newlines=True,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE)
  result, error = popen.communicate()
  if popen.returncode != 0:
    print("Could not find %s: %s" % (what, error))
    return defresult
  return result.strip()
def FindLDBFD(cxx_bin):
  """Locate the BFD linker associated with the given compiler driver.

  Prefers an executable "<ld>.bfd" sibling of the compiler's default ld,
  falling back to the plain ld name when no such binary exists.
  """
  ld = GccPrintName(cxx_bin, 'ld', '-print-prog-name=ld', 'ld')
  ld_bfd = PathTo(ld + ".bfd")
  return ld_bfd if os.access(ld_bfd, os.X_OK) else ld
def FindLibgcc(cxx_bin):
  """Return the compiler's libgcc file name, or None if it cannot be found."""
  return GccPrintName(cxx_bin, 'libgcc', '-print-libgcc-file-name', None)
def main(args):
  """Invoke the BFD linker with the given linker arguments.

  Returns the linker's exit status, or 1 on a bad --compiler option.
  """
  # Find path to compiler, either from the command line or the environment,
  # falling back to just 'g++'
  if '--compiler' in args:
    index = args.index('--compiler')
    # Fix: guard against '--compiler' being the last argument, which used
    # to raise IndexError instead of reporting the bad option.
    if index + 1 >= len(args) or not args[index + 1]:
      sys.stderr.write("Empty --compiler option specified\n")
      return 1
    cxx_bin = args[index + 1]
    del args[index:index + 2]
  else:
    cxx_bin = os.getenv('CXX', 'g++')

  args = [FindLDBFD(cxx_bin)] + args
  libgcc = FindLibgcc(cxx_bin)
  if libgcc is not None:
    args.append(libgcc)
  return subprocess.call(args)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
jeremiak/regulations-site | regulations/generator/versions.py | Python | cc0-1.0 | 1,918 | 0.000521 | from datetime import datetime
from regulations.generator import api_reader
from regulations.generator.layers.utils import convert_to_python
def fetch_regulations_and_future_versions():
""" Returns a dict for all the regulations in the API. The dict includes
lists of future versions for each regulation. """
client = api_reader.ApiReader()
all_versions = client.all_regulations_versions()
all_versions = convert_to_python(all_versions)
regulations_future = {}
#We're only interested in future endpoint versions
for v in all_versions['versions']:
if v['regulation'] not in regulations_future:
regulations_future[v['regulation']] = []
if 'by_date' in v:
regulations_future[v['regulation']].append(v)
return regulations_future
def fetch_grouped_history(part):
client = api_reader.ApiReader()
versions = filter(lambda v: 'by_date' in v,
client.regversions(part)['versions'])
for version in versions:
version['notices'] = []
versions = sorted(convert_to_python(versions), reverse=True,
key=lambda v: v['by_date'])
today = datetime.today()
seen_current = False
for version in versions:
if version['by_date'] > today:
version['timeline'] = 'future'
elif not seen_current:
seen_current = True
version['timeline'] = 'current'
| else:
version['timeline'] = 'past'
for notice in client.notices(part)['results']:
notice = convert_to_python(notice)
for v in (v for v in versions
if v['by_date'] == notice['effective_on']):
v['notices'].append(notice)
for version in versions:
version['notices'] = sorted(version['notices'], reverse=True,
key=lambda n: n['publication_date'])
| return versions
|
DiCarloLab-Delft/PycQED_py3 | pycqed/measurement/VNA_module.py | Python | mit | 12,353 | 0.003319 | import numpy as np
from pycqed.analysis import measurement_analysis as ma
from pycqed.analysis_v2 import measurement_analysis as ma2
from pycqed.measurement import sweep_functions as swf
from pycqed.measurement import detector_functions as det
# Module-level instrument handles. They default to None and must be bound
# to a MeasurementControl and a ZNB VNA instance by the importing script
# before any of the acquire_* functions below are called.
MC_instr = None
VNA_instr = None
def acquire_single_linear_frequency_span(file_name, start_freq=None,
                                         stop_freq=None, center_freq=None,
                                         span=None, nr_avg=1, sweep_mode='auto',
                                         nbr_points=101, power=-20,
                                         bandwidth=100, measure='S21',
                                         options_dict=None):
    """
    Acquires a single trace from the VNA and runs the VNA analysis on it.

    The frequency window can be specified either as start/stop or as
    center/span; both pairs configure the same sweep.

    Args:
        file_name (str): name of the output file.
        start_freq (float): starting frequency of the trace.
        stop_freq (float): stopping frequency of the trace.
        center_freq (float): central frequency of the trace.
        span (float): span of the trace.
        nr_avg (int): number of averages / sweeps.
        sweep_mode (str): VNA averaging mode (e.g. 'auto').
        nbr_points (int): number of points within the trace.
        power (float): output power in dBm.
        bandwidth (float): IF bandwidth in Hz.
        measure (str): scattering parameter to measure (e.g. 'S21').
        options_dict (dict): forwarded to ma2.VNA_analysis.

    Returns:
        None

    Requires MC_instr and VNA_instr to be set at module level.
    """
    # Configure the sweep from whichever frequency pair was supplied.
    # Fixed: compare to None with `is not None` (PEP 8) instead of `!= None`.
    if start_freq is not None and stop_freq is not None:
        MC_instr.set_sweep_function(swf.ZNB_VNA_sweep(VNA_instr,
                                                      start_freq=start_freq,
                                                      stop_freq=stop_freq,
                                                      npts=nbr_points,
                                                      force_reset=True))
    elif center_freq is not None and span is not None:
        MC_instr.set_sweep_function(swf.ZNB_VNA_sweep(VNA_instr,
                                                      center_freq=center_freq,
                                                      span=span,
                                                      npts=nbr_points,
                                                      force_reset=True))

    # set VNA detector function
    MC_instr.set_detector_function(det.ZNB_VNA_detector(VNA_instr))

    # VNA settings
    # VNA_instr.average_state('off')
    VNA_instr.bandwidth(bandwidth)

    # hack to measure S parameters different from S21
    str_to_write = "calc:par:meas 'trc1', '%s'" % measure
    print(str_to_write)
    VNA_instr.visa_handle.write(str_to_write)

    VNA_instr.avg(nr_avg)
    VNA_instr.number_sweeps_all(nr_avg)
    VNA_instr.average_mode(sweep_mode)

    VNA_instr.power(power)
    VNA_instr.timeout(10**4)

    # Bracket the run with timestamps so the produced data set can be found.
    t_start = ma.a_tools.current_timestamp()
    MC_instr.run(name=file_name)
    t_stop = ma.a_tools.current_timestamp()
    t_meas = ma.a_tools.get_timestamps_in_range(t_start, t_stop,
                                                label=file_name)
    assert len(t_meas) == 1, "Multiple timestamps found for this measurement"
    t_meas = t_meas[0]

    # ma.Homodyne_Analysis(auto=True, label=file_name, fitting_model='hanger')
    # ma.VNA_analysis(auto=True, label=file_name)
    ma2.VNA_analysis(auto=True, t_start=None, options_dict=options_dict)
def acquire_current_trace(file_name):
    """
    Re-acquire the trace currently configured on the VNA.

    Reads the sweep parameters back from the instrument and delegates the
    actual measurement to acquire_single_linear_frequency_span().

    Args:
        file_name (str): name of the output file.

    Returns:
        None
    """
    # Mirror the instrument's current sweep configuration.
    f_start = VNA_instr.start_frequency()
    f_stop = VNA_instr.stop_frequency()
    n_points = VNA_instr.npts()
    rf_power = VNA_instr.power()
    if_bandwidth = VNA_instr.bandwidth()
    current_sweep_time = VNA_instr.sweep_time()
    print(current_sweep_time)

    acquire_single_linear_frequency_span(file_name,
                                         start_freq=f_start,
                                         stop_freq=f_stop,
                                         nbr_points=n_points,
                                         power=rf_power,
                                         bandwidth=if_bandwidth)
def acquire_linear_frequency_span_vs_power(file_name, start_freq=None,
                                           stop_freq=None, center_freq=None,
                                           start_power=None, stop_power=None,
                                           step_power=2,
                                           span=None, nbr_points=101,
                                           bandwidth=100, measure='S21'):
    """
    Acquires a 2D map of VNA traces versus output power.

    The frequency window can be specified either as start/stop or as
    center/span; both pairs configure the same sweep.

    Args:
        file_name (str): name of the output file.
        start_freq (float): starting frequency of the trace.
        stop_freq (float): stopping frequency of the trace.
        center_freq (float): central frequency of the trace.
        start_power, stop_power, step_power (float): power range in dBm.
        span (float): span of the trace.
        nbr_points (int): number of points within the trace.
        bandwidth (float): IF bandwidth in Hz.
        measure (str): scattering parameter to measure (e.g. 'S21').

    Returns:
        None

    Raises:
        ValueError: if the power range is not fully specified.
    """
    # set VNA sweep function
    # Fixed: compare to None with `is not None` (PEP 8) instead of `!= None`.
    if start_freq is not None and stop_freq is not None:
        swf_fct_1D = swf.ZNB_VNA_sweep(VNA_instr,
                                       start_freq=start_freq,
                                       stop_freq=stop_freq,
                                       npts=nbr_points,
                                       force_reset=True)
        MC_instr.set_sweep_function(swf_fct_1D)
    elif center_freq is not None and span is not None:
        swf_fct_1D = swf.ZNB_VNA_sweep(VNA_instr,
                                       center_freq=center_freq,
                                       span=span,
                                       npts=nbr_points,
                                       force_reset=True)
        MC_instr.set_sweep_function(swf_fct_1D)

    if start_power is not None and stop_power is not None:
        # it prepares the sweep_points, such that it does not complain.
        swf_fct_1D.prepare()
        MC_instr.set_sweep_points(swf_fct_1D.sweep_points)
        MC_instr.set_sweep_function_2D(VNA_instr.power)
        # +step_power/2 makes the stop power inclusive.
        MC_instr.set_sweep_points_2D(np.arange(start_power,
                                               stop_power+step_power/2.,
                                               step_power))
    else:
        raise ValueError('Need to define power range.')

    # set VNA detector function
    MC_instr.set_detector_function(det.ZNB_VNA_detector(VNA_instr))

    # VNA settings
    VNA_instr.average_state('off')
    VNA_instr.bandwidth(bandwidth)

    # hack to measure S parameters different from S21
    str_to_write = "calc:par:meas 'trc1', '%s'" % measure
    print(str_to_write)
    VNA_instr.visa_handle.write(str_to_write)

    VNA_instr.timeout(600)

    MC_instr.run(name=file_name, mode='2D')
    # ma.Homodyne_Analysis(auto=True, label=file_name, fitting_model='hanger')
    ma.TwoD_Analysis(auto=True, label=file_name)
def acquire_2D_linear_frequency_span_vs_param(file_name, start_freq=None,
stop_freq=None, center_freq=None,
parameter=None, sweep_vector=None,
span=None, nbr_points=101, power=-20,
bandwidth=100, measure='S21'):
"""
Acquires a single trace from the VNA.
Inputs:
file_name (str), name of the output file.
start_freq (float), starting frequency of the trace.
stop_freq (float), stoping frequency of the trace.
center_freq (float), central frequency of the trace.
span (float), span of the trace.
nbr_points (int), Number of points within the trace.
power (float), power in dBm.
bandwidth (float), bandwidth in Hz.
measure (str), scattering parameter to measure (ie. 'S21').
Output:
NONE
Beware that start/stop and center/span are just two ways of configuring the
same thing. | |
nathanaevitas/odoo | custom_addons/phaply/models.py | Python | agpl-3.0 | 1,552 | 0.011598 | # -*- coding: utf-8 -*-
from openerp import models, fields, api
class phaply(models.Model):
    """Customer record for the legal-services (phap ly) addon.

    The model name 'phaply.phaply' maps to DB table 'phaply_phaply'
    (Odoo replaces '.' with '_').
    """
    _name = 'phaply.phaply'

    name = fields.Char(string='Ten KH')                     # customer name
    ngaysinh = fields.Date(string='Ngay sinh')              # date of birth
    gioitinh = fields.Selection([('m', 'Nam'), ('f', 'Nu')], string='Gioi tinh')  # gender
    quan = fields.Char(string='Quan')                       # district
    # BUG FIX: the label keyword must be lowercase `string`; the original
    # `String=` was not recognized by the fields API, so the field lost its
    # intended label.
    ngaytiepnhan = fields.Datetime(string='Ngay gio tiep nhan')  # intake date/time
    email = fields.Char(string='Email')
    phone = fields.Char(string='Phone')
    nguon = fields.Many2one(string='Nguon', comodel_name='phaply.nguon')  # lead source
    dv_thietke = fields.Boolean(string='Da co thiet ke')    # already has a design
    dv_thicong = fields.Boolean(string='Da co thi cong')    # already has construction
    dv_doitac = fields.Char(string='Doi tac')               # partner
    dichvu = fields.Many2one(string='Dich vu', comodel_name='phaply.dichvu')  # service
    phutrach = fields.Many2one(string='Nguoi phu trach', comodel_name='hr.employee')  # person in charge
    da_chot = fields.Boolean(string='Da chot')              # deal closed
class nguon(models.Model):
    """
    Lookup table storing customer lead sources (nguon khach hang).
    """
    _name = 'phaply.nguon'

    name = fields.Char(string='Nguon khach hang')
    # Reverse side of phaply.phaply.nguon: all customers from this source.
    nguon_id = fields.One2many(string='Nguon ID', comodel_name='phaply.phaply', inverse_name='nguon')
class dichvu(models.Model):
    """
    Lookup table of legal services (dich vu phap ly) offered.
    """
    _name = 'phaply.dichvu'

    name = fields.Char(string='Dich vu')
    # Reverse side of phaply.phaply.dichvu: all customers using this service.
    dichvu_id = fields.One2many(string='Dich vu ID', comodel_name='phaply.phaply', inverse_name='dichvu')
fengjz1/eloipool-litecoin | networkserver.py | Python | agpl-3.0 | 11,206 | 0.041585 | # Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2013 Luke Dashjr <luke-jr+eloipool@utopios.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asynchat
import logging
import os
import select
import socket
import threading
from time import time
import traceback
from util import ScheduleDict, WithNoop, tryErr
# epoll event masks used throughout this module: EPOLL_READ also includes
# priority data, error and hang-up events so they reach handle_read/handle_error.
EPOLL_READ = select.EPOLLIN | select.EPOLLPRI | select.EPOLLERR | select.EPOLLHUP
EPOLL_WRITE = select.EPOLLOUT
class SocketHandler:
    # Input/output chunk sizes (same defaults as asynchat.async_chat).
    ac_in_buffer_size = 4096
    ac_out_buffer_size = 4096

    def handle_close(self):
        """Discard pending output and close the connection immediately."""
        self.wbuf = None
        self.close()

    def handle_error(self):
        """Log the active exception at debug level, then close."""
        self.logger.debug(traceback.format_exc())
        self.handle_close()

    def handle_read(self):
        """epoll read callback: append received bytes and parse them."""
        try:
            data = self.recv (self.ac_in_buffer_size)
        except socket.error as why:
            self.handle_error()
            return

        if self.closeme:
            # All input is ignored from sockets we have "closed"
            return

        if isinstance(data, str) and self.use_encoding:
            # NOTE(review): presumably meant bytes(data, self.encoding);
            # bytes(str, ...) passes the *type* str, which would raise
            # TypeError -- confirm against upstream.
            data = bytes(str, self.encoding)
        self.ac_in_buffer = self.ac_in_buffer + data

        # Expose the most recent read buffer on the server for debugging.
        self.server.lastReadbuf = self.ac_in_buffer

        self.handle_readbuf()

    # Reuse asynchat's buffer/terminator bookkeeping unchanged.
    collect_incoming_data = asynchat.async_chat._collect_incoming_data
    get_terminator = asynchat.async_chat.get_terminator
    set_terminator = asynchat.async_chat.set_terminator

    def handle_readbuf(self):
        """Consume ac_in_buffer according to the current terminator.

        Port of asynchat.async_chat.handle_read, operating on our own
        buffer instead of reading from the socket directly.
        """
        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data (self.ac_in_buffer)
                self.ac_in_buffer = b''
            elif isinstance(terminator, int):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data (self.ac_in_buffer)
                    self.ac_in_buffer = b''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data (self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                # NOTE: this supports multiple different terminators, but
                #       NOT ones that are prefixes of others...
                if isinstance(self.ac_in_buffer, type(terminator)):
                    terminator = (terminator,)
                termidx = tuple(map(self.ac_in_buffer.find, terminator))
                try:
                    index = min(x for x in termidx if x >= 0)
                except ValueError:
                    index = -1
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string (source of subtle bugs)
                        self.collect_incoming_data (self.ac_in_buffer[:index])
                    specific_terminator = terminator[termidx.index(index)]
                    terminator_len = len(specific_terminator)
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    termidx = tuple(map(lambda a: asynchat.find_prefix_at_end (self.ac_in_buffer, a), terminator))
                    index = max(termidx)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data (self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data (self.ac_in_buffer)
                        self.ac_in_buffer = b''

    def push(self, data):
        """Queue data to send, trying an immediate non-blocking send first."""
        if not len(self.wbuf):
            # Try to send as much as we can immediately
            try:
                bs = self.socket.send(data)
            except:
                # Chances are we'll fail later, but anyway...
                bs = 0
            data = data[bs:]
            if not len(data):
                return
        self.wbuf += data
        # Ask epoll to tell us when the socket becomes writable again.
        self.server.register_socket_m(self.fd, EPOLL_READ | EPOLL_WRITE)

    def handle_timeout(self):
        """Scheduled-task callback: close idle connections."""
        self.close()

    def handle_write(self):
        """epoll write callback: flush as much of wbuf as the socket accepts."""
        if self.wbuf is None:
            # Socket was just closed by remote peer
            return
        bs = self.socket.send(self.wbuf)
        self.wbuf = self.wbuf[bs:]
        if not len(self.wbuf):
            if self.closeme:
                self.close()
                return
            # Buffer drained: stop watching for writability.
            self.server.register_socket_m(self.fd, EPOLL_READ)

    recv = asynchat.async_chat.recv

    def close(self):
        """Tear down the connection once any pending output has flushed."""
        if self.wbuf:
            # Defer the close until handle_write drains the buffer.
            self.closeme = True
            return
        if self.fd == -1:
            # Already closed
            return
        try:
            del self.server.connections[id(self)]
        except:
            pass
        self.server.unregister_socket(self.fd)
        self.changeTask(None)
        self.socket.close()
        self.fd = -1

    def boot(self):
        """Forcibly reset the connection and drop buffered input."""
        self.close()
        self.ac_in_buffer = b''

    def changeTask(self, f, t = None):
        """Replace this handler's scheduled task with f at time t (or cancel
        it when f is falsy)."""
        tryErr(self.server.rmSchedule, self._Task, IgnoredExceptions=KeyError)
        if f:
            self._Task = self.server.schedule(f, t, errHandler=self)
        else:
            self._Task = None

    def __init__(self, server, sock, addr):
        self.ac_in_buffer = b''
        self.incoming = []
        self.wbuf = b''
        self.closeme = False
        self.server = server
        self.socket = sock
        self.addr = addr
        self._Task = None
        self.fd = sock.fileno()
        server.register_socket(self.fd, self)
        server.connections[id(self)] = self
        # Give the peer 15 seconds before the idle-timeout closes the socket.
        self.changeTask(self.handle_timeout, time() + 15)

    @classmethod
    def _register(cls, scls):
        """Mix the public attributes of scls into this class; 'final_init'
        handlers from both classes are chained rather than replaced."""
        for a in dir(scls):
            if a == 'final_init':
                f = lambda self, x=getattr(cls, a), y=getattr(scls, a): (x(self), y(self))
                setattr(cls, a, f)
                continue
            if a[0] == '_':
                continue
            setattr(cls, a, getattr(scls, a))
class NetworkListener:
    logger = logging.getLogger('SocketListener')

    def __init__(self, server, server_address, address_family = socket.AF_INET6):
        """Bind a listening socket for `server` at `server_address`; bind
        errors are logged (via tryErr) rather than raised."""
        self.server = server
        self.server_address = server_address
        self.address_family = address_family
        tryErr(self.setup_socket, server_address, Logger=self.logger, ErrorMsg=server_address)

    def _makebind_py(self, server_address):
        """Create, configure and bind a non-blocking socket in pure Python."""
        sock = socket.socket(self.address_family, socket.SOCK_STREAM)
        sock.setblocking(0)
        try:
            # Best effort: allow quick rebinds after restart.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except socket.error:
            pass
        sock.bind(server_address)
        return sock

    def _makebind_su(self, server_address):
        """Fallback bind through the external 'bindservice' helper
        (IPv6 only); returns a non-blocking socket wrapping its fd."""
        if self.address_family != socket.AF_INET6:
            raise NotImplementedError
        from bindservice import bindservice
        (node, service) = server_address
        if not node: node = ''
        if not service: service = ''
        fd = bindservice(str(node), str(service))
        sock = socket.fromfd(fd, socket.AF_INET6, socket.SOCK_STREAM)
        sock.setblocking(0)
        return sock

    def _makebind(self, *a, **ka):
        """Try the plain bind first; fall back to bindservice, re-raising
        the original error if both attempts fail."""
        try:
            return self._makebind_py(*a, **ka)
        except BaseException as e:
            try:
                return self._makebind_su(*a, **ka)
            except:
                pass
            raise

    def setup_socket(self, server_address):
        """Bind, listen, and register the listener with the server's poller."""
        sock = self._makebind(server_address)
        sock.listen(100)
        self.server.register_socket(sock.fileno(), self)
        self.socket = sock

    def handle_read(self):
        """epoll read callback: accept one connection and hand it to the
        server's request-handler class."""
        server = self.server
        conn, addr = self.socket.accept()
        if server.rejecting:
            # Server is refusing new connections; drop it immediately.
            conn.close()
            return
        conn.setblocking(False)
        h = server.RequestHandlerClass(server, conn, addr)

    def handle_error(self):
        # Ignore errors... like socket closing on the queue
        pass
class _Waker:
    """Read handler for the server's self-pipe wakeup file descriptor."""

    def __init__(self, server, fd):
        self.server = server
        self.fd = fd
        logger_name = 'Waker for %s' % (server.__class__.__name__,)
        self.logger = logging.getLogger(logger_name)

    def handle_read(self):
        """Drain one byte from the wakeup pipe; EOF means the write end died."""
        if not os.read(self.fd, 1):
            self.logger.error('Got EOF on socket')
        self.logger.debug('Read wakeup')
class AsyncSocketServer:
logger = logging.getLogger('SocketServer')
waker = False
schMT = False
def __i |
mxm/incubator-beam | sdks/python/apache_beam/testing/load_tests/co_group_by_key_test.py | Python | apache-2.0 | 7,004 | 0.004426 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is CoGroupByKey load test with Synthetic Source. Besides of the standard
input options there are additional options:
* project (optional) - the gcp project in case of saving
metrics in Big Query (in case of Dataflow Runner
it is required to specify project of runner),
* metrics_namespace (optional) - name of BigQuery table where metrics
will be stored,
in case of lack of any of both options metrics won't be saved
* input_options - options for Synthetic Sources
* co_input_options - options for Synthetic Sources.
Example test run on DirectRunner:
python setup.py nosetests \
--test-pipeline-options="
--project=big-query-project
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0}'
--co_input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0}'" \
--tests apache_beam.testing.load_tests.co_group_by_key_test
To run test on other runner (ex. Dataflow):
python setup.py nosetests \
--test-pipeline-options="
--runner=TestDataflowRunner
--project=...
--staging_location=gs://...
--temp_location=gs://...
--sdk_location=./dist/apache-beam-x.x.x.dev0.tar.gz
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0
}'
--co_input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0
}'" \
--tests apache_beam.testing.load_tests.co_group_by_key_test
"""
from __future__ import absolute_import
import json
import logging
import unittest
import apache_beam as beam
from apache_beam.testing import synthetic_pipeline
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.testing.load_tests.load_test_metrics_utils import MeasureTime
from apache_beam.testing.load_tests.load_test_metrics_utils import MetricsMonitor
from google.cloud import bigquery as bq
except ImportError:
bq = None
# Tags naming the two joined PCollections inside the CoGroupByKey dict.
INPUT_TAG = 'pc1'
CO_INPUT_TAG = 'pc2'
# Name of the measured runtime metric column written to BigQuery.
RUNTIME_LABEL = 'runtime'
@unittest.skipIf(bq is None, 'BigQuery for storing metrics not installed')
class CoGroupByKeyTest(unittest.TestCase):
  """CoGroupByKey load test fed by two SyntheticSource inputs."""

  def parseTestPipelineOptions(self, options):
    """Translate the JSON test options into SyntheticSource parameters."""
    return {
        'numRecords': options.get('num_records'),
        'keySizeBytes': options.get('key_size'),
        'valueSizeBytes': options.get('value_size'),
        'bundleSizeDistribution': {
            'type': options.get(
                'bundle_size_distribution_type', 'const'
            ),
            'param': options.get('bundle_size_distribution_param', 0)
        },
        'forceNumInitialBundles': options.get(
            'force_initial_num_bundles', 0
        )
    }

  def setUp(self):
    """Read the pipeline options and, when fully configured, create the
    BigQuery metrics monitor."""
    self.pipeline = TestPipeline(is_integration_test=True)
    self.input_options = json.loads(self.pipeline.get_option('input_options'))
    self.co_input_options = json.loads(
        self.pipeline.get_option('co_input_options'))

    metrics_project_id = self.pipeline.get_option('project')
    self.metrics_namespace = self.pipeline.get_option('metrics_table')
    metrics_dataset = self.pipeline.get_option('metrics_dataset')
    self.metrics_monitor = None
    # Metrics are only collected when project, table and dataset are all set.
    check = metrics_project_id and self.metrics_namespace and metrics_dataset \
        is not None
    if check:
      measured_values = [{'name': RUNTIME_LABEL,
                          'type': 'FLOAT',
                          'mode': 'REQUIRED'}]
      self.metrics_monitor = MetricsMonitor(
          project_name=metrics_project_id,
          table=self.metrics_namespace,
          dataset=metrics_dataset,
          schema_map=measured_values
      )
    else:
      logging.error('One or more of parameters for collecting metrics '
                    'are empty. Metrics will not be collected')

  class _Ungroup(beam.DoFn):
    """Flatten the CoGroupByKey result back into individual elements."""

    def process(self, element):
      values = element[1]
      inputs = values.get(INPUT_TAG)
      co_inputs = values.get(CO_INPUT_TAG)
      for i in inputs:
        yield i
      for i in co_inputs:
        yield i

  def testCoGroupByKey(self):
    """Join two synthetic PCollections with CoGroupByKey, consume the
    joined result, and optionally publish runtime metrics."""
    with self.pipeline as p:
      pc1 = (p
             | 'Read ' + INPUT_TAG >> beam.io.Read(
                 synthetic_pipeline.SyntheticSource(
                     self.parseTestPipelineOptions(self.input_options)))
             | 'Make ' + INPUT_TAG + ' iterable' >> beam.Map(lambda x: (x, x))
             | 'Measure time: Start pc1' >> beam.ParDo(
                 MeasureTime(self.metrics_namespace))
            )
      pc2 = (p
             | 'Read ' + CO_INPUT_TAG >> beam.io.Read(
                 synthetic_pipeline.SyntheticSource(
                     self.parseTestPipelineOptions(self.co_input_options)))
             | 'Make ' + CO_INPUT_TAG + ' iterable' >> beam.Map(
                 lambda x: (x, x))
             | 'Measure time: Start pc2' >> beam.ParDo(
                 MeasureTime(self.metrics_namespace))
            )
      # pylint: disable=expression-not-assigned
      ({INPUT_TAG: pc1, CO_INPUT_TAG: pc2}
       | 'CoGroupByKey: ' >> beam.CoGroupByKey()
       | 'Consume Joined Collections' >> beam.ParDo(self._Ungroup())
       | 'Measure time: End' >> beam.ParDo(MeasureTime(self.metrics_namespace))
      )

      result = p.run()
      result.wait_until_finish()
      if self.metrics_monitor is not None:
        self.metrics_monitor.send_metrics(result)
if __name__ == '__main__':
  # Surface INFO-level logs when run directly, then hand off to unittest.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
|
ONSdigital/ras-frontstage | frontstage/common/cryptographer.py | Python | mit | 2,059 | 0.001457 | from base64 import b64decode, b64encode
from hashlib import sha256
from Crypto import Random
from Crypto.Cipher import AES
from frontstage import app
class Cryptographer:
    """AES-CBC encryption/decryption of byte strings, base64-encoded."""

    def __init__(self):
        """Derive the 32-byte AES key by SHA-256 hashing the application's
        SECRET_KEY configuration value."""
        secret = app.config["SECRET_KEY"]
        self._key = sha256(secret.encode("utf-8")).digest()

    def encrypt(self, raw_text):
        """Encrypt `raw_text` (a bytes string).

        A fresh random IV is generated per call and prepended to the
        ciphertext; the whole blob is returned base64-encoded.
        """
        padded = self.pad(raw_text)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self._key, AES.MODE_CBC, iv)
        return b64encode(iv + cipher.encrypt(padded))

    def decrypt(self, encrypted_text):
        """Reverse encrypt(): base64-decode, split off the 16-byte IV,
        decrypt the remainder and strip the padding."""
        blob = b64decode(encrypted_text)
        iv, body = blob[:16], blob[16:]
        cipher = AES.new(self._key, AES.MODE_CBC, iv)
        return self.unpad(cipher.decrypt(body))

    def pad(self, data):
        """Pad `data` up to a multiple of the AES block size; each padding
        byte holds the padding length (PKCS#7 style)."""
        fill = AES.block_size - len(data) % AES.block_size
        return data + bytes([fill]) * fill

    def unpad(self, data):
        """Strip the padding added by pad(), using the final byte as the
        padding length."""
        return data[:-data[-1]]
|
gnychis/grforwarder | gr-audio/examples/python/dial_tone_daemon.py | Python | gpl-3.0 | 2,052 | 0.00731 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GN | U Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICUL | AR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru
from gnuradio import audio
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import os
class my_top_block(gr.top_block):
    """Flowgraph that plays a two-tone (350 Hz + 440 Hz) dial tone.

    Note: this is Python 2 code (py2-only raise syntax below).
    """
    def __init__(self):
        gr.top_block.__init__(self)

        # Command-line options: output device and sample rate.
        parser = OptionParser(option_class=eng_option)
        parser.add_option("-O", "--audio-output", type="string", default="",
                          help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
        parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
                          help="set sample rate to RATE (48000)")
        (options, args) = parser.parse_args ()
        if len(args) != 0:
            # No positional arguments are expected.
            parser.print_help()
            raise SystemExit, 1

        sample_rate = int(options.sample_rate)
        ampl = 0.1  # low amplitude so the summed tones do not clip

        # Two sine sources feed the two channels of the audio sink.
        src0 = gr.sig_source_f (sample_rate, gr.GR_SIN_WAVE, 350, ampl)
        src1 = gr.sig_source_f (sample_rate, gr.GR_SIN_WAVE, 440, ampl)
        dst = audio.sink (sample_rate, options.audio_output)
        self.connect (src0, (dst, 0))
        self.connect (src1, (dst, 1))
if __name__ == '__main__':
    # Detach into the background; the tone keeps playing until killed.
    pid = gru.daemonize()
    print "To stop this program, enter 'kill %d'" % pid
    my_top_block().run()
|
liuhll/BlogAndArticle | Notes/Python/src/exercise/string_repalce_by_resub.py | Python | mit | 537 | 0.02457 | # -*- coding:utf-8 -*-
from re import sub
from itertools import islice

'''
How to adjust the text format of a string.
'''

# Convert the ISO dates in the log file to US date format mm/dd/yyyy,
# using re.sub() from the regular-expression module for the replacement.
with open("./log.log", "r") as f:
    for line in islice(f, 0, None):
        # Positional-group version of the same rewrite:
        # print(sub(r"(\d{4})-(\d{2})-(\d{2})", r"\2/\3/\1", line))
        # Named groups make the replacement template self-describing.
        # BUG FIX: the year reference was written as \g<> (empty group
        # name), which makes re.sub raise re.error; it must be \g<year>.
        print(sub(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})",
                  r"\g<month>/\g<day>/\g<year>", line))
|
CodeforChemnitz/TheaterWecker | scraper/app.py | Python | mit | 3,322 | 0.00301 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import calendar
import json
import locale
import re
import requests
from bs4 import BeautifulSoup
# Use German month names
locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
URL = "http://www.theater-chemnitz.de/spielplan/repertoire"
BASE_URL = "http://www.theater-chemnitz.de/"
time_re = re.compile("(?P<hour>\d{2}):(?P<minutes>\d{2})(\s*Uhr\s*)")
url_id_re = re.compile("\/(?P<id>\d+)\/$")
calendar_months = [
'',
'januar',
'februar',
'marz',
'april',
'mai',
'juni',
'ju | li',
'august',
'september',
'oktober',
'november',
'dezember'
]
def get_plays(year, month):
    """Scrape the Theater Chemnitz repertoire page for one month.

    Args:
        year (int): four-digit year of the listing.
        month (int): month number (1-12).

    Returns:
        list[dict]: one dict per performance with date/time fields,
        location, category, title, optional description, ticket link,
        numeric id and detail-page url. Empty list on HTTP failure.
    """
    # BUG FIX: `logger` was referenced below but never defined anywhere in
    # this module, so both error paths raised NameError instead of logging.
    import logging
    logger = logging.getLogger(__name__)

    plan = requests.get("{}/{}/{:02d}/".format(URL, year, month))
    if plan.status_code != 200:
        logger.error('got non-200 return code while scraping', exc_info=True)
        return []

    # NOTE(review): replace(' ', ' ') is a no-op as written; the first
    # argument was presumably a non-breaking space that got mangled in this
    # copy -- confirm against the original source.
    soup = BeautifulSoup(plan.text.replace(' ', ' '), "lxml")
    news_items = soup.find_all("div", class_="cc_news_item")
    plays = []
    for block_top in news_items:
        date = block_top.find("div", class_="cc_news_date")
        if date:
            day = int(date.find(class_="cc_day").get_text().strip('.'))
            play = {
                "month": month,
                "day": day,
                "year": year
            }
            time_raw = date.find(class_="cc_timeresp").get_text().split(' ')[0].split(':')
            play["hour"] = int(time_raw[0])
            play["minutes"] = int(time_raw[1])
            play["location"] = block_top.find(class_="cc_content").get_text()

            # Gastspiel, Premiere etc.
            special_raw = block_top.find(class_="cc_premiere")
            if special_raw:
                special = special_raw.get_text()
                if special in ["Gastspiel"]:  # Ausnahmen: skip guest performances
                    continue
            category_raw = date.find(class_="cc_type")
            if category_raw:
                category = category_raw.get_text()
                if category in ["Theaternahes Rahmenprogramm"]:
                    category = "Sonstiges"
                play["category"] = category
            title_raw = block_top.find("h2")
            if title_raw:
                play["title"] = title_raw.get_text()
            else:
                # A play without a title is useless; skip it.
                continue
            description_raw = block_top.find("h3")
            if description_raw:
                play["description"] = description_raw.get_text()
            tickets_raw = block_top.find("a", class_="cc_ticket")
            play["tickets"] = tickets_raw["href"] if tickets_raw else None
            # ID & URL
            id_raw = block_top.find(class_="cc_newscol2").find("a")["href"]
            if id_raw:
                play["url"] = "{}{}".format(BASE_URL, id_raw)
                play["id"] = int(id_raw.strip("/").split("/")[-1])
            plays.append(play)
    if len(plays) == 0:
        logger.error('could not find a single play while scraping', exc_info=True)
    return plays
def main():
    """Print the plays for the current month and the following one."""
    today = datetime.date.today()
    plays = get_plays(today.year, today.month)
    # Wrap around to January of the next year when December is current.
    if today.month == 12:
        plays.extend(get_plays(today.year + 1, 1))
    else:
        plays.extend(get_plays(today.year, today.month + 1))
    for play in plays:
        print(play)


if __name__ == "__main__":
    main()
|
caesar2164/edx-platform | scripts/migrate_score_override.py | Python | agpl-3.0 | 9,522 | 0.002941 | #!/usr/bin/env python
"""
Script to update old score overrides to match upstream overrides.
"""
import datetime
import itertools
import os
import django
from django.db import transaction
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.monkey_patch import django_db_models_options
def main():
from django.contrib.auth.models import User
from openassessment.assessment.models import Assessment, AssessmentPart, StaffWorkflow
from openassessment.workflow.models import AssessmentWorkflow, AssessmentWorkflowStep
from student.models import anonymous_id_for_user, user_by_anonymous_id
from submissions.models import Score, ScoreSummary, ScoreAnnotation, Submission
old_scores = Score.objects.filter(submission__isnull=True, reset=False).order_by('id')
updated_count = 0
for score in old_scores:
try:
with transaction.atomic():
# ScoreSummary is updated on Score saves but for this script we don't want that.
# Correct way is to disconnect post_save signal, but since the receiver function
# is defined in the class, we can't reference it. Workaround here is to just
# prefetch the score summary and resave it to maintain its original field values.
score_summary = ScoreSummary.objects.get(student_item=score.student_item)
# Update old override with submission from the score preceding it.
# If none exists, look for it in the submissions table.
preceding_score = Score.objects.filter(
student_item=score.student_item,
created_at__lte=score.created_at,
submission__isnull=False,
).order_by('-created_at')[:1]
if preceding_score.count():
submission = preceding_score.get().submission
else:
submission_qset = Submission.objects.filter(student_item=score.student_item)
if submission_qset.count() > 1:
raise Exception("MULTIPLE SUBMISSIONS FOR STUDENT_ITEM {}".format(score.student_item))
else:
submission = submission_qset[:1].get()
score.submission = submission
score.save()
# Offset override reset by 1 second for convenience when sorting db
override_date = score.created_at - datetime.timedelta(seconds=1)
# Create reset score
Score.objects.create(
student_item=score.student_item,
submission=None,
points_earned=0,
points_possible=0,
created_at=override_date,
reset=True,
)
# Restore original score summary values
score_summary.save()
# Fetch staff id from score course for ScoreAnnotation
course_id = CourseKey.from_string(score.student_item.course_id)
staff = User.objects.filter(
courseaccessrole__role__in=['instructor', 'staff'],
courseaccessrole__course_id=course_id,
)[:1].get()
staff_id = anonymous_id_for_user(staff, course_id, save=False)
# Create ScoreAnnotation
score_annotation = ScoreAnnotation(
score=score,
annotation_type="staff_defined",
creator=staff_id,
reason="A staff member has defined the score for this submission",
)
score_annotation.save()
# ORA2 Table Updates...
# Fetch rubric from an existing assessment
assessment = Assessment.objects.filter(submission_uuid=submission.uuid)[:1].get()
rubric = assessment.rubric
staff_assessment = Assessment.create(
rubric=rubric,
scorer_id=staff_id,
submission_uuid=submission.uuid,
score_type="ST",
scored_at=override_date,
)
# Fake criterion selections
rubric_index = rubric.index
assessment_parts = []
criteria_without_options = rubric_index.find_criteria_without_options()
criteria_with_options = set(rubric_index._criteria_index.values()) - criteria_without_options
ordered_criteria = sorted(criteria_with_options, key=lambda criterion: criterion.order_num)
criteria_options = [c.options.all() for c in ordered_criteria]
# Just take the first combination of options which add up to the override point score
for selection in itertools.product(*criteria_options):
total = sum(option.points for option in selection)
if total == score.points_earned:
for option in selection:
assessment_parts.append({
'criterion': option.criterion,
'option': option
})
break
# Default to first option for each criteria if no matching sum found
if not assessment_parts:
print "NO CLEAN SUM FOR SUBMISSION " + submission.uuid
for options in criteria_options:
assessment_parts.append({
'criterion': options[0].criterion,
'option': options[0],
})
# Add feedback-only criteria
for criterion in criteria_without_options:
assessment_parts.append({
'criterion': criterion,
'option': None
})
AssessmentPart.objects.bulk_create([
AssessmentPart(
assessment=staff_assessment,
criterion=assessment_part['criterion'],
option=assessment_part['option'],
feedback=u""
)
for assessment_part in assessment_parts
])
try:
staff_workflow = StaffWorkflow.objects.get(submission_uuid=submission.uuid)
staff_workflow.assessment = staff_assessment.id
staff_workflow.grading_completed_at = override_date
except StaffWorkflow.DoesNotExist:
staff_workflow = StaffWorkflow(
scorer_id=staff_id,
course_id=score.student_item.course_id,
item_id=score.student_item.item_id,
submission_uuid=submission.uuid,
created_at=override_date,
grading_completed_at=override_date,
assessment=staff_assessment.id,
)
staff_workflow.save()
workflow = AssessmentWorkflow.get_by_submission_uuid(submission.uuid)
try:
staff_step = workflow.steps.get(name='staff')
staff_step.assessment_compl | eted_at = score.created_at
staff_step.submitter_completed_at = score.created_at
staff_step.save()
except AssessmentWorkflowStep.DoesNotExist:
for step in workflow.steps.all():
step.assessment_completed_at = score.created_at
step.submitter_completed_at = score.created_at
step.order_num += 1
| step.save()
workflow.steps.add(
AssessmentWorkflowStep(
name='staff',
order_num=0,
assessment_compl |
google-research/mint | mint/ctl/single_task_evaluator_test.py | Python | apache-2.0 | 2,149 | 0.001396 | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the single_task_evaluator."""
from mint.ctl import single_task_evaluator
from mint.ctl import single_task_trainer
from third_party.tf_models import orbit
import tensorflow as tf
import tensorflow_datasets as tfds
class SingleTaskEvaluatorTest(tf.test.TestCase):
  """Smoke test: train briefly on iris, then check evaluator accuracy."""

  def test_single_task_evaluation(self):
    # Small, well-separated dataset; evaluating on the training split is
    # deliberate -- this is a sanity check of the evaluator plumbing.
    iris = tfds.load('iris')
    train_ds = iris['train'].batch(32)

    model = tf.keras.Sequential([
        tf.keras.Input(shape=(4,), name='features'),
        tf.keras.layers.Dense(10, activation=tf.nn.relu),
        tf.keras.layers.Dense(10, activation=tf.nn.relu),
        tf.keras.layers.Dense(3)
    ])

    trainer = single_task_trainer.SingleTaskTrainer(
        train_ds,
        label_key='label',
        model=model,
        loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=tf.keras.optimizers.SGD(
            learning_rate=tf.keras.optimizers.schedules.PiecewiseConstantDecay(
                [0], [0.01, 0.01])))

    evaluator = single_task_evaluator.SingleTaskEvaluator(
        train_ds,
        label_key='label',
        model=model,
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

    controller = orbit.Controller(
        trainer=trainer,
        evaluator=evaluator,
        steps_per_loop=100,
        global_step=trainer.optimizer.iterations)

    controller.train(train_ds.cardinality().numpy())
    controller.evaluate()
    accuracy = evaluator.metrics[0].result().numpy()

    # BUG FIX: assertGreater(a, b) asserts a > b.  The original call
    # assertGreater(0.925, accuracy) asserted that accuracy stayed *below*
    # the threshold, which inverts the intended quality check.
    self.assertGreater(accuracy, 0.925)


if __name__ == '__main__':
  tf.test.main()
|
samdroid-apps/something-for-reddit | redditisgtk/main.py | Python | gpl-3.0 | 14,799 | 0.000609 | # Copyright 2016 Sam Parkinson <sam@sam.today>
#
# This file is part of Something for Reddit.
#
# Something for Reddit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Something for Reddit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Something for Reddit. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
from argparse import ArgumentParser
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import Soup
from redditisgtk.sublist import SubList
from redditisgtk.subentry import SubEntry
from redditisgtk.api import RedditAPI, APIFactory
from redditisgtk.webviews import (FullscreenableWebview, ProgressContainer,
WebviewToolbar)
from redditisgtk.readcontrolle | r import get_read_controller
from redditisgtk.identity import IdentityController
from r | edditisgtk.identitybutton import IdentityButton
from redditisgtk.comments import CommentsView
from redditisgtk.settings import get_settings, show_settings
from redditisgtk import webviews
VIEW_WEB = 0
VIEW_COMMENTS = 1
class RedditWindow(Gtk.Window):
    def __init__(
            self,
            ic: IdentityController,
            api_factory: APIFactory,
            start_sub: str = None):
        """Build the main window: CSS, paned layout, header and API hookup.

        ic: identity controller; its token_changed signal drives API swaps.
        api_factory: produces a RedditAPI client per OAuth token.
        start_sub: initial subreddit URI (currently unused here; the
            default comes from settings in connect_api -- TODO confirm).
        """
        Gtk.Window.__init__(self, title='Something For Reddit',
                            icon_name='today.sam.reddit-is-gtk')
        self.add_events(Gdk.EventMask.KEY_PRESS_MASK)
        self.set_default_size(600, 600)
        self.set_wmclass("reddit-is-gtk", "Something For Reddit")
        # Re-connect the API client whenever the active identity changes.
        self._ic = ic
        self._ic.token_changed.connect(self._token_changed_cb)
        self._api = None
        self._api_factory = api_factory
        # Pick the light or dark stylesheet to match the GTK theme preference.
        settings = Gtk.Settings.get_default()
        screen = Gdk.Screen.get_default()
        css_provider = Gtk.CssProvider.get_default()
        if settings.props.gtk_application_prefer_dark_theme:
            css_provider.load_from_resource(
                '/today/sam/reddit-is-gtk/style.dark.css')
        else:
            css_provider.load_from_resource(
                '/today/sam/reddit-is-gtk/style.css')
        context = Gtk.StyleContext()
        context.add_provider_for_screen(screen, css_provider,
                                        Gtk.STYLE_PROVIDER_PRIORITY_USER)
        # Left pane: subreddit list; right pane: comments/web stack.
        self._paned = Gtk.Paned.new(Gtk.Orientation.HORIZONTAL)
        self.add(self._paned)
        self._paned.show()
        self._webview = FullscreenableWebview()
        self._webview_bin = ProgressContainer(self._webview)
        self._comments = None
        self._stack = Gtk.Stack()
        self._stack.connect('notify::visible-child', self.__stack_child_cb)
        self._paned.add2(self._stack)
        #self._paned.child_set_property(self._stack, 'shrink', True)
        self._stack.show()
        self._sublist_bin = Gtk.Box()
        self._paned.add1(self._sublist_bin)
        self._sublist_bin.show()
        self._sublist = None
        self._make_header()
        # Keep the header's left section the same width as the sublist pane.
        left = Gtk.SizeGroup(mode=Gtk.SizeGroupMode.HORIZONTAL)
        left.add_widget(self._left_header)
        left.add_widget(self._sublist_bin)
        # Mirror divider positions between the content and header paneds.
        self._paned.connect('notify::position',
                            self.__notify_position_cb,
                            self._header_paned)
        self._header_paned.connect('notify::position',
                                   self.__notify_position_cb,
                                   self._paned)
        # Create the initial API client for whatever identity is active now.
        self._token_changed_cb(self._ic)
def _token_changed_cb(self, ic):
api = self._api_factory.get_for_token(self._ic.active_token)
if self._api != api:
self.connect_api(api)
    def connect_api(self, api: RedditAPI):
        """Tear down any previous API-bound widgets and rebuild them for *api*.

        Recreates the subreddit list and the header's subreddit entry, and
        re-routes request-failure reporting to this window.
        """
        # NOTE(review): start_sub is always None here, so the first branch
        # always runs; looks like a leftover from a removed parameter.
        start_sub = None
        if start_sub is None:
            start_sub = get_settings()['default-sub']
        if self._api is not None:
            # Keep the currently-viewed subreddit across the API swap.
            # TODO: swap right panel
            print('Swapping', self._api, 'for', api)
            start_sub = self._sublist.get_uri()
            # FIXME: do we need to disconnect the callbacks?
            self._sublist.destroy()
            self._subentry.destroy()
        self._api = api
        self._api.request_failed.connect(self.__request_failed_cb)
        self._sublist = SubList(self._api, start_sub)
        self._sublist.new_other_pane.connect(self.__new_other_pane_cb)
        self._sublist_bin.add(self._sublist)
        #self._paned.child_set_property(self._sublist, 'shrink', True)
        self._sublist.show()
        self._subentry = SubEntry(self._api, start_sub)
        self._subentry.activate.connect(self.__subentry_activate_cb)
        self._subentry.escape_me.connect(self.__subentry_escape_me_cb)
        self._left_header.props.custom_title = self._subentry
        self._subentry.show()
    def __request_failed_cb(self, api, msg, info):
        """Show a modal dialog for a failed API request, offering a retry.

        msg: the failed Soup message, resent verbatim on 'Retry'.
        info: human-readable error description shown in the dialog.
        """
        dialog = Gtk.Dialog(use_header_bar=True)
        label = Gtk.Label(label=info)
        dialog.get_content_area().add(label)
        label.show()
        dialog.add_button('Retry', Gtk.ResponseType.ACCEPT)
        dialog.add_button(':shrug-shoulders:', Gtk.ResponseType.REJECT)
        dialog.set_default_response(Gtk.ResponseType.ACCEPT)
        dialog.props.transient_for = self
        # Blocks until the user picks an option.
        response = dialog.run()
        if response == Gtk.ResponseType.ACCEPT:
            self._api.resend_message(msg)
        dialog.destroy()
    def do_event(self, event):
        """Handle window-wide keyboard shortcuts.

        F6 focuses the subreddit entry; 1/2/3 focus the sublist, comments
        and webview panes; Alt+Left/Right drive webview history.  Returns
        True when the event was consumed, otherwise falls through.
        """
        if event.type != Gdk.EventType.KEY_PRESS:
            return
        # Don't steal keystrokes from focused text-entry widgets.
        if isinstance(self.get_focus(), (Gtk.TextView, Gtk.Entry)):
            return
        if event.keyval == Gdk.KEY_F6:
            self._subentry.focus()
            return True
        if event.keyval == Gdk.KEY_1:
            self._sublist.focus()
            return True
        if event.keyval == Gdk.KEY_2:
            self._stack.set_visible_child(self._comments)
            self._comments.focus()
            return True
        if event.keyval == Gdk.KEY_3:
            self._stack.set_visible_child(self._webview_bin)
            self._webview.grab_focus()
            return True
        # Alt modifier: browser-style history navigation.
        if event.state & Gdk.ModifierType.MOD1_MASK:
            if event.keyval == Gdk.KEY_Left:
                self._webview.go_back()
                return True
            if event.keyval == Gdk.KEY_Right:
                self._webview.go_forward()
                return True
    def __new_other_pane_cb(self, sublist, link, comments, link_first):
        """Populate the right-hand stack with a new comments/web pair.

        link: URL for the webview pane (may be None).
        comments: comments widget for the stack (may be None).
        link_first: when True and a link exists, show the web pane first.
        """
        # Drop whatever the stack held before.
        if self._comments is not None:
            self._stack.remove(self._comments)
        self._stack.remove(self._webview_bin)
        self._comments = comments
        if self._comments is not None:
            self._stack.add_titled(self._comments, 'comments', 'Comments')
            self._comments.show()
        self._stack.add_titled(self._webview_bin, 'web', 'Web')
        self._webview_bin.show()
        self._webview.show()
        self._paned.position = 400  # TODO: constant
        if link_first and link:
            self._stack.set_visible_child(self._webview_bin)
            self._webview.load_uri(link)
        else:
            self._stack.set_visible_child(self._comments)
            # Defer loading until the web pane actually becomes visible.
            if link is not None:
                self._webview.load_when_visible(link)
def load_uri_from_label(self, uri):
is_relative = not uri.startswith('http')
is_reddit = re.match('https?:\/\/(www\.|np\.)?reddit\.com\/', uri)
if is_relative or is_reddit:
self.goto_reddit_uri(uri)
return
self._stack.set_visible_child(self._webview_bin)
self._webview.load_uri(uri)
    def __notify_position_cb(self, caller, pspec, other):
        # Mirror one paned's divider position onto the other, keeping the
        # header divider aligned with the content divider.
        other.props.position = caller.props.position
def _make_header(self):
|
SarahBA/b2share | b2share/modules/records/errors.py | Python | gpl-2.0 | 2,455 | 0.001222 | # -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""B2share records errors."""
from jsonschema.exceptions import ValidationError
from invenio_rest.errors import RESTValidationError, FieldError
class B2ShareRecordsError(RESTValidationError):
    """Base class for B2Share record REST validation errors."""
class InvalidRecordError(B2ShareRecordsError):
    """Raised when a record fails JSON-schema validation."""
# TODO(edima): remove this when we have support for permissions
class AlteredRecordError(B2ShareRecordsError):
    """Raised when a record update changes what is considered
    immutable record data."""
class EpicPIDError(Exception):
    # Original docstring ("Raise when a record has no community.") looks
    # copy-pasted from another class; this error wraps EPIC PID failures.
    """Raised when an EPIC PID (handle) operation fails."""
class UnknownRecordType(B2ShareRecordsError):
    """Error raised when a record type cannot be determined.
    The two main record types are "published record" and "deposit".
    """
class AnonymousDepositSearch(B2ShareRecordsError):
    """Error raised when an anonymous user tries to search for drafts."""
    # HTTP 401: authentication is required before drafts can be searched.
    code = 401
    # FIX: description string was garbled ('Only auth | enticated ...');
    # reconstructed to the obviously intended text.
    description = 'Only authenticated users can search for drafts.'
def register_error_handlers(app):
    """Attach a Flask handler translating jsonschema ValidationErrors
    into InvalidRecordError REST responses."""
    @app.errorhandler(ValidationError)
    def handle_validation_error(err):
        # Default field name: the slash-joined path into the document.
        field_path = '/'.join(str(part) for part in err.path)
        if err.validator in ('required', 'additionalProperties'):
            # These validators name the offending field only inside the
            # quoted message text; pull it out when present.
            try:
                field_path = err.message.split('\'')[1]
            except IndexError:
                pass  # ignore
        return InvalidRecordError(errors=[
            FieldError(field_path, err.message)
        ])
|
Xarthisius/girder | pytest_girder/pytest_girder/assertions.py | Python | apache-2.0 | 1,022 | 0 | import json
from .utils import getResponseBody
def assertStatus(response, code):
    """
    Call this to assert that a given HTTP status code was returned.

    :param response: The response object.
    :param code: The status code.
    :type code: int or str
    """
    # Hide tracebacks for this function within pytest
    __tracebackhide__ = True
    code = str(code)

    if not response.output_status.startswith(code.encode()):
        # FIX: the status-mismatch message was garbled ('no | t'); restored.
        msg = 'Response status was %s, not %s.' % (response.output_status,
                                                   code)

        if hasattr(response, 'json'):
            msg += ' Response body was:\n%s' % json.dumps(
                response.json, sort_keys=True, indent=4,
                separators=(',', ': '))
        else:
            # FIX: add the leading space for consistency with the branch above.
            msg += ' Response body was:\n%s' % getResponseBody(response)

        assert response.output_status.startswith(code.encode()), msg
def assertStatusOk(response):
    """Shorthand asserting that *response* carries HTTP status 200."""
    __tracebackhide__ = True
    return assertStatus(response, 200)
|
Goldcap/django-operis | operis/management/commands/generate-api.py | Python | mit | 11,299 | 0.014249 | import datetime
import sys
import os.path
import pprint
from inspect import getmembers, isclass
from collections import defaultdict
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db.models.base import ModelBase
from jinja2 import FileSystemLoader, Environment, PackageLoader, ChoiceLoader
from operis.log import log
from operis.utils import clean, convert, convert_friendly, underscore, firstLower
class Command(BaseCommand):
help = 'Creates Generic API Scaffolding'
logger = None
option_list = BaseCommand.option_list + (
make_option('--regenerate',
action='store_true',
dest='regenerate',
default=False,
help='Wipe Prior instances'),
)
def handle(self, *args, **options):
self.logger = log( self )
wipe = False
if options['regenerate']:
wipe = True
modules = map(__import__, settings.EMBER_MODELS)
model_history = []
model_instances = []
for model in modules:
for name, obj in getmembers(model.models):
if isclass(obj):
if isinstance(obj, ModelBase):
self.logger.log("Object Name is: %s",[obj.__name__],"notice")
if obj.__name__ in settings.EMBER_MODEL_EXCLUSIONS:
continue
field_history = []
fields_extended = []
for aname in obj._meta.get_all_field_names():
f, modelItem, direct, m2m = obj._meta.get_field_by_name(aname)
#self.logger.log("Field Name :: %s",[f.name],"info")
#self.logger.log("Field Class :: %s",[type(f).__name__],"info")
try:
assert(f.get_internal_type())
#self.logger.log("Field Type :: %s",[f.get_internal_type()],"debug")
except:
#for a in dir(f):
# print "%s = %s" % (a,getattr(f,a))
continue
#sys.exit(0)
if hasattr(obj ,"Ember") and hasattr(obj.Ember,'fields'):
if f.name not in obj.Ember.fields:
continue
if convert(f.name) not in field_history:
field = {}
field['name'] = convert(f.name)
field['name_underscore'] = underscore(f.name)
field['name_friendly'] = convert_friendly(f.name)
field['type'] = f.get_internal_type()
field['class'] = type(f).__name__
if field['type'] == "ForeignKey" or field['type'] == "ManyToManyField":
has_parent = True
field['parent'] = underscore(f.rel.to.__name__)
field['parent_class'] = f.rel.to.__name__
field['parent_class_app'] = str(f.rel.to._meta.app_label)
field_history.append(convert(f.name))
fields_extended.append(field)
#print fields_extended
| index_list = ['id']
index_converted = ''
fields = ['id']
fields_converted = ''
filter_fields = ['id']
filter_fields_converted = ''
search_fields = ['id']
search_fields_converted = ''
|
singular = None
plural = None
fixture_seed = 1
if hasattr(obj._meta ,"verbose_name"):
singular = str(obj._meta.verbose_name)
else:
singular = obj.__name__.title()
if singular.lower() in model_history:
continue
if hasattr(obj._meta ,"verbose_name_plural"):
plural = str(obj._meta.verbose_name_plural)
else:
plural = obj.__name__
#Add to our Plural-Item Controllers
if hasattr(obj ,"Ember"):
if hasattr(obj.Ember,'fields'):
fields = []
for f in obj.Ember.fields:
fields.append(convert(f))
if hasattr(obj.Ember,'index_list'):
index_list = []
for f in obj.Ember.index_list:
index_list.append(convert(f))
if hasattr(obj.Ember,'filter_fields'):
filter_fields = obj.Ember.filter_fields
if hasattr(obj.Ember,'search_fields'):
search_fields = obj.Ember.search_fields
if hasattr(obj.Ember,'fixture_seed'):
fixture_seed = obj.Ember.fixture_seed
fields_converted = "fields = ('" + "','".join(fields) + "')"
index_converted = "fields = ('" + "','".join(index_list) + "')"
filter_fields_converted = "filter_fields = ['" + "','".join(filter_fields) + "']"
search_fields_converted = "search_fields = ['" + "','".join(search_fields) + "']"
item = { "model": name,
"fixture_seed": fixture_seed,
"singular": clean(singular),
"singular_converted": convert(singular),
"plural": clean(plural),
"plural_converted": convert(plural),
"emberCase": firstLower(plural),
"index_converted": index_converted,
"fields": fields_extended,
"fields_converted": fields_converted,
"filter_fields_converted": filter_fields_converted,
"search_fields_converted": search_fields_converted,
}
model_history.append(singular.lower())
model_instances.append(item)
print "=============================="
#print obj.__name__
#sys.exit(0)
global_exts = getattr(settings, 'JINJA_E |
hombin/kickstarter | KickStarter/middlewares.py | Python | mit | 1,909 | 0 | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class KickstarterSpiderMiddleware(object):
    """Scrapy spider-middleware scaffold.

    All hooks are pass-throughs: responses are accepted unchanged and
    results/start requests are yielded verbatim.  Methods left undefined
    would simply make scrapy treat the middleware as a no-op for them.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and subscribe to
        the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Accept every response entering the spider (return None)."""
        return None

    def process_spider_output(self, response, result, spider):
        """Yield each Request/dict/Item produced by the spider unchanged."""
        for produced in result:
            yield produced

    def process_spider_exception(self, response, exception, spider):
        """Let exceptions propagate (returning None keeps default handling)."""
        return None

    def process_start_requests(self, start_requests, spider):
        """Yield the spider's start requests unchanged (no response exists
        at this point, only requests may be returned)."""
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        self.logger_line = spider.logger.info('Spider opened: %s' % spider.name)
|
zlatiadam/PyPortfolio | pyportfolio/__init__.py | Python | gpl-2.0 | 863 | 0.008111 | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 21:34:36 2015
@author: Zlati
"""
from .portfolios.offline.equal_weights_portfoli | o import EqualWeightsPor | tfolio
from .portfolios.offline.tangency_portfolio import TangencyPortfolio
from .portfolios.offline.minimum_variance_portfolio import MinimumVariancePortfolio
from .portfolios.offline.smallest_eigenvalue_portfolio import SmallestEigenvaluePortfolio
from .portfolios.offline.mean_variance_portfolio import MeanVariancePortfolio
from .portfolios.offline.most_diversified_portfolio import MostDiversifiedPortfolio
from .portfolios.offline.mean_VaR_portfolio import MeanVaRPortfolio
from .portfolios.offline.mean_CVaR_portfolio import MeanCVaRPortfolio
#from pyportfolio.portfolios.base import Portfolio
#df = pd.read_csv("C:\\Users\\Zlati\\Desktop\\PyPortfolio\\pyportfolio\\data\\SP500_daily_2000_2015.csv") |
kiddingmu/leetcode | 43_valid_palindrome/valid_palindrome.py | Python | gpl-2.0 | 509 | 0.007859 | class Solution:
# @param s, a string
# @return a boolean
def isPalindrome(self, s):
s = [x.lower() for x in s if x.isalpha() or x.isdigit()]
size = len(s)
for i in xrange(size):
if s[i] != s[size-1-i]:
ret | urn False
return True
if __name__ == "__main__":
    # Ad-hoc manual check.  Earlier sample inputs kept for reference:
    #   "A man, a plan, a canal: Panama"  /  "race a car"  /  "1a2"
    solution = Solution()
    s = "A man a plan, 11a canal: Panama"
    print(solution.isPalindrome(s))
|
madduck/reclass | reclass/constants.py | Python | artistic-2.0 | 444 | 0.002268 | #
# -*- coding: utf-8 -*-
#
# Thi | s file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright Β© 2007β14 martin f. krafft <madduck@madduck.net>
# Released under the terms of the Artistic Licence 2.0
#
class _Constant(object):
def __init__(self, displayname):
self._repr = display | name
__str__ = __repr__ = lambda self: self._repr
# Singleton markers selecting reclass's operating mode.
MODE_NODEINFO = _Constant('NODEINFO')
MODE_INVENTORY = _Constant('INVENTORY')
|
TimothyXu/nurdCase | nurdCase.py | Python | gpl-3.0 | 2,385 | 0.012998 | #!/usr/bin/env python3
#######################################################################
#Copyright (C) 2015 Timothy Xu #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>.#
#######################################################################
import re, sys
def scan():
    """Print every CamelCase match found on stdin, one line per match,
    with 1-based line number and start/end column."""
    # enumerate(..., 1) replaces the hand-rolled lineNum counter, and
    # iterating the file object streams instead of slurping via readlines().
    for line_num, line in enumerate(sys.stdin, 1):
        for match in re.finditer(r"\b([A-Z][a-z]+){2,}\b", line):  # all matches in this line
            # line number, start column, end column, matched text
            print('line %d %02d-%02d %s' % (line_num, match.start(), match.end(), match.group(0)))
def write(path='converted'):
    """Copy stdin to *path* with every CamelCase word de-capitalized.

    :param path: output file name; defaults to 'converted' in the cwd,
        preserving the original call signature write().
    """
    with open(path, 'w') as f:
        for line in sys.stdin.readlines():
            f.write(re.sub(r"\b([A-Z][a-z]+){2,}\b", repl, line))  # replaces matches according to repl()
    # FIX: dropped the explicit f.close() -- the with-block already closes
    # the file; the original called close() redundantly inside the block.
def repl(m):
    """re.sub callback: return the matched text with its first character lowercased."""
    text = m.group(0)
    return text[0].lower() + text[1:]
try:
    mode = sys.argv[1]
    assert mode
# BUG FIX: `except IndexError or EOFError or ...` evaluates the `or` chain
# first and therefore only ever caught IndexError; a tuple catches them all.
except (IndexError, EOFError, OSError, AssertionError):
    # Usage string reconstructed (dataset garbling had split 'script.py').
    print('Usage:\nPrint matches (dry run): cat /path/to/file | /path/to/this/script.py -s(can)\nWrite to \'converted\' in cwd: cat /path/to/file | /path/to/this/script.py -w(rite)')
else:
    if mode == '-s' or mode == '-scan':
        scan()
    if mode == '-w' or mode == '-write':
        write()
write() |
chrsbats/connorav | connorav/__init__.py | Python | mit | 138 | 0.007246 | from __future__ import absolute_import
from .distribution import MSSKDistribution
from .correl_rv import CorrelatedNo | nNormal | RandomVariates |
aluminiumgeek/horo-modules | test/test.py | Python | gpl-3.0 | 370 | 0.002703 | # test.py (c) Mikhail Mezyakov <mihail265@gmail.com>
# Released under the | GNU GPL v.3
#
# | Module sends "success" message to user on a channel
def horo(channel, user, args):
    """Build a PRIVMSG line telling *user* on *channel* that all went well.

    args is accepted for interface compatibility but unused.
    """
    template = u'PRIVMSG {channel} :{user}: success'
    return template.format(channel=channel, user=user)
|
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/analysis/heating/cell.py | Python | mit | 30,102 | 0.00485 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** Β© Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.analysis.celldustheating Contains the CellDustHeatingAnalyser class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
import rpy2.robjects as ro
# Import the relevant PTS classes and modules
from .component import DustHeatingAnalysisComponent
from ....core.tools import filesystem as fs
from ....core.tools.logging import log
from ....core.tools import tables, introspection
from ....core.simulation.table import SkirtTable
from ....core.basics.distribution import Distribution, Distribution2D
from ....core.simulation.wavelengthgrid import WavelengthGrid
# -----------------------------------------------------------------
class CellDustHeatingAnalyser(DustHeatingAnalysisComponent):
"""
This class...
"""
    def __init__(self, config=None):
        """
        The constructor: forwards *config* to the base component and
        initialises every analysis product attribute to None.
        :param config: optional configuration object for the base class
        :return:
        """
        # Call the constructor of the base class
        super(CellDustHeatingAnalyser, self).__init__(config)
        # -- Attributes --
        # The wavelength grid used for the simulations
        self.wavelength_grid = None
        # The number of wavelengths
        self.number_of_wavelengths = None
        # The table with the cell properties
        self.cell_properties = None
        # The table with the absorbed luminosities
        self.absorptions = None
        # The mask of cells for which the total absorbed luminosity is zero
        self.zero_absorption = None
        # The heating fraction of the unevolved stellar population for each dust cell
        self.heating_fractions = None
        # The distribution of heating fractions
        self.distribution = None
        # The 2D distribution of heating fractions
        self.radial_distribution = None
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
"""
This function ...
:param arguments:
:return:
"""
# Create a new HeatingAnalyser instance
analyser = cls()
# Set the modeling path
analyser.config.path = arguments.path
# Return the new instance
return analyser
# -----------------------------------------------------------------
    def run(self):
        """
        Execute the full cell dust-heating analysis pipeline, in order.
        :return:
        """
        # 1. Call the setup function
        self.setup()
        # 2. Load the wavelength grid
        self.load_wavelength_grid()
        # 3. Load the cell properties
        self.load_cell_properties()
        # 4. Load the absorption data
        self.load_absorption()
        # 5. Calculate the heating fraction of the unevolved stellar population
        self.calculate_heating_unevolved()
        # 6. Calculate the distribution of the heating fraction of the unevolved stellar population
        self.calculate_distribution()
        # 7. Calculate the distribution of the heating fraction of the unevolved stellar population as a function of radius
        self.calculate_radial_distribution()
        # 8. Writing
        self.write()
        # 9. Plotting
        self.plot()
# -----------------------------------------------------------------
    def setup(self):
        """
        Perform base-class setup (paths, configuration) before the analysis.
        :return:
        """
        # Call the setup function of the base class
        super(CellDustHeatingAnalyser, self).setup()
# -----------------------------------------------------------------
    def load_wavelength_grid(self):
        """
        Load the wavelength grid written by SKIRT for the 'total' simulation
        and record the number of wavelengths.
        :return:
        """
        # Inform the user
        log.info("Loading the wavelength grid file produced by SKIRT ...")
        # Determine the path to the wavelength grid file in heating/total
        wavelengths_path = fs.join(self.output_paths["total"], self.galaxy_name + "_wavelengths.dat")
        # Load the wavelength grid as a table
        self.wavelength_grid = WavelengthGrid.from_skirt_output(wavelengths_path)
        # Determine the number of wavelengths
        self.number_of_wavelengths = len(self.wavelength_grid)
# -----------------------------------------------------------------
    def load_cell_properties(self):
        """
        Load the per-dust-cell properties table produced by SKIRT.
        :return:
        """
        # Inform the user
        log.info("Loading the dust cell properties ...")
        # M81_ds_cellprops.dat
        # column 1: volume (pc3)
        # column 2: density (Msun/pc3)
        # column 3: mass fraction
        # column 4: optical depth
        # Determine the path to the cell properties file in heating/total
        properties_path = fs.join(self.output_paths["total"], self.galaxy_name + "_ds_cellprops.dat")
        # Load the properties table
        self.cell_properties = SkirtTable.from_file(properties_path)
# -----------------------------------------------------------------
def load_absorption(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the absorption data ...")
# Header:
# Bolometric absorbed luminosity for all dust cells
# column 1: x coordinate of cell center (pc)
# column 2: y coordinate of cell center (pc)
# column 3: | z coordinate of cell center (pc)
# column 4: Absorbed bolometric luminosity (W)
contribution_tables = dict()
# Loop over the different contributions
for contribution in | self.contributions:
# Skip the simulation of the total unevolved (young + ionizing) stellar population
if contribution == "unevolved": continue
# Debugging
log.debug("Loading the SKIRT absorption table for the simulation of the " + contribution + " stellar population ...")
# Determine the path to the output directory of the simulation
output_path = self.output_paths[contribution]
# Determine the path to the absorption data file
absorption_path = fs.join(output_path, self.galaxy_name + "_ds_abs.dat")
# Load the absorption table for this contribution
table = SkirtTable.from_file(absorption_path)
# Add the table
contribution_tables[contribution] = table
do_checks = False
if do_checks:
# Debugging
log.debug("Checking whether the tables are consistent ...")
# Check whether the table lengths match
table_lengths = [len(table) for table in contribution_tables.values()]
if not all(table_lengths[0] == length for length in table_lengths): raise ValueError("Absorption tables have different sizes")
# Check whether the X coordinates of the cells match
if not tables.equal_columns([table["X coordinate of cell center"] for table in contribution_tables.values()]):
raise ValueError("Columns of X coordinates of cell centers do not match between the different contributions")
# Check whether the Y coordinates of the cells match
if not tables.equal_columns([table["Y coordinate of cell center"] for table in contribution_tables.values()]):
raise ValueError("Columns of Y coordinates of cell centers do not match between the different contributions")
# Check whether the Z coordinates of the cells match
if not tables.equal_columns([table["Z coordinate of cell center"] for table in contribution_tables.values()]):
raise ValueError("Columns of Z coordinates of cell cen |
ActiveState/code | recipes/Python/498261_Deeply_applying_str_across/recipe-498261.py | Python | mit | 7,274 | 0.004812 | """Utilities for handling deep str()ingification.
Danny Yoo (dyoo@hkn.eecs.berkeley.edu)
Casual Usage:
from deepstr import deep_str
print deep_str(["hello", "world"])
Very unusual usage:
import deepstr
import xml.sax.saxutils
def handle_list(obj, deep_str):
if isinstance(obj, list):
results = []
results.append("<list>")
results.extend(["<item>%s</item>" % deep_str(x)
for x in obj])
results.append("</list>")
return ''.join(results)
def handle_int(obj, deep_str):
if isinstance(obj, int):
return "<int>%s</int>" % obj
def handle_string(obj, deep_str):
if isinstance(obj, str):
return ("<string>%s</string>" %
xml.sax.saxutils.escape(obj))
def handle_default(obj):
return "<unknown/>"
silly = deepstr.DeepStr(handle_default)
silly.recursive_str = deepstr.make_shallow_recursive_str(
"<recursion-detected/>")
silly.register(handle_list)
silly.register(handle_int)
silly.register(handle_string)
print silly([3, 1, "four", [1, "<five>", 9.0]])
x = []
x.append(x)
print silly(x)
This module provides a function called deep_str() that will do a deep
str() on objects. This module also provides utilities to develop
custom str() functions.
(What's largely undocumented is the fact that this isn't really about
strings, but can be used as a general --- and convoluted --- framework
for mapping some process across data.)
"""
import unittest
def make_shallow_recursive_str(recursion_label):
    """Return a handler that renders any recursive structure as
    *recursion_label*, ignoring the structure itself."""
    def shallow(obj, deep_str):
        return recursion_label
    return shallow
class DeepStr:
    """Configurable deep stringifier.

    Handlers registered via register() are tried in order; the first
    non-None result wins.  Unrecognized objects fall back to default_str,
    and objects already on the current traversal path are rendered with
    recursive_str so that cyclic structures terminate.
    """

    def __init__(self,
                 default_str=str,
                 recursive_str=make_shallow_recursive_str("...")):
        """
        DeepStr: stringify_function handler -> stringify_function

        default_str is the fallback used on types no handler recognizes;
        it must map an object to a string.  recursive_str is invoked on
        structure that has already been traversed (cycle detection).
        """
        self.handlers = []
        self.default_str = default_str
        self.recursive_str = recursive_str

    def __call__(self, obj):
        # FIX: the original docstring here was garbled into broken syntax
        # ('"" | "'); reconstructed from the surrounding recipe text.
        """Take an object and return a string rendering of it."""
        return self.deepstr(obj, {})

    def deepstr(self, obj, seen):
        """Render *obj*; *seen* maps id()s of objects on the current path."""
        if id(obj) in seen:
            # Cycle detected.  Hand the recursion handler a stringifier
            # whose 'seen' dictionary is empty, so it may still render
            # substructure if it chooses to.
            def fresh_deepstr(sub_obj):
                return self.deepstr(sub_obj, {})
            return self.recursive_str(obj, fresh_deepstr)

        def substructure_deepstr(sub_obj):
            # Record the current object in a *copy* of seen, so sibling
            # branches do not see each other's path entries.
            new_seen = dict(seen)
            new_seen[id(obj)] = True
            return self.deepstr(sub_obj, new_seen)

        for handler in self.handlers:
            rendered = handler(obj, substructure_deepstr)
            # FIX: idiomatic identity test instead of '!= None'.
            if rendered is not None:
                return rendered
        return self.default_str(obj)

    def register(self, handler):
        """Register a handler(obj, deep_str) -> string-or-None.

        The handler returns a string when it can render obj (using
        deep_str on substructures) and None otherwise."""
        self.handlers.append(handler)
######################################################################
## Below here is a sample implementation for deep_str()
def handle_list(obj, deep_str):
    """Render a list as "[a, b, ...]"; return None for non-lists."""
    if not isinstance(obj, list):
        return None
    return "[%s]" % ", ".join(deep_str(item) for item in obj)
def handle_tuple(obj, deep_str):
    """Render a tuple as "(a, b, ...)"; return None for non-tuples."""
    if not isinstance(obj, tuple):
        return None
    rendered_items = [deep_str(item) for item in obj]
    return "(%s)" % ", ".join(rendered_items)
def handle_dict(obj, deep_str):
    """Render a dict as "{k: v, ...}"; return None for non-dicts."""
    if not isinstance(obj, dict):
        return None
    pairs = ["%s: %s" % (deep_str(key), deep_str(value))
             for key, value in obj.items()]
    return "{%s}" % ", ".join(pairs)
def handle_recursion(obj, deep_str):
    """Shallow placeholder for structures that refer back to themselves."""
    # Tuples need no case: a tuple cannot be built to contain itself.
    placeholders = ((list, "[...]"), (dict, "{...}"))
    for container_type, marker in placeholders:
        if isinstance(obj, container_type):
            return marker
    return "..."
# Module-level stringifier wired with the sample handlers above.
# Handler order matters: the first handler returning non-None wins.
# Unknown types fall back to str(); self-references use handle_recursion.
deep_str = DeepStr(str, handle_recursion)
deep_str.register(handle_list)
deep_str.register(handle_tuple)
deep_str.register(handle_dict)
######################################################################
## Sample exercising code. This is here just to show a wacky example.
def _exercise():
    """Demo: render nested data as ad-hoc XML (Python 2; uses print statements)."""
    import xml.sax.saxutils
    # Each handler implicitly returns None for types it does not cover,
    # which tells DeepStr to try the next handler.
    def handle_list(obj, deep_str):
        if isinstance(obj, list):
            results = []
            results.append("<list>")
            results.extend(["<item>%s</item>" % deep_str(x)
                            for x in obj])
            results.append("</list>")
            return ''.join(results)
    def handle_int(obj, deep_str):
        if isinstance(obj, int):
            return "<int>%s</int>" % obj
    def handle_string(obj, deep_str):
        if isinstance(obj, str):
            # Escape <, >, & so the payload stays well-formed XML.
            return "<string>%s</string>" % xml.sax.saxutils.escape(obj)
    def handle_default(obj):
        return "<unknown/>"
    silly = DeepStr(handle_default)
    silly.recursive_str = make_shallow_recursive_str(
        "<recursion-detected/>")
    silly.register(handle_list)
    silly.register(handle_int)
    silly.register(handle_string)
    # 9.0 is a float, so it falls through to handle_default.
    print silly([3, 1, "four", [1, "<five>", 9.0]])
    x = []
    x.append(x)
    print silly(x)
######################################################################
## Test cases
class MyTests(unittest.TestCase):
    """Exercises the module-level deep_str stringifier."""
    def testSimpleThings(self):
        # Scalars fall through to the default str() renderer.
        for scalar in [42, 'hello', 0+1j, 2.3, u'world']:
            self.assertEquals(str(scalar), deep_str(scalar))
    def testSimpleLists(self):
        sample = [1, 2, 3]
        self.assertEquals(str(sample), deep_str(sample))
    def testListsWithStrings(self):
        self.assertEquals("[hello, world]", deep_str(["hello", "world"]))
    def testRepeatedObjects(self):
        # Two equal values are not recursion.
        self.assertEquals("[1, 1]", deep_str([1, 1]))
    def testRecursion(self):
        loop = [1, 2]
        loop.append(loop)
        self.assertEquals("[1, 2, [...]]", deep_str(loop))
    def testSimpleDict(self):
        self.assertEquals("{hello: world}", deep_str({'hello': 'world'}))
    def testDictWithRecursion(self):
        cyclic = {}
        cyclic[1] = cyclic
        self.assertEquals("{1: {...}}", deep_str(cyclic))
    def testNonRecursion(self):
        # The same sub-list appearing twice (not nested in itself) must
        # render fully both times.
        shared = ['a']
        self.assertEquals("[[a], [a]]", deep_str([shared, shared]))
if __name__ == '__main__':
unittest.main()
|
cstipkovic/spidermonkey-research | testing/mozharness/configs/web_platform_tests/prod_config_windows.py | Python | mpl-2.0 | 1,629 | 0.001842 | # ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
# This is a template config file for web-platform-tests test.
import os
import sys
# Fix: two option strings were corrupted by stray " | " insertions
# ("-- | prefs-root", "--host-cer | t-path"); wptrunner expects
# --prefs-root and --host-cert-path.
config = {
    # Command-line options handed to wptrunner; %(...)s placeholders are
    # interpolated by mozharness at run time.
    "options": [
        "--prefs-root=%(test_path)s/prefs",
        "--processes=1",
        "--config=%(test_path)s/wptrunner.ini",
        "--ca-cert-path=%(test_path)s/certs/cacert.pem",
        "--host-key-path=%(test_path)s/certs/web-platform.test.key",
        "--host-cert-path=%(test_path)s/certs/web-platform.test.pem",
        "--certutil-binary=%(test_install_path)s/bin/certutil",
    ],
    # Windows-specific executable locations.
    "exes": {
        'python': sys.executable,
        'virtualenv': [sys.executable, 'c:/mozilla-build/buildbotve/virtualenv.py'],
        'hg': 'c:/mozilla-build/hg/hg',
        'mozinstall': ['%s/build/venv/scripts/python' % os.getcwd(),
                       '%s/build/venv/scripts/mozinstall-script.py' % os.getcwd()],
        'tooltool.py': [sys.executable, 'C:/mozilla-build/tooltool.py'],
    },
    "find_links": [
        "http://pypi.pvt.build.mozilla.org/pub",
        "http://pypi.pub.build.mozilla.org/pub",
    ],
    "pip_index": False,
    "buildbot_json_path": "buildprops.json",
    "default_blob_upload_servers": [
        "https://blobupload.elasticbeanstalk.com",
    ],
    "blob_uploader_auth_file" : os.path.join(os.getcwd(), "oauth.txt"),
    "download_minidump_stackwalk": True,
}
|
sandordargo/family-tree | tests/test_horizontal_sorter.py | Python | mit | 14,566 | 0.004668 | import unittest
from tree import horizontal_sorter
from tree import person_node
class TestHorizontalSorter(unittest.TestCase):
    def setUp(self):
        # Fresh, empty edge mapping shared by tests via self.edges.
        self.edges = {}
@staticmethod
def only_even_numbers_in_dict(dict_to_check):
for key in dict_to_check:
if dict_to_check[key] % 2 != 0:
return False
return True
    @staticmethod
    def build_person_nodes_dict(person_horizontal_position_dict):
        # Build {person_id (int): PersonNode} fixtures carrying the given
        # horizontal positions; all other node fields keep their defaults.
        person_nodes = dict()
        for person_id, position in person_horizontal_position_dict.iteritems():
            new_person = person_node.PersonNode()
            new_person.person_id = int(person_id)
            new_person.horizontal_position = position
            person_nodes[int(person_id)] = new_person
        return person_nodes
@staticmethod
def assert_list_differences(list_to_check, difference_between_adjacent_elements):
"""
True if the difference between each adjacent element in the given one
There should be len(list)-1 of occurences of the given difference if we check all the possible combinationss
between the passed positions
This is not efficient for long lists, but we don't expect to have so many siblings
:param list_to_check: List of horizontal positions
:param difference_between_adjacent_elements:
:return: True if assertion is valid, False otherwise
"""
differences = dict()
for n in range(0, len(list_to_check), 1):
for m in range(1, len(list_to_check[n:]), 1):
diff = abs(list_to_check[n] - list_to_check[n+m])
if diff in differences:
differences[diff] += 1
else:
differences[diff] = 1
print(differences)
difference_existance = 1
for n in range(len(list_to_check) - 1, 0, -1):
if differences[n * difference_between_adjacent_elements] != difference_existance:
return False
difference_existance += 1
return True
    def test_move(self):
        # Per the expected dicts, move('4', 2) shifts person '4' from
        # horizontal position 3 to position 5 and updates both the
        # position->people and person->position bookkeeping dicts.
        position_person_dict = {1: ['5', '3'], 2: ['1', '2'], 3: ['4']}
        person_level_dict = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 3}
        person_horizontal_position_dict = {'1': 2, '3': 1, '2': 2, '5': 1, '4': 3}
        person_nodes = self.build_person_nodes_dict(person_horizontal_position_dict)
        sorter = horizontal_sorter.HorizontalSorter(person_level_dict, self.edges, person_nodes)
        sorter.person_horizontal_position_dict = person_horizontal_position_dict
        sorter.position_person_dict = position_person_dict
        sorter.move('4', 2)
        expected_position_person_dict = {1: ['5', '3'], 2: ['1', '2'], 3: [], 5: ['4']}
        expected_person_horizontal_position_dict = {'1': 2, '3': 1, '2': 2, '5': 1, '4': 5}
        self.assertEqual(expected_position_person_dict, sorter.position_person_dict)
        self.assertEqual(expected_person_horizontal_position_dict, sorter.person_horizontal_position_dict)
def test_move_to_occupied(self):
position_person_dict = {1: ['5'], 2: ['1', '2'], 3: ['4', '3']}
person_level_dict = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 3}
| person_horizontal_position_dict = {'1': 2, '3': 3, '2': 2, '5': 1, '4': 3}
person_nodes = self.build_person_nodes_dict(person_horizontal_position_dict)
sorter = horizontal_sorter.HorizontalSorter(person_level_dict, self.edges, person_nodes)
sorter.person_horizontal_positi | on_dict = person_horizontal_position_dict
sorter.position_person_dict = position_person_dict
sorter.move('5', 2)
expected_position_person_dict = {1: [], 2: ['1', '2'], 3: ['4', '5'], 5: ['3']}
expected_person_horizontal_position_dict = {'1': 2, '3': 5, '2': 2, '5': 3, '4': 3}
self.assertEqual(expected_position_person_dict, sorter.position_person_dict)
self.assertEqual(expected_person_horizontal_position_dict, sorter.person_horizontal_position_dict)
def test_put_siblings_next_each_other_bottom_up(self):
position_person_dict = {1: ['5'], 2: ['1', '2'], 3: ['4'], 5: ['3']}
person_level_dict = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 3}
person_horizontal_position_dict = {'1': 2, '3': 5, '2': 2, '5': 1, '4': 3}
person_nodes = self.build_person_nodes_dict(person_horizontal_position_dict)
person_nodes[3].siblings = ['5']
person_nodes[5].siblings = ['3']
sorter = horizontal_sorter.HorizontalSorter(person_level_dict, self.edges, person_nodes)
sorter.person_horizontal_position_dict = person_horizontal_position_dict
sorter.position_person_dict = position_person_dict
sorter.put_siblings_next_each_other_bottom_up()
expected_difference = 2
positions_to_check = list()
positions_to_check.append(sorter.person_horizontal_position_dict['5'])
positions_to_check.append(sorter.person_horizontal_position_dict['3'])
self.assertTrue(self.assert_list_differences(positions_to_check, expected_difference))
#retrieved_difference = abs(sorter.person_horizontal_position_dict['5'] - sorter.person_horizontal_position_dict['3'])
#self.assertEqual(expected_difference, retrieved_difference)
def test_put_siblings_next_each_other_bottom_up_three_bros(self):
position_person_dict = {1: ['5'], 2: ['1', '2'], 3: ['4', '6'], 5: ['3']}
person_level_dict = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 3, '6': 3}
person_horizontal_position_dict = {'1': 2, '3': 5, '2': 2, '5': 1, '4': 3, '6': 3}
person_nodes = self.build_person_nodes_dict(person_horizontal_position_dict)
person_nodes[3].siblings = ['5', '6']
person_nodes[5].siblings = ['3', '6']
person_nodes[6].siblings = ['3', '5']
sorter = horizontal_sorter.HorizontalSorter(person_level_dict, self.edges, person_nodes)
sorter.person_horizontal_position_dict = person_horizontal_position_dict
sorter.position_person_dict = position_person_dict
sorter.put_siblings_next_each_other_bottom_up()
# expected_differences = [2, 2, 4]
# retrieved_differences = list()
# retrieved_differences.append(abs(
# sorter.person_horizontal_position_dict['5'] - sorter.person_horizontal_position_dict['3']))
# retrieved_differences.append(abs(
# sorter.person_horizontal_position_dict['6'] - sorter.person_horizontal_position_dict['3']))
# retrieved_differences.append(abs(
# sorter.person_horizontal_position_dict['6'] - sorter.person_horizontal_position_dict['5']))
# retrieved_differences.sort()
expected_difference = 2
positions_to_check = list()
positions_to_check.append(sorter.person_horizontal_position_dict['5'])
positions_to_check.append(sorter.person_horizontal_position_dict['3'])
positions_to_check.append(sorter.person_horizontal_position_dict['6'])
self.assertTrue(self.assert_list_differences(positions_to_check, expected_difference))
#self.assertEqual(expected_differences, retrieved_differences)
    # Both methods below are disabled (renamed away from the "test_" prefix
    # unittest discovers); they target an older two-argument
    # HorizontalSorter constructor -- confirm before re-enabling.
    def not_test_are_there_persons_at_the_same_positions_false(self):
        position_person_dict = {1: ['5', '3'], 2: ['1', '2'], 3: ['4']}
        person_level_dict = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 4}
        edges = {}
        sorter = horizontal_sorter.HorizontalSorter(person_level_dict, edges)
        self.assertFalse(sorter.are_there_persons_at_the_same_positions(position_person_dict))
    def not_test_are_there_persons_at_the_same_positions_true(self):
        # Same data except '5' moves to level 3 -- presumably creating a
        # positional clash with '3'; verify against the sorter implementation.
        position_person_dict = {1: ['5', '3'], 2: ['1', '2'], 3: ['4']}
        person_level_dict = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 3}
        edges = {}
        sorter = horizontal_sorter.HorizontalSorter(person_level_dict, edges)
        self.assertTrue(sorter.are_there_persons_at_the_same_positions(position_person_dict))
def not_test_get_level_of_person(self):
person_level_dict = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 4}
edges = {}
sorter = horizontal_sorter.Ho |
google-research/google-research | cache_replacement/policy_learning/cache/traces/train_test_split.py | Python | apache-2.0 | 4,756 | 0.012616 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
# Lint as: python3
"""Filters an access trace.
Given a CSV file containing (pc, address) in hex, filters the file to only
include the desired cache set accesses and splits the resulting trace into
train (80%) / valid (10%) / test (10%).
Example usage:
Suppose that the access trace exists at /path/to/file.csv
Results in the following three files: train.csv, valid.csv, test.csv.
python3 filter.py /path/to/file.csv
"""
import argparse
import csv
import os
import subprocess
import numpy as np
import tqdm
if __name__ == "__main__":
    # The cache sets used in the paper:
    # An Imitation Learning Approach to Cache Replacement
    PAPER_CACHE_SETS = [6, 35, 38, 53, 67, 70, 113, 143, 157, 196, 287, 324, 332,
                        348, 362, 398, 406, 456, 458, 488, 497, 499, 558, 611,
                        718, 725, 754, 775, 793, 822, 862, 895, 928, 1062, 1086,
                        1101, 1102, 1137, 1144, 1175, 1210, 1211, 1223, 1237,
                        1268, 1308, 1342, 1348, 1353, 1424, 1437, 1456, 1574,
                        1599, 1604, 1662, 1683, 1782, 1789, 1812, 1905, 1940,
                        1967, 1973]

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "access_trace_filename", help="Local path to the access trace to filter.")
    # nargs="+"/type=int so the flag is actually usable from the CLI; the
    # default list is unaffected.
    parser.add_argument(
        "-s", "--cache_sets", nargs="+", type=int, default=PAPER_CACHE_SETS,
        help=("Specifies which cache sets to keep. Defaults to the 64 sets used"
              " in the paper."))
    # Fix: this argument line was corrupted by a stray " | " after the flag
    # name (a SyntaxError).  type=int added so CLI values match the int
    # defaults used in the arithmetic below.
    parser.add_argument(
        "-a", "--associativity", type=int, default=16,
        help="Associativity of the cache.")
    parser.add_argument(
        "-c", "--capacity", type=int, default=2 * 1024 * 1024,
        help="Capacity of the cache.")
    parser.add_argument(
        "-l", "--cache_line_size", type=int, default=64,
        help="Size of the cache lines in bytes.")
    parser.add_argument(
        "-b", "--batch_size", type=int, default=32,
        help=("Ensures that train.csv, valid.csv, and test.csv contain a number"
              " of accesses that is a multiple of this. Use 1 to avoid this."))
    args = parser.parse_args()

    PREFIX = "_filter_traces"
    # Refuse to clobber any file this script is about to produce.
    output_filenames = ["train.csv", "valid.csv", "test.csv", "all.csv"]
    output_filenames += [PREFIX + str(i) for i in range(10)]
    for output_filename in output_filenames:
        if os.path.exists(output_filename):
            raise ValueError(f"File {output_filename} already exists.")

    # Address layout: | tag | set index | cache-line offset |
    num_cache_lines = args.capacity // args.cache_line_size
    num_sets = num_cache_lines // args.associativity
    cache_bits = int(np.log2(args.cache_line_size))
    set_bits = int(np.log2(num_sets))

    num_lines = 0
    accepted_cache_sets = set(args.cache_sets)
    # Keep only accesses mapping to one of the accepted cache sets.
    with open("all.csv", "w") as write:
        with open(args.access_trace_filename, "r") as read:
            for pc, address in tqdm.tqdm(csv.reader(read)):
                pc = int(pc, 16)
                address = int(address, 16)
                aligned_address = address >> cache_bits
                set_id = aligned_address & ((1 << set_bits) - 1)
                if set_id in accepted_cache_sets:
                    num_lines += 1
                    write.write(f"0x{pc:x},0x{address:x}\n")

    split_length = num_lines // 10
    # Make split_length a multiple of batch_size
    split_length = split_length // args.batch_size * args.batch_size
    cmd = f"split -l {split_length} --numeric-suffixes all.csv {PREFIX}"
    print(cmd)
    subprocess.run(cmd, check=True, shell=True)
    # Removes the extra accesses that don't fit into batch_size multiples.
    cmd = f"wc -l {PREFIX}10"
    print(cmd)
    subprocess.run(cmd, check=True, shell=True)
    cmd = f"rm {PREFIX}10"
    print(cmd)
    subprocess.run(cmd, check=True, shell=True)
    # Last split is test; second last is valid; first 8 splits are train.
    cmd = f"mv {PREFIX}09 test.csv"
    print(cmd)
    subprocess.run(cmd, check=True, shell=True)
    cmd = f"mv {PREFIX}08 valid.csv"
    print(cmd)
    subprocess.run(cmd, check=True, shell=True)
    cmd = f"cat {PREFIX}0[0-7] > train.csv"
    print(cmd)
    subprocess.run(cmd, check=True, shell=True)
    # Clean up
    cmd = f"rm {PREFIX}0[0-7]"
    print(cmd)
    subprocess.run(cmd, check=True, shell=True)
    cmd = "rm all.csv"
    print(cmd)
    subprocess.run(cmd, check=True, shell=True)
CyriusG/frontpage_backend | frontpage_backend/settings.template.py | Python | mit | 4,611 | 0.001301 | """
Django settings for frontpage_backend project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#e!xliikbaxc^&o!a8-@z9d(2p74x@kobt07o4y3z++$1&*1o5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Local apps
'movie',
'request',
'show',
'login',
# Third party
'corsheaders',
'rest_framework',
'django_cron',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'frontpage_backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')], # This makes it OS independent
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'frontpage_backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# REST Framework configuration settings.
REST_FRAMEWORK = {
# Disable the browsable api.
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Stockholm'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', 'our_static'),
)
# Authentication
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_DOMAIN = 'localhost'
SESSION_COOKIE_HTTPONLY = True
# Notification
MAILGUN_ENDPOINT = ''
MAILGUN_API_KEY = ''
NOTIFICATION_SENDER = ''
# CronJob
CRON_INTERVAL = 5
CRON_CLASSES = [
'movie.MovieAvailability',
]
# Plex settings
PLEX_OWNER = ''
PLEX_OWNER_TOKEN = ''
# Fix: "PLEX_HOST | =" was corrupted by a stray " | " (SyntaxError).
PLEX_HOST = '127.0.0.1'
PLEX_PORT = '32400'
# Couchpotato settings
COUCHPOTATO_HOST = '127.0.0.1'
# Fix: "COUCHPOTATO | _PORT" had its identifier split by a stray " | ".
COUCHPOTATO_PORT = '5050'
COUCHPOTATO_API_KEY = ''
# Sonarr settings
SONARR_HOST = '127.0.0.1'
SONARR_PORT = '8989'
SONARR_API_KEY = ''
SONARR_PATH = ''
SONARR_QUALITY = 1
# CORS Settings
CORS_ORIGIN_ALLOW_ALL = False
# Change this to whatever domain the cross origin requests originate from.
# django-cors-headers expects a sequence of origins; the previous
# ('localhost') was a bare string, not a one-element tuple.
CORS_ORIGIN_WHITELIST = (
    'localhost',
)
# Without this setting the frontend can't send cookies to the backend.
CORS_ALLOW_CREDENTIALS = True
|
TheAlgorithms/Python | conversions/decimal_to_octal.py | Python | mit | 1,211 | 0 | """Convert a Decimal Number to an Octal Number."""
import math
# Modified from:
# https://github.com/TheAlgorithms/Javascript/blob/master/Conversions/DecimalToOctal.js
def decimal_to_octal(num: int) -> str:
    """Convert a Decimal Number to an Octal Number.

    Uses exact integer arithmetic; the previous math.pow/math.floor
    version went through floats and gave wrong digits once intermediate
    values exceeded 2**53.

    >>> all(decimal_to_octal(i) == oct(i) for i
    ...     in (0, 2, 8, 64, 65, 216, 255, 256, 512))
    True
    """
    if num <= 0:
        # Mirrors the original behavior: 0 (and negative inputs, for which
        # the original loop never ran) map to "0o0".
        return "0o0"
    digits = []
    while num > 0:
        digits.append(str(num % 8))
        num //= 8  # shift one octal digit to the right
    return "0o" + "".join(reversed(digits))
def main() -> None:
    """Print octal equivalents of a few sample decimal numbers."""
    for number in (2, 8, 65, 216, 512):
        print(f"\n{number} in octal is:")
        print(decimal_to_octal(number))
    print("\n")
if __name__ == "__main__":
main()
|
opencord/voltha | voltha/extensions/alarms/olt/olt_los_alarm.py | Python | apache-2.0 | 1,492 | 0.002681 | # Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this f | ile except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES | OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
from voltha.extensions.alarms.adapter_alarms import AlarmBase
class OltLosAlarm(AlarmBase):
    """Loss-of-signal (LOS) communication alarm raised against an OLT
    interface; the port may be either NNI or PON."""
    def __init__(self, alarm_mgr, intf_id, port_type_name):
        # alarm_mgr: adapter alarm manager handed to AlarmBase.
        # intf_id: identifier of the interface the LOS was detected on.
        # port_type_name: human-readable port type (NNI or PON).
        super(OltLosAlarm, self).__init__(alarm_mgr, object_type='olt LOS',
                                          alarm='OLT_LOS',
                                          alarm_category=AlarmEventCategory.OLT,
                                          alarm_type=AlarmEventType.COMMUNICATION,
                                          alarm_severity=AlarmEventSeverity.MAJOR)
        # Added port type to indicate if alarm was on NNI or PON
        self._intf_id = intf_id
        self._port_type_name = port_type_name
    def get_context_data(self):
        """Return the context fields embedded in the emitted alarm event."""
        # NOTE(review): the trailing ':' inside the 'olt-intf-id:' key looks
        # unintentional -- confirm downstream consumers before changing it.
        return {'olt-intf-id:': self._intf_id,
                'olt-port-type-name': self._port_type_name}
|
vincepandolfo/django | django/db/models/options.py | Python | bsd-3-clause | 29,986 | 0.001301 | from __future__ import unicode_literals
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name',
'required_db_features', 'required_db_vendor')
def normalize_together(option_together):
    """
    Normalize ``option_together`` -- either a tuple of tuples or a single
    tuple of two strings -- to a tuple of tuples, so that calling code can
    uniformly expect the nested form.

    Invalid values are handed back verbatim for the check framework to
    report later.
    """
    try:
        if not option_together:
            return ()
        if not isinstance(option_together, (tuple, list)):
            raise TypeError
        head = next(iter(option_together))
        if isinstance(head, (tuple, list)):
            groups = option_together
        else:
            # A single flat group: wrap it so both shapes normalize alike.
            groups = (option_together,)
        return tuple(tuple(group) for group in groups)
    except TypeError:
        # If the value of option_together isn't valid, return it verbatim;
        # this will be picked up by the check framework later.
        return option_together
def make_immutable_fields_list(name, data):
    # Wrap ``data`` so accidental mutation of a cached fields list raises a
    # warning naming the offending property (see IMMUTABLE_WARNING above).
    return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
FORWARD_PROPERTIES = {'fields', 'many_to_many', 'concrete_fields',
'local_concrete_fields', '_forward_fields_map'}
REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
default_apps = apps
    def __init__(self, meta, app_label=None):
        """Set the default value for every model option; ``meta`` is the
        model's inner ``class Meta`` (or None), consumed later by
        contribute_to_class()."""
        # Per-instance cache for _get_fields(); keyed by query parameters.
        self._get_fields_cache = {}
        self.local_fields = []
        self.local_many_to_many = []
        self.virtual_fields = []
        self.model_name = None
        self.verbose_name = None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self._ordering_clash = False
        self.unique_together = []
        self.index_together = []
        self.select_on_save = False
        self.default_permissions = ('add', 'change', 'delete')
        self.permissions = []
        self.object_name = None
        self.app_label = app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.required_db_features = []
        self.required_db_vendor = None
        self.meta = meta
        self.pk = None
        self.has_auto_field = False
        self.auto_field = None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = OrderedDict()
        self.auto_created = False
        # To handle various inheritance situations, we need to track where
        # managers came from (concrete or abstract base classes). `managers`
        # keeps a list of 3-tuples of the form:
        # (creation_counter, instance, abstract(=True))
        self.managers = []
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
        # A custom app registry to use, if you're making a separate model set.
        self.apps = self.default_apps
        self.default_related_name = None
    @property
    def label(self):
        # Dotted identifier "app_label.ObjectName", e.g. "auth.User".
        return '%s.%s' % (self.app_label, self.object_name)
    @property
    def label_lower(self):
        # Lower-cased variant: "app_label.modelname".
        return '%s.%s' % (self.app_label, self.model_name)
    @property
    def app_config(self):
        # Don't go through get_app_config to avoid triggering imports.
        return self.apps.app_configs.get(self.app_label)
    @property
    def installed(self):
        # True when the model's app is present in the app registry.
        return self.app_config is not None
    @property
    def abstract_managers(self):
        # (creation_counter, name, instance) triples for managers that came
        # from abstract base classes (see the `managers` note in __init__).
        return [
            (counter, instance.name, instance) for counter, instance, abstract
            in self.managers if abstract
        ]
    @property
    def concrete_managers(self):
        # Same triples, restricted to managers defined on concrete classes.
        return [
            (counter, instance.name, instance) for counter, instance, abstract
            in self.managers if not abstract
        ]
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
# order_with_respect_and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join |
eykomm/ctf_prep | scripts/tcp_client.py | Python | apache-2.0 | 246 | 0.028455 | import socket
target_host=""
target_port=
client=socke | t.socket(socket.AF_INET,socket.SOCK_STREAM)
client.connec | t((target_host,target_port))
client.send("GET / HTTP/1.1\r\nHost: google.com\r\n\r\n")
response=client.recv(4096)
print response |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.