repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
alexisbellido/dockerize-django | python-click/example.py | Python | bsd-3-clause | 1,458 | 0.005487 | # -- coding: utf-8 --
import click
import yaml
import json
@click.command()
@click.option('-n', '--accession_number', help='Accession number')
@click.option('-e', '--environment', default='local', help='Environment.')
@click.option('--input', type=click.File('r'), required=False)
@click.option('--dryrun', is_flag=True, help='Dry run, nothing is actually changed.')
def process(environment, dryrun, accession_number=None, input=None):
"""A simple script."""
with open('config.yaml', 'r') as f:
config = yaml.load(f)
if not accession_number and not input:
click.echo('Nothing to process. Pass an accession number or input file\n')
exit()
click.echo(config)
accession_numbers = []
if accession_number:
accession_numbers.append(accession_number)
elif input:
for accession_number in input:
accession_numbers.append(accession_number.rstrip())
count=0
for accession_number in acc | ession_numbers:
count += 1
click.echo('===========================================================\n')
click.echo('{count} - Running for {accession_number} on {environment}\n'.format(
count=count,
accession_number=accession_number,
environment=environment |
))
if dryrun:
click.echo("\nDry run...\n")
else:
click.echo("\nActual run...\n")
if __name__ == '__main__':
process()
|
jeremiahyan/odoo | addons/gamification/models/gamification_badge.py | Python | gpl-3.0 | 9,182 | 0.002941 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from datetime import date
from odoo import api, fields, models, _, exceptions
_logger = logging.getLogger(__name__)
class GamificationBadge(models.Model):
"""Badge object that users can send and receive"""
CAN_GRANT = 1
NOBODY_CAN_GRANT = 2
USER_NOT_VIP = 3
BADGE_REQUIRED = 4
TOO_MANY = 5
_name = 'gamification.badge'
_description = 'Gamification Badge'
_inherit = ['mail.thread', 'image.mixin']
name = fields.Char('Badge', required=True, translate=True)
active = fields.Boolean('Active', default=True)
description = fields.Html('Description', translate=True)
level = fields.Selection([
('bronze', 'Bronze'), ('silver', 'Silver'), ('gold', 'Gold')],
string='Forum Badge Level', default='bronze')
rule_auth = fields.Selection([
('everyone', 'Everyone'),
('users', 'A selected list of users'),
('having', 'People having some badges'),
('nobody', 'No one, assigned through challenges'),
], default='everyone',
string="Allowance to Grant", help="Who can grant this badge", required=True)
rule_auth_user_ids = fields.Many2many(
'res.users', 'rel_badge_auth_users',
string='Authorized Users',
help="Only these people can give this badge")
rule_auth_badge_ids = fields.Many2many(
'gamification.badge', 'gamification_badge_rule_badge_rel', 'badge1_id', 'badge2_id',
string='Required Badges',
help="Only the people having these badges can give this badge")
rule_max = fields.Boolean('Monthly Limited Sending', help="Check to set a monthly limit per person of sending this badge")
rule_max_number = fields.Integer('Limitation Number', help="The maximum number of time this badge can be sent per month per person.")
challenge_ids = fields.One2many('gamification.challenge', 'reward_id', string="Reward of Challenges")
goal_definition_ids = fields.Many2many(
'gamification.goal.definition', 'badge_unlocked_definition_rel',
string='Rewarded by', help="The users that have succeeded theses goals will receive automatically the badge.")
owner_ids = fields.One2many(
'gamification.badge.user', 'badge_id',
string='Owners', help='The list of instances of this badge granted to users')
granted_count = fields.Integer("Total", compute='_get_owners_info', help="The number of time this badge has been received.")
granted_users_count = fields.Integer("Number of users", compute='_get_owners_info', help="The number of time this badge has been received by unique users.")
unique_owner_ids = fields.Many2many(
'res.users', string="Unique Owners", compute='_get_owners_info',
help="The list of unique users having received this badge.")
stat_this_month = fields.Integer(
"Monthly total", compute='_get_badge_user_stats',
help="The number of time this badge has been received this month.")
stat_my = fields.Integer(
"My Total", compute='_get_badge_user_stats',
help="The number of time the current user has received this badge.")
stat_my_this_month = fields.Integer(
"My Monthly Total", compute='_ | get_badge_user_stats',
help="The number of time the current user has received this badge this month.")
stat_my_monthly_sending = fields.Integer(
'My Monthly Sending Total',
compute='_get_badge_user_stats',
help="The number of time the current user has sent this badge this month. | ")
remaining_sending = fields.Integer(
"Remaining Sending Allowed", compute='_remaining_sending_calc',
help="If a maximum is set")
@api.depends('owner_ids')
def _get_owners_info(self):
"""Return:
the list of unique res.users ids having received this badge
the total number of time this badge was granted
the total number of users this badge was granted to
"""
defaults = {
'granted_count': 0,
'granted_users_count': 0,
'unique_owner_ids': [],
}
if not self.ids:
self.update(defaults)
return
Users = self.env["res.users"]
query = Users._where_calc([])
Users._apply_ir_rules(query)
badge_alias = query.join("res_users", "id", "gamification_badge_user", "user_id", "badges")
tables, where_clauses, where_params = query.get_sql()
self.env.cr.execute(
f"""
SELECT {badge_alias}.badge_id, count(res_users.id) as stat_count,
count(distinct(res_users.id)) as stat_count_distinct,
array_agg(distinct(res_users.id)) as unique_owner_ids
FROM {tables}
WHERE {where_clauses}
AND {badge_alias}.badge_id IN %s
GROUP BY {badge_alias}.badge_id
""",
[*where_params, tuple(self.ids)]
)
mapping = {
badge_id: {
'granted_count': count,
'granted_users_count': distinct_count,
'unique_owner_ids': owner_ids,
}
for (badge_id, count, distinct_count, owner_ids) in self.env.cr._obj
}
for badge in self:
badge.update(mapping.get(badge.id, defaults))
@api.depends('owner_ids.badge_id', 'owner_ids.create_date', 'owner_ids.user_id')
def _get_badge_user_stats(self):
"""Return stats related to badge users"""
first_month_day = date.today().replace(day=1)
for badge in self:
owners = badge.owner_ids
badge.stat_my = sum(o.user_id == self.env.user for o in owners)
badge.stat_this_month = sum(o.create_date.date() >= first_month_day for o in owners)
badge.stat_my_this_month = sum(
o.user_id == self.env.user and o.create_date.date() >= first_month_day
for o in owners
)
badge.stat_my_monthly_sending = sum(
o.create_uid == self.env.user and o.create_date.date() >= first_month_day
for o in owners
)
@api.depends(
'rule_auth',
'rule_auth_user_ids',
'rule_auth_badge_ids',
'rule_max',
'rule_max_number',
'stat_my_monthly_sending',
)
def _remaining_sending_calc(self):
"""Computes the number of badges remaining the user can send
0 if not allowed or no remaining
integer if limited sending
-1 if infinite (should not be displayed)
"""
for badge in self:
if badge._can_grant_badge() != self.CAN_GRANT:
# if the user cannot grant this badge at all, result is 0
badge.remaining_sending = 0
elif not badge.rule_max:
# if there is no limitation, -1 is returned which means 'infinite'
badge.remaining_sending = -1
else:
badge.remaining_sending = badge.rule_max_number - badge.stat_my_monthly_sending
def check_granting(self):
"""Check the user 'uid' can grant the badge 'badge_id' and raise the appropriate exception
if not
Do not check for SUPERUSER_ID
"""
status_code = self._can_grant_badge()
if status_code == self.CAN_GRANT:
return True
elif status_code == self.NOBODY_CAN_GRANT:
raise exceptions.UserError(_('This badge can not be sent by users.'))
elif status_code == self.USER_NOT_VIP:
raise exceptions.UserError(_('You are not in the user allowed list.'))
elif status_code == self.BADGE_REQUIRED:
raise exceptions.UserError(_('You do not have the required badges.'))
elif status_code == self.TOO_MANY:
raise exceptions.UserError(_('You have already sent this badge too many time this month.'))
else:
_logger.error("Unknown badge status code: %s" % status_code)
return False
def _can_gr |
sharad/calibre | src/calibre/customize/builtins.py | Python | gpl-3.0 | 66,256 | 0.007274 | # -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import os, glob, functools, re
from calibre import guess_type
from calibre.customize import (FileTypePlugin, MetadataReaderPlugin,
MetadataWriterPlugin, PreferencesPlugin, InterfaceActionBase, StoreBase)
from calibre.constants import numeric_version
from calibre.ebooks.metadata.archive import ArchiveExtract, get_comic_metadata
from calibre.ebooks.html.to_zip import HTML2ZIP
plugins = []
# To archive plugins {{{
class PML2PMLZ(FileTypePlugin):
name = 'PML to PMLZ'
author = 'John Schember'
description = _('Create a PMLZ archive containing the PML file '
'and all images in the directory pmlname_img or images. '
'This plugin is run every time you add '
'a PML file to the library.')
version = numeric_version
file_types = set(['pml'])
supported_platforms = ['windows', 'osx', 'linux']
on_import = True
def run(self, pmlfile):
import zipfile
of = self.temporary_file('_plugin_pml2pmlz.pmlz')
pmlz = zipfile.ZipFile(of.name, 'w')
pmlz.write(pmlfile, os.path.basename(pmlfile), zipfile.ZIP_DEFLATED)
pml_img = os.path.splitext(pmlfile)[0] + '_img'
i_img = os.path.join(os.path.dirname(pmlfile),'images')
img_dir = pml_img if os.path.isdir(pml_img) else i_img if \
os.path.isdir(i_img) else ''
if img_dir:
for image in glob.glob(os.path.join(img_dir, '*.png')):
pmlz.write(image, os.path.join('images', (os.path.basename(image))))
pmlz.close()
return of.name
class TXT2TXTZ(FileTypePlugin):
name = 'TXT to TXTZ'
author = 'John Schember'
description = _('Create a TXTZ archive when a TXT file is imported '
'containing Markdown or Textile references to images. The referenced '
'images as well as the TXT file are added to the archive.')
version = numeric_version
file_types = set(['txt', 'text'])
supported_platforms = ['windows', 'osx', 'linux']
on_import = True
def _get_image_references(self, txt, base_dir):
from calibre.ebooks.oeb.base import OEB_IMAGES
images = []
# Textile
for m in re.finditer(ur'(?mu)(?:[\[{])?\!(?:\. )?(?P<path>[^\s(!]+)\s?(?:\(([^\)]+)\))?\!(?::(\S+))?(?:[\]}]|(?=\s|$))', txt):
path = m.group('path')
if path and not os.path.isabs(path) and guess_type(path)[0] in OEB_IMAGES and os.path.exists(os.path.join(base_dir, path)):
images.append(path)
# Markdown inline
for m in re.finditer(ur'(?mu)\!\[([^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*)\]\s*\((?P<path>[^\)]*)\)', txt): # noqa
path = m.group('path')
if path and not os.path.isabs(path) and guess_type(path)[0] in OEB_IMAGES and os.path.exists(os.path.join(base_dir, path)):
images.append(path)
# Markdown reference
refs = {}
for m in re.finditer(ur'(?mu)^(\ ?\ ?\ ?)\[(?P<id>[^\]]*)\]:\s*(?P<path>[^\s]*)$', txt):
if m.group('id') and m.group('path'):
refs[m.group('id')] = m.group('path')
for m in re.finditer(ur'(?mu)\!\[([^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*(\[[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*\])*[^\]\[]*)\]\s*\[(?P<id>[^\]]*)\]', txt): # noqa
path = refs.get(m.group('id'), None)
if path and not os.path.isabs(path) and guess_type(path)[0] in OEB_IMAGES and os.path.exists(os.path.join(base_dir, path)):
images.append(path)
# Remove duplicates
return list(set(images))
def run(self, path_to_ebook):
from calibre.ebooks.metadata.opf2 import metadata_to_opf
with open(path_to_ebook, 'rb') as ebf:
txt = ebf.read()
base_dir = os.path.dirname(path_to_ebook)
images = self._get_image_references(txt, base_dir)
if images:
# Create TXTZ and put file plus images inside of it.
import zipfile
of = self.temporary_file('_plugin_txt2txtz.txtz')
txtz = zipfile.ZipFile(of.name, 'w')
# Add selected TXT file to archive.
txtz.write(path_to_ebook, os.path.basename(path_to_ebook), zipfile.ZIP_DEFLATED)
# metadata.opf
if os.path.exists(os.path.join(base_dir, 'metadata.opf')):
txtz.write(os.path.join(base_dir, 'metadata.opf'), 'metadata.opf', zipfile.ZIP_DEFLATED)
else:
from calibre.ebooks.metadata.txt import get_metadata
with open(path_to_ebook, 'rb') as ebf:
mi = get_metadata(ebf)
opf = metadata_to_opf(mi)
txtz.writestr('metadata.opf', opf, zipfile.ZIP_DEFLATED)
# images
for image in images:
txtz.write(os.path.join(base_dir, image), image)
txtz.close()
return of.name
else:
# No images so just import the TXT file.
return path_to_ebook
plugins += [HTML2ZIP, PML2PMLZ, TXT2TXTZ, ArchiveExtract,]
# }}}
# Metadata reader plugins {{{
class ComicMetadataReader(MetadataReaderPlugin):
name = 'Read comic metadata'
file_types = set(['cbr', 'cbz'])
description = _('Extract cover from comic files')
def customization_help(self, gui=False):
return 'Read series number from volume or issue number. Default is volume, set this to issue to use issue number instead.'
def get_metadata(self, stream, ftype):
if hasattr(stream, 'seek') and hasattr(stream, 'tell'):
pos = stream.tell()
id_ = stream.read(3)
stream.seek(pos)
if id_ == b'Rar':
ftype = 'cbr'
elif id_.startswith(b'PK'):
ftype = 'cbz'
if ftype == 'cbr':
from calibre.utils.unrar import extract_first_alphabetically as extract_first
extract_first
else:
from calibre.libunzip import extract_member
extract_first = functools.partial(extract_member,
sort_alphabetically=True)
from calibre.ebooks.metadata import MetaInformation
ret = extract_first(stream)
mi = MetaInformation(None, None)
stream.seek(0)
if ftype in {'cbr', 'cbz'}:
series_index = self.site_customization
if series_index not in {'volume', 'issue'}:
series_index = 'volume'
try:
mi.smart_update(get_comic_metadata(stream, ftype, series_index=series_index))
exc | ept:
pass
if ret is not None:
path, data = ret
ext = os.path.splitext(path)[1][1:]
mi.cover_data = (ext.lower(), data)
return mi
class CHMMeta | dataReader(MetadataReaderPlugin):
name = 'Read CHM metadata'
file_types = set(['chm'])
description = _('Read metadata from %s files') % 'CHM'
def get_metadata(self, stream, ftype):
from calibre.ebooks.chm.metadata import get_metadata
return get_metadata(stream)
class EPUBMetadataReader(MetadataReaderPlugin):
name = 'Read EPUB metadata'
file_types = set(['epub'])
description = _('Read metadata from %s files')%'EPUB'
def get_metadata(self, stream, ftype):
from calibre.ebooks.metadata.epub import get_metadata, get_quick_metadata
if self.quick:
return get_quick_metadata(stream)
return get_metadata(stream)
class FB2MetadataReader(MetadataReaderPlugin):
name = 'Read FB2 metadata'
file_types = set(['fb2'])
description = _('Read metadata from %s files')%'FB2'
def get_metadata(self, stream, ftype):
from calibre.ebooks.metadata.fb2 import get_metadata
return get_metadata(stream)
class HTMLMetadataReader(MetadataReaderPlugin):
name = 'Read HTML metadata'
file_types = set(['html' |
etamponi/taxonomy-generator | deltaphi/test_category_group.py | Python | gpl-2.0 | 3,675 | 0.001905 | import unittest
import numpy
from deltaphi.category_info import RawCategoryInfo, CategoryGroup, CategoryInfoFactory
from deltaphi.fake_entities import FakeCategoryInfo
__author__ = 'Emanuele Tamponi'
class TestCategoryGroup(unittest.TestCase):
def setUp(self):
self.builder = CategoryInfoFactory({"a", "b", "c"})
def test_build_parent_pairwise(self):
ci1 = self.builder.build(RawCategoryInfo("C1", 100, {"a": 50, "c": 80}))
ci2 = self.builder.build(RawCategoryInfo("C2", 80, {"b": 40, "c": 20}))
merged = CategoryGroup([ci1, ci2]).build_parent_info()
numpy | .testing.assert_array_equal([50, 40, 10 | 0], merged.frequencies)
self.assertEqual("(C1+C2)", merged.category)
self.assertEqual(180, merged.documents)
self.assertEqual(CategoryGroup([ci1, ci2]), merged.child_group)
def test_build_parent_multiple(self):
ci1 = self.builder.build(RawCategoryInfo("C1", 100, {"a": 50, "c": 80}))
ci2 = self.builder.build(RawCategoryInfo("C2", 80, {"b": 40, "c": 20}))
ci3 = self.builder.build(RawCategoryInfo("C3", 130, {"a": 20, "b": 20, "c": 30}))
merged = CategoryGroup([ci1, ci2, ci3]).build_parent_info()
numpy.testing.assert_array_equal([70, 60, 130], merged.frequencies)
self.assertEqual("(C1+C2+C3)", merged.category)
self.assertEqual(310, merged.documents)
self.assertEqual(CategoryGroup([ci1, ci2, ci3]), merged.child_group)
def test_hierarchical_build_node(self):
ci1 = self.builder.build(RawCategoryInfo("C1", 100, {"a": 50, "c": 80}))
ci2 = self.builder.build(RawCategoryInfo("C2", 80, {"b": 40, "c": 20}))
ci3 = self.builder.build(RawCategoryInfo("C3", 130, {"a": 20, "b": 20, "c": 30}))
middle = CategoryGroup([ci1, ci2]).build_parent_info()
merged = CategoryGroup([ci3, middle]).build_parent_info()
numpy.testing.assert_array_equal([70, 60, 130], merged.frequencies)
self.assertEqual("((C1+C2)+C3)", merged.category)
self.assertEqual(310, merged.documents)
self.assertEqual(CategoryGroup([ci3, middle]), merged.child_group)
def test_category_group_one_vs_siblings(self):
ci1 = FakeCategoryInfo("C1", 4)
ci2 = FakeCategoryInfo("C2", 4)
ci3 = FakeCategoryInfo("C3", 4)
expected_info_pair = [
(ci1, CategoryGroup([ci2, ci3]).build_parent_info()),
(ci2, CategoryGroup([ci1, ci3]).build_parent_info()),
(ci3, CategoryGroup([ci1, ci2]).build_parent_info())
]
group = CategoryGroup([ci1, ci2, ci3])
for expected_info_pair, actual_info_pair in zip(expected_info_pair, group.one_vs_siblings()):
self.assertEqual(expected_info_pair[0], actual_info_pair[0])
self.assertEqual(expected_info_pair[1], actual_info_pair[1])
def test_leafs(self):
ci1 = FakeCategoryInfo("C1", 4)
ci2 = FakeCategoryInfo("C2", 4)
ci3 = FakeCategoryInfo("C3", 4)
ci4 = FakeCategoryInfo("C4", 4)
ci12 = CategoryGroup([ci1, ci2]).build_parent_info()
ci34 = CategoryGroup([ci3, ci4]).build_parent_info()
g12 = CategoryGroup([ci12])
g34 = CategoryGroup([ci34])
expected_leafs = [ci1, ci2]
self.assertEqual(expected_leafs, g12.leafs())
expected_leafs = [ci3, ci4]
self.assertEqual(expected_leafs, g34.leafs())
g1234 = CategoryGroup([ci12, ci34])
expected_leafs = [ci1, ci2, ci3, ci4]
self.assertEqual(expected_leafs, g1234.leafs())
g1234 = CategoryGroup([g1234.build_parent_info()])
self.assertEqual(expected_leafs, g1234.leafs())
|
morgenst/pyfluka | pyfluka/utils/OrderedYAMLExtension.py | Python | mit | 460 | 0 | import yaml
from collecti | ons import OrderedDict
def dump(data, stream=None, dumper=yaml.SafeDumper, **kwds):
class OrderedDumper(dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items()
)
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds | )
|
xupingmao/xnote | tests/test_base.py | Python | gpl-3.0 | 4,620 | 0.007193 | # encoding=utf-8
import sys
sys.path.insert(1, "lib")
sys.path.insert(1, "core")
import os
import time
import unittest
import xconfig
import xutils
import xtables
import xmanager
import xtemplate
import web
import six
import json
import xauth
from xutils import dbutil
from handlers.fs.fs_upload import get_upload_file_path
config = xconfig
date = time.strftime("%Y/%m")
APP = None
DEFAULT_HEADERS = dict()
def init():
global APP
if APP is not None:
return APP
xconfig.IS_TEST = True
xconfig.port = "1234"
xconfig.DEV_MODE = True
var_env = dict()
xutils.remove_file("./testdata/data.db", hard = True)
xconfig.init("./testdata")
xtables.init()
dbutil.init(xconfig.DB_DIR)
xutils.init(xconfig)
xauth.init()
xutils.cacheutil.init(xconfig.STORAGE_DIR)
APP = web.application(list(), var_env, autoreload=False)
last_mapping = (r"/tools/(.*)", "handlers.tools.tools.handler")
mgr = xmanager.init(APP, var_env, last_mapping=last_mapping)
mgr.reload()
# 加载template
xtemplate.reload()
# 发送启动消息
xmanager.fire("sys.reload")
return APP
def json_request(*args, **kw):
global APP
if "data" in kw:
# 对于POST请求设置无效
kw["data"]["_format"] = "json"
else:
kw["data"] = dict(_format="json")
kw["_format"] = "json"
kw["headers"] = DEFAULT_HEADERS
ret = APP.request(*args, **kw)
if ret.status == "303 See Other":
return
assert ret.status == "200 OK"
data = ret.data
if six.PY2:
return json.loads(data)
return json.loads(data.decode("utf-8"))
def request_html(*args, **kw):
ret = APP.request(*args, **kw)
return ret.data
def create_tmp_file(name):
path = os.path.join(xconfig.DATA_DIR, "files", "user", "upload", time.strftime("%Y/%m"), name)
xutils.touch(path)
def remove_tmp_file(name):
path = os.path.join(xconfig.DATA_DIR, "files", "user", "upload", time.strftime("%Y/%m"), name)
if os.path.exists(path):
os.remove(path)
class BaseTestCase(unittest.TestCase):
def check_OK(self, *args, **kw):
response = APP.request(*args, **kw)
status = response.status
print(status)
self.assertEqual(True, status == "200 OK" or status == "303 See Other" or status == "302 Found")
def check_200(self, *ar | gs, **kw):
response = APP.request(*args, **kw)
self.assertEqual("200 OK", response.status)
def check_200_debug(self, *args, **kw):
response = APP.request(*args, **kw)
print(args, kw, response)
print(APP.mapping)
se | lf.assertEqual("200 OK", response.status)
def check_303(self, *args, **kw):
response = APP.request(*args, **kw)
self.assertEqual("303 See Other", response.status)
def check_404(self, url):
response = APP.request(url)
self.assertEqual("404 Not Found", response.status)
def check_status(self, status, *args, **kw):
response = APP.request(*args, **kw)
self.assertEqual(status, response.status)
def json_request(self, *args, **kw):
return json_request(*args, **kw)
class BaseTestMain(unittest.TestCase):
def test_get_upload_file_path(self):
remove_tmp_file("test.txt")
path, webpath = get_upload_file_path("user", "test.txt")
print()
print(path)
print(webpath)
self.assertEqual(os.path.abspath(config.DATA_PATH + "/files/user/upload/%s/test.txt" % date), path)
self.assertEqual("/data/files/user/upload/%s/test.txt" % date, webpath)
def test_get_upload_file_path_1(self):
remove_tmp_file("test_1.txt")
create_tmp_file("test.txt")
path, webpath = get_upload_file_path("user", "test.txt")
print()
print(path)
print(webpath)
self.assertEqual(os.path.abspath(config.DATA_PATH + "/files/user/upload/%s/test_1.txt" % date), path)
self.assertEqual("/data/files/user/upload/%s/test_1.txt" % date, webpath)
remove_tmp_file("test.txt")
def test_get_upload_file_path_2(self):
create_tmp_file("test.txt")
create_tmp_file("test_1.txt")
remove_tmp_file("test_2.txt")
path, webpath = get_upload_file_path("user", "test.txt")
print()
print(path)
print(webpath)
self.assertEqual(os.path.abspath(config.DATA_PATH + "/files/user/upload/%s/test_2.txt" % date), path)
self.assertEqual("/data/files/user/upload/%s/test_2.txt" % date, webpath)
remove_tmp_file("test.txt")
remove_tmp_file("test_1.txt")
|
ploggingdev/practice | algos/tests/mergesort_tests.py | Python | gpl-3.0 | 437 | 0.011442 | from unittest import TestCase
from random import random
from algos.mergesort import merge, mergesort
class MergeSortTest(TestCase): |
def test_merge_sort(self):
seq = [random() for _ in range(4000)]
sorted_seq = sorted(seq)
self.assertEqual(mergesort(seq), sorted_seq)
def test_merge_sort_2(self):
| self.assertEqual(mergesort(list()), list())
if __name__ == '__main__':
unittest.main() |
qiime2/qiime2 | qiime2/sdk/tests/test_actiongraph.py | Python | bsd-3-clause | 5,365 | 0 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ---------------------------------------------------- | ------------------------
import unittest
from qiime2.core.testing.type import (Mapping, IntSequence1, IntSequence2)
from qiime2.core.type.primitive import (Int, Str, Metadata)
from qiime2.core.type.visualization import (Visualization)
from qiime2.core.testing.util import get_dummy_plugin
from qiime2.sdk.actiongraph import build_graph
class TestActiongraph(unittest.TestCase):
def setUp(self):
self.plugin = | get_dummy_plugin()
self.g = None
def test_simple_graph(self):
methods = [self.plugin.actions['no_input_method']]
self.g = build_graph(methods)
obs = list(self.g.nodes)
exp_node = str({
'inputs': {},
'outputs': {
'out': Mapping
},
})
type_node = Mapping
exp = [type_node, exp_node]
for item in obs:
assert item in exp
assert self.g.has_edge(str(exp_node), type_node)
def test_cycle_in_graph_no_params(self):
methods = [self.plugin.actions['docstring_order_method']]
self.g = build_graph(methods)
obs = list(self.g.nodes)
exp = [Mapping, Str]
exp_node = str({
'inputs': {
'req_input': Mapping,
'req_param': Str,
},
'outputs': {
'out': Mapping
},
})
exp += [exp_node]
for item in obs:
assert item in exp
assert self.g.in_degree(exp_node) == 2
assert self.g.out_degree(exp_node) == 1
def test_cycle_in_graph_with_params(self):
methods = [self.plugin.actions['docstring_order_method']]
self.g = build_graph(methods, True)
obs = list(self.g.nodes)
exp = [Mapping, Int, Str, 'opt_Mapping', 'opt_Int']
exp_node = str({
'inputs': {
'req_input': Mapping,
'req_param': Str,
'opt_input': Mapping,
'opt_param': Int
},
'outputs': {
'out': Mapping
},
})
exp += [exp_node]
for item in obs:
assert item in exp
assert self.g.in_degree(exp_node) == 4
assert self.g.out_degree(exp_node) == 1
def test_union(self):
vis = [self.plugin.actions['most_common_viz']]
self.g = build_graph(vis)
obs = list(self.g.nodes)
exp = [Visualization, IntSequence1, IntSequence2]
exp_node_1 = str({
'inputs': {
'ints': IntSequence1,
},
'outputs': {
'visualization': Visualization
},
})
exp_node_2 = str({
'inputs': {
'ints': IntSequence2,
},
'outputs': {
'visualization': Visualization
},
})
exp += [exp_node_1, exp_node_2]
for item in obs:
assert item in exp
assert self.g.in_degree(exp_node_1) == 1
assert self.g.out_degree(exp_node_1) == 1
assert self.g.in_degree(exp_node_2) == 1
assert self.g.out_degree(exp_node_2) == 1
assert self.g.in_degree(Visualization) == 2
assert self.g.out_degree(Visualization) == 0
def test_multiple_outputs(self):
actions = [self.plugin.actions['visualizer_only_pipeline']]
self.g = build_graph(actions)
obs = list(self.g.nodes)
exp = [Visualization, Mapping]
exp_node = str({
'inputs': {
'mapping': Mapping
},
'outputs': {
'viz1': Visualization,
'viz2': Visualization
},
})
exp += [exp_node]
for item in obs:
assert item in exp
assert self.g.in_degree(exp_node) == 1
assert self.g.out_degree(exp_node) == 1
def test_metadata(self):
actions = [self.plugin.actions['identity_with_metadata']]
self.g = build_graph(actions)
obs = list(self.g.nodes)
exp = [Metadata, IntSequence1, IntSequence2]
exp_node_1 = str({
'inputs': {
'ints': IntSequence1,
'metadata': Metadata
},
'outputs': {
'out': IntSequence1
},
})
exp_node_2 = str({
'inputs': {
'ints': IntSequence2,
'metadata': Metadata
},
'outputs': {
'out': IntSequence1
},
})
exp += [exp_node_1, exp_node_2]
for item in obs:
assert item in exp
assert self.g.in_degree(exp_node_1) == 2
assert self.g.out_degree(exp_node_1) == 1
assert self.g.in_degree(exp_node_1) == 2
assert self.g.out_degree(exp_node_1) == 1
assert self.g.in_degree(IntSequence1) == 2
assert self.g.out_degree(IntSequence1) == 1
if __name__ == '__main__':
unittest.main()
|
fbradyirl/home-assistant | tests/components/alert/__init__.py | Python | apache-2.0 | 37 | 0 | "" | "Tests for the alert compo | nent."""
|
nkgilley/home-assistant | homeassistant/components/transport_nsw/sensor.py | Python | apache-2.0 | 4,543 | 0 | """Support for Transport NSW (AU) to query next leave event."""
from datetime import timedelta
import logging
from TransportNSW import TransportNSW
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_MODE,
CONF_API_KEY,
CONF_NAME,
TIME_MINUTES,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_STOP_ID = "stop_id"
ATTR_ROUTE = "route"
ATTR_DUE_IN = "due"
ATTR_DELAY = "delay"
ATTR_REAL_TIME = "real_time"
ATTR_DESTINATION = "destination"
ATTRIBUTION = "Data provided by Transport NSW"
CONF_STOP_ID = "stop_id"
CONF_ROUTE = "route"
CONF_DESTINATION = "destination"
DEFAULT_NAME = "Next Bus"
ICONS = {
"Train": "mdi:train",
"Lightrail": "mdi:tram",
"Bus": "mdi:bus",
"Coach": "mdi:bus",
"Ferry": "mdi:ferry",
"Schoolbus": "mdi:bus",
"n/a": "mdi:clock",
None: "mdi:clock",
}
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STOP_ID): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_ROUTE, default=""): cv.string,
vol.Optional(CONF_DESTINATION, default=""): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Transport NSW sensor."""
stop_id = config[CONF_STOP_ID]
api_key = config[CONF_API_KEY]
route = config.get(CONF_ROUTE)
destination = config.get(CONF_DESTINATION)
name = config.get(CONF_NAME)
data = PublicTransportData(stop_id, route, destination, api_key)
add_entities([TransportNSWSensor(data, stop_id, name)], True)
class TransportNSWSensor(Entity):
"""Implementation of an Transport NSW sensor."""
def __init__(self, data, stop_id, name):
"""Initialize the sensor."""
self.data = data
self._name = name
self._stop_id = stop_id
self._times = self._state = None
self._icon = ICONS[None]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
| def device_state_attributes(self):
"""Return the state attributes."""
if self._times is not None:
return {
ATTR_DUE_IN: self._times[ATTR_DUE_IN],
ATTR_STOP_ID: self._stop_id,
ATTR_ROUTE: self._times[ATTR_ROUTE],
ATTR_DELAY: self._times[ATTR_DELAY],
ATTR_REAL_TIME: self._times[ATTR_REAL_TIME],
ATTR_DESTINATION: self._times[ATTR_DESTINATION],
| ATTR_MODE: self._times[ATTR_MODE],
ATTR_ATTRIBUTION: ATTRIBUTION,
}
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return TIME_MINUTES
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
def update(self):
"""Get the latest data from Transport NSW and update the states."""
self.data.update()
self._times = self.data.info
self._state = self._times[ATTR_DUE_IN]
self._icon = ICONS[self._times[ATTR_MODE]]
class PublicTransportData:
"""The Class for handling the data retrieval."""
def __init__(self, stop_id, route, destination, api_key):
"""Initialize the data object."""
self._stop_id = stop_id
self._route = route
self._destination = destination
self._api_key = api_key
self.info = {
ATTR_ROUTE: self._route,
ATTR_DUE_IN: "n/a",
ATTR_DELAY: "n/a",
ATTR_REAL_TIME: "n/a",
ATTR_DESTINATION: "n/a",
ATTR_MODE: None,
}
self.tnsw = TransportNSW()
def update(self):
"""Get the next leave time."""
_data = self.tnsw.get_departures(
self._stop_id, self._route, self._destination, self._api_key
)
self.info = {
ATTR_ROUTE: _data["route"],
ATTR_DUE_IN: _data["due"],
ATTR_DELAY: _data["delay"],
ATTR_REAL_TIME: _data["real_time"],
ATTR_DESTINATION: _data["destination"],
ATTR_MODE: _data["mode"],
}
|
zhang0137/chromite | lib/table_unittest.py | Python | bsd-3-clause | 12,584 | 0.00739 | #!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the table module."""
import cStringIO
import os
import sys
import tempfile
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import table
# pylint: disable=W0212,R0904
class TableTest(cros_test_lib.TestCase):
"""Unit tests for the Table class."""
COL0 = 'Column1'
COL1 = 'Column2'
COL2 = 'Column3'
COL3 = 'Column4'
COLUMNS = [COL0, COL1, COL2, COL3]
ROW0 = {COL0: 'Xyz', COL1: 'Bcd', COL2: 'Cde'}
ROW1 = {COL0: 'Abc', COL1: 'Bcd', COL2: 'Opq', COL3: 'Foo'}
ROW2 = {COL0: 'Abc', COL1: 'Nop', COL2: 'Wxy', COL3: 'Bar'}
EXTRAROW = {CO | L1: 'Walk', COL2: 'The', COL3: 'Line'}
ROW0a = {COL0: 'Xyz', COL1: 'Bcd', COL2: 'Cde', COL3: 'Yay'}
ROW0b = {COL0: 'Xyz', COL1: 'Bcd', COL2: 'Cde', COL3: 'Boo'}
ROW1a = {COL0: 'Abc', COL1: 'Bcd', COL2: 'Opq', COL3: 'B | lu'}
EXTRACOL = 'ExtraCol'
EXTRACOLUMNS = [COL0, EXTRACOL, COL1, COL2]
EROW0 = {COL0: 'Xyz', EXTRACOL: 'Yay', COL1: 'Bcd', COL2: 'Cde'}
EROW1 = {COL0: 'Abc', EXTRACOL: 'Hip', COL1: 'Bcd', COL2: 'Opq'}
EROW2 = {COL0: 'Abc', EXTRACOL: 'Yay', COL1: 'Nop', COL2: 'Wxy'}
def _GetRowValsInOrder(self, row):
"""Take |row| dict and return correctly ordered values in a list."""
vals = []
for col in self.COLUMNS:
vals.append(row.get(col, ""))
return vals
def _GetFullRowFor(self, row, cols):
return dict((col, row.get(col, '')) for col in cols)
def assertRowsEqual(self, row1, row2):
# Determine column superset
cols = set(row1.keys() + row2.keys())
self.assertEquals(self._GetFullRowFor(row1, cols),
self._GetFullRowFor(row2, cols))
def assertRowListsEqual(self, rows1, rows2):
for (row1, row2) in zip(rows1, rows2):
self.assertRowsEqual(row1, row2)
def setUp(self):
self._table = self._CreateTableWithRows(self.COLUMNS,
[self.ROW0, self.ROW1, self.ROW2])
def _CreateTableWithRows(self, cols, rows):
mytable = table.Table(list(cols))
if rows:
for row in rows:
mytable.AppendRow(dict(row))
return mytable
def testLen(self):
self.assertEquals(3, len(self._table))
def testGetNumRows(self):
self.assertEquals(3, self._table.GetNumRows())
def testGetNumColumns(self):
self.assertEquals(4, self._table.GetNumColumns())
def testGetColumns(self):
self.assertEquals(self.COLUMNS, self._table.GetColumns())
def testGetColumnIndex(self):
self.assertEquals(0, self._table.GetColumnIndex(self.COL0))
self.assertEquals(1, self._table.GetColumnIndex(self.COL1))
self.assertEquals(2, self._table.GetColumnIndex(self.COL2))
def testGetColumnByIndex(self):
self.assertEquals(self.COL0, self._table.GetColumnByIndex(0))
self.assertEquals(self.COL1, self._table.GetColumnByIndex(1))
self.assertEquals(self.COL2, self._table.GetColumnByIndex(2))
def testGetByIndex(self):
self.assertRowsEqual(self.ROW0, self._table.GetRowByIndex(0))
self.assertRowsEqual(self.ROW0, self._table[0])
self.assertRowsEqual(self.ROW2, self._table.GetRowByIndex(2))
self.assertRowsEqual(self.ROW2, self._table[2])
def testSlice(self):
self.assertRowListsEqual([self.ROW0, self.ROW1], self._table[0:2])
self.assertRowListsEqual([self.ROW2], self._table[-1:])
def testGetByValue(self):
rows = self._table.GetRowsByValue({self.COL0: 'Abc'})
self.assertEquals([self.ROW1, self.ROW2], rows)
rows = self._table.GetRowsByValue({self.COL2: 'Opq'})
self.assertEquals([self.ROW1], rows)
rows = self._table.GetRowsByValue({self.COL3: 'Foo'})
self.assertEquals([self.ROW1], rows)
def testGetIndicesByValue(self):
indices = self._table.GetRowIndicesByValue({self.COL0: 'Abc'})
self.assertEquals([1, 2], indices)
indices = self._table.GetRowIndicesByValue({self.COL2: 'Opq'})
self.assertEquals([1], indices)
indices = self._table.GetRowIndicesByValue({self.COL3: 'Foo'})
self.assertEquals([1], indices)
def testAppendRowDict(self):
self._table.AppendRow(self.EXTRAROW)
self.assertEquals(4, self._table.GetNumRows())
self.assertEquals(self.EXTRAROW, self._table[len(self._table) - 1])
def testAppendRowList(self):
self._table.AppendRow(self._GetRowValsInOrder(self.EXTRAROW))
self.assertEquals(4, self._table.GetNumRows())
self.assertEquals(self.EXTRAROW, self._table[len(self._table) - 1])
def testSetRowDictByIndex(self):
self._table.SetRowByIndex(1, self.EXTRAROW)
self.assertEquals(3, self._table.GetNumRows())
self.assertEquals(self.EXTRAROW, self._table[1])
def testSetRowListByIndex(self):
self._table.SetRowByIndex(1, self._GetRowValsInOrder(self.EXTRAROW))
self.assertEquals(3, self._table.GetNumRows())
self.assertEquals(self.EXTRAROW, self._table[1])
def testRemoveRowByIndex(self):
self._table.RemoveRowByIndex(1)
self.assertEquals(2, self._table.GetNumRows())
self.assertEquals(self.ROW2, self._table[1])
def testRemoveRowBySlice(self):
del self._table[0:2]
self.assertEquals(1, self._table.GetNumRows())
self.assertEquals(self.ROW2, self._table[0])
def testIteration(self):
ix = 0
for row in self._table:
self.assertEquals(row, self._table[ix])
ix += 1
def testClear(self):
self._table.Clear()
self.assertEquals(0, len(self._table))
def testMergeRows(self):
# This merge should fail without a merge rule. Capture stderr to avoid
# scary error message in test output.
stderr = sys.stderr
sys.stderr = cStringIO.StringIO()
self.assertRaises(ValueError, self._table._MergeRow, self.ROW0a, self.COL0)
sys.stderr = stderr
# Merge but stick with current row where different.
self._table._MergeRow(self.ROW0a, self.COL0,
merge_rules = { self.COL3: 'accept_this_val' })
self.assertEquals(3, len(self._table))
self.assertRowsEqual(self.ROW0, self._table[0])
# Merge and use new row where different.
self._table._MergeRow(self.ROW0a, self.COL0,
merge_rules = { self.COL3: 'accept_other_val' })
self.assertEquals(3, len(self._table))
self.assertRowsEqual(self.ROW0a, self._table[0])
# Merge and combine column values where different
self._table._MergeRow(self.ROW1a, self.COL2,
merge_rules = { self.COL3: 'join_with: ' })
self.assertEquals(3, len(self._table))
final_row = dict(self.ROW1a)
final_row[self.COL3] = self.ROW1[self.COL3] + ' ' + self.ROW1a[self.COL3]
self.assertRowsEqual(final_row, self._table[1])
def testMergeTablesSameCols(self):
other_table = self._CreateTableWithRows(self.COLUMNS,
[self.ROW0b, self.ROW1a, self.ROW2])
self._table.MergeTable(other_table, self.COL2,
merge_rules = { self.COL3: 'join_with: ' })
final_row0 = self.ROW0b
final_row1 = dict(self.ROW1a)
final_row1[self.COL3] = self.ROW1[self.COL3] + ' ' + self.ROW1a[self.COL3]
final_row2 = self.ROW2
self.assertRowsEqual(final_row0, self._table[0])
self.assertRowsEqual(final_row1, self._table[1])
self.assertRowsEqual(final_row2, self._table[2])
def testMergeTablesNewCols(self):
self.assertFalse(self._table.HasColumn(self.EXTRACOL))
other_rows = [self.EROW0, self.EROW1, self.EROW2]
other_table = self._CreateTableWithRows(self.EXTRACOLUMNS, other_rows)
self._table.MergeTable(other_table, self.COL2,
allow_new_columns=True,
merge_rules = { self.COL3: 'join_by_space' })
self.assertTrue(self._table.HasColumn(self.EXTRACOL))
self.assertEquals(5, self._table.GetNumColumns())
self.assertEquals(1, self._table.GetColumnIndex(self.EXTRACOL))
final_row0 = dict(self.ROW0)
final_row0[self.EXTRACOL] = self.EROW0[self. |
tmetsch/python-dtrace | examples/ctypes/syscall_by_zone.py | Python | mit | 925 | 0 | #!/usr/bin/env python
"""
Use the Python DTrace consumer and count syscalls by zone.
Created on Oct 10, 2011
@author: tmetsch
"""
from __future__ import print_function
import time
from ctypes import cast, c_char_p, c_int
from dtrace_ctypes import consumer
SCRIPT = 'syscall:::entry { @num[zonename] = count(); }'
def walk(data, _):
"""
Nice formatted aggregate walker.
"""
tmp = data.contents.dtada_data
name = cast(tmp + 16, c_char_p).value
count = consumer.deref(tmp + 272, c_int).value
print('Zone "{0:s}" made {1:d} syscalls.'.format(name.decode(), count))
return 0
def main():
"""
| Run DTrace...
"""
dtrace = consumer.DTraceConsumerThread(SCRIPT, walk_func=walk)
dtrace.start()
# we will stop t | he thread after some time...
time.sleep(5)
# stop and wait for join...
dtrace.stop()
dtrace.join()
if __name__ == '__main__':
main()
|
3dfxsoftware/cbss-addons | price_structure/model/sale.py | Python | gpl-2.0 | 10,026 | 0.005286 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
# Credits######################################################
# Coded by: Vauxoo C.A.
# Planified by: Nhomar Hernandez
# Audited by: Vauxoo C.A.
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from openerp.osv import osv, fields
import openerp.tools as tools
from openerp.tools.translate import _
from tools import config
import openerp.netsvc as netsvc
import decimal_precision as dp
class sale_order_line(osv.Model):
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False,
packaging=False, fiscal_position=False, flag=False,
context=None):
'''
Overridden the method of product line sales, to replace the unit price calculation and selection of the cost structure
that handles the product, and later to filter the prices for the product selected
'''
if context is None:
context = {}
price_obj = self.pool.get('product.pricelist')
product_obj = self.pool.get('product.product')
product_brw = product and product_obj.browse(
cr, uid, product, context=context)
res = super(
sale_order_line, self).product_id_change(cr, uid, ids, pricelist,
product, qty=qty,
uom=uom, qty_uos=qty_uos,
uos=uos, name=name,
partner_id=partner_id,
lang=lang, update_tax=update_tax,
date_order=date_order,
packaging=packaging, fiscal_position=fiscal_position,
flag=flag, context=context)
res.get('value', False) and product_brw and\
product_brw.uom_id and\
res.get('value', False).update({'product_uom': product_brw.uom_id.id})
if context.get('price_change', False):
price = price_obj.price_get(cr, uid, [context.get(
'price_change', False)], product, qty, context=context)
res.get('value', {}).update({'price_unit': round(
price.get(context.get('price_change', False)), 2)})
res.get('value', False) and\
product_brw and product_brw.categ_id and\
res.get('value', False).update({'categ_id': product_brw.categ_id.id})
res.get('value', False) and 'price_unit' in res.get(
'value', False) and res['value'].pop('price_unit')
return res
def price_unit(self, cr, uid, ids, price_list, product_id, qty,
context=None):
'''
Calculating the amount of model _compute_price method product.uom
'''
if context is None:
context = {}
res = {'value': {}}
if price_list and product_id and qty:
price_obj = self.pool.get('product.pricelist')
price = price_obj.price_get(cr, uid, [price_list], product_id, qty,
context=context)
res['value'].update({'price_unit': round(
price.get(price_list), 2)})
return res
#
_inherit = 'sale.order.line'
_columns = {
'product_id': fields.many2one('product.product', 'Product',
domain=[('sale_ok', '=', True)], change_default=True),
'price_list_ids': fields.many2one('product.pricelist', 'Select Price'),
'cost_structure_id': fields.many2one('cost.structure',
'Cost Structure'),
'categ_id': fields.many2one('product.category', 'Category',
help='Category by product selected'),
}
class sale_order(osv.Model):
_inherit = 'sale.order'
def _price_status(self, cr, uid, ids, field_name, arg, context=None):
'''
Check That the products sold are not sold at a price less than or greater than the price rago allocated in the product.
Failure to comply with this will print a message informing the product that is not complying with this requirement
'''
if context is None:
context = {}
if not ids:
return {}
res = {}
product = []
context.update({'query': False})
pricelist_obj = self.pool.get('product.pricelist')
for order in len(ids) == 1 and\
self.browse(cr, uid, ids, context=context) or []:
for line in order.order_line:
price_compute = line.product_id and [pricelist_obj.price_get(
cr, uid, [i.price_list_id and i.price_list_id.id],
line.product_id.id, line.product_uom_qty,
context=context).get(i.price_list_id.id)\
for i in line.product_id.price_list_item_ids or\
line.product_id.category_item_ids]
property_cost_structure = line and line.product_id and\
line.product_id.property_cost_structure and\
line.product_id.property_cost_structure.id or False
if property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) <\
round(i, 2)]):
product.append(
u'Intenta vender el producto %s a un precio menor al\
estimado para su venta' % line.product_id.name)
res[order.id] = {'status_bool': True}
elif property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) > round(i, 2)]):
product.append(
u'Intenta vender el producto %s a un precio mayor al\
estimado para su venta' % line.product_id.name)
res[order.id] = {'status_bool': True}
elif not property_cost_structure:
product.append(
u'El producto %s no tiene una estructura de costo'\
% line.product_id.name)
res[order.id] = {'status_bool': True}
if product:
res[order.id] = '\n'.join(product)
else:
res[order.id] = {'status_bool': False}
product = []
res[order.id] = '\n'.join(product)
return res
_columns = {
'status_price': fields.function(_price_status, method=True,
type="text", store=True, string='Status Price'),
'status_bool': fields.function(_price_status, method=True,
type="boolean", string='Status Price'),
}
_defaults = {
'status_bool': False
}
def price_unit_confirm(self, cr, uid, ids, context=None):
'''
Workflow condition does not allow the sale proces | s if at least one product is being sold in the price range set out in its cost structure
'''
if context is None:
| |
willowtreeapps/tango-core | tests/errors/importerror.py | Python | bsd-3-clause | 77 | 0 | """
site: importerror
routes:
exports:
""" |
impo | rt doesnotexist
doesnotexist
|
larsks/muxdemux | muxdemux/writer.py | Python | gpl-3.0 | 4,581 | 0 | import cbor
import hashlib
import logging
import zlib
from .common import * # NOQA
LOG = logging.getLogger(__name__)
default_hashalgo = 'sha256'
state_bos = 0
state_metadata = 1
state_data = 2
state_eos = 3
class MuxError(Exception):
pass
class InvalidState(MuxError):
pass
class StreamWriter(object):
'''Writes one part to a mux stream.
A mux stream is a series for cbor-encoded dictionaries. Each
chunk has a 'type' attribute that identifies the chunk type. A
part has the following format:
+----------------------------------------+
| beginning-of-stream | blktype_bos |
+----------------------------------------+
| metadata (optional) | blktype_metadata |
+----------------------------------------+
| data0 | blktype_data |
. .
. .
| dataN | blktype_data |
+----------------------------------------+
| end-of-stream | blktype_eos |
+----------------------------------------+
Multiple parts may be concatenated to form a stream.
'''
def __init__(self, fh,
name=None, hashalgo=None, writehash=False,
compress=False):
self.fh = fh
self.name = name
self.hashalgo = hashalgo if hashalgo else default_hashalgo
self.writehash = writehash
self.compress = compress
self.state = state_bos
self.metadata = {}
self.byteswritten = 0
if self.writehash:
self.ctx = self._get_hash_context()
def _write_header(self):
'''Writes out a header block. The header block contains
information about the stream:
- version: the mux format version
- name (optional): name of this stream
- hashalgo (optional): hash algorithm used for checksums
- compress (optional): true if data is compressed
'''
if self.state != state_bos:
raise InvalidState()
header = {'version': mux_version}
if self.name:
header['name'] = self.name
if self.writehash:
header['hashalgo'] = self.hashalgo
if self.compress:
header['compress'] = True
self._write_block(blktype_bos, **header)
self.state = state_metadata
def _write_metadata(self):
'''Writes out a metadata block. A metadata block can
contains arbitrary key/value pairs in the 'metadata' key.'''
if self.state != state_metadata:
raise InvalidState()
if self.metadata:
self._write_block(blktype_metadata,
metadata=self.metadata)
self.state = state_data
def _write_block(self, blktype, **kwargs):
| LOG.debug('writing block: type=%s, content=%s',
blktype, repr(kwargs))
cbor.dump(dict(blktype=blktype, **kwargs), s | elf.fh)
def _get_hash_context(self):
return getattr(hashlib, self.hashalgo)()
def add_metadata(self, k, v):
self.metadata[k] = v
def write(self, data):
'''Write a data block to the mux stream.'''
# Write out the header if we haven't already.
if self.state == state_bos:
self._write_header()
# Write out the metadata if we haven't already.
if self.state == state_metadata:
self._write_metadata()
# Blow up if something is wrong.
if self.state != state_data:
raise InvalidState()
if self.compress:
data = zlib.compress(data)
if self.writehash:
self.ctx.update(data)
self.byteswritten += len(data)
self._write_block(blktype_data, data=data)
def write_iter(self, data):
'''Write data blocks to the mux stream from an iterator.'''
for chunk in data:
self.write(chunk)
def finish(self):
'''Close the stream by writing an end-of-stream block.'''
if self.state == state_bos:
self._write_header()
if self.state == state_metadata:
self._write_metadata()
if self.state != state_data:
raise InvalidState()
hashargs = {}
if self.writehash:
hashargs['digest'] = self.ctx.digest()
self._write_block(blktype_eos,
size=self.byteswritten,
**hashargs)
self.state = state_eos
|
JSLBen/KnowledgeTracing | codes/AssistmentsProperties.py | Python | mit | 8,942 | 0.006263 | # datapath config
# data folder location
data_folder = '/home/data/jleeae/ML/e_learning/KnowledgeTracing/data/'
csv_original_folder = data_folder + 'csv_original/'
csv_rnn_data_folder = data_folder + 'csv_rnn_data/'
pkl_rnn_data_folder = data_folder + 'pkl_rnn_data/'
# csv_original
Assistments2009_csv_original = csv_original_folder + 'skill_builder_data.csv'
Assistments2009_csv_original_corrected = csv_original_folder + 'skill_builder_data_corrected.csv'
Assistments2012_csv_original_problem_contents = csv_original_folder + 'ASSISTmentsProblems.csv'
Assistments2012_csv_original = csv_original_folder + '2012-2013-data-with-predictions-4-final.csv'
Assistments2012_csv_original_without_actions = csv_original_folder + '2012-2013-data-with-predictions-4-final-without-actions.csv'
Assistments2015_csv_original = '2015_100_skill_builders_main_problems.csv'
class AssistmentsProperties(object):
def __init__(self, version):
self.version = version
if ('2009' == version):
self.set2009Attr()
elif ('2012' == version):
self.set2012Attr()
elif ('2015' == version):
self.set2015Attr()
else:
print('{} yr is not realized'.format(version))
exit(1)
def set_datapath(self, datapath):
self.datapath = datapath
# process_config
# method = {'default', 'sliding_window'}
# has_scaffolding = {True, False}
# count_no_skill_id = {True, False}
# has_test_mode = {True, False}
# allow_multi_skills = {True, False}
# one_hot = {True, False}
# window_length: int
def get_datapath(self, ext='csv', is_original=True, process_config = None, is_problem_contents=False, is_training=True):
version = {
'2009': '2009',
'2012': '2012',
'2015': '2015'
}.get(self.version, None)
if (None == version):
print('{} version not yet realized'.format(self.version))
exit(1)
_ext = {
'csv': 'csv',
'pkl': 'pkl'
}.get(ext, None)
if (None == _ext):
print('{} extension not yet realized'.format(ext))
exit(1)
if ('datapath' not in self.__dict__):
if ('csv' == ext and is_original):
if ('2009' == self.version):
datapath = Assistments2009_csv_original_corrected
elif ('2012' == self.version):
if (is_problem_contents):
datapath = Assistments2012_csv_original_problem_contents
else:
datapath = Assistments2012_csv_original_without_actions
elif ('2015' == self.version):
datapath = Assistments2015_csv_original
else:
datapath = self.get_processed_datapath(ext, process_config, is_problem_contents, is_training)
return datapath
def get_processed_datapath(self, ext='csv', process_config=None, is_problem_contents=False, is_training=True):
if (None == process_config):
print('process_config not set properly')
exit(1)
version = {
'2009': '2009',
'2012': '2012',
'2015': '2015'
}.get(self.version, None)
_ext = {
'csv': 'csv',
'pkl': 'pkl'
}.get(ext, None)
if (None == _ext):
print('{} extension not yet realized'.format(ext))
exit(1)
if (None == version):
print('{} version not yet realized'.format(self.version))
exit(1)
# TODO: name policy for problem_contents?
split_rate = process_config.get('split_rate', 0.2)
split_rate = str(int(split_rate * 100))
method = process_config.get('method', None)
has_scaffolding = process_config.get('has_scaffolding', None)
count_no_skill_id = process_config.get('count_no_skill_id', None)
has_test_mode = process_config.get('has_test_mode', None)
allow_multi_skills = process_config.get('allow_multi_skills', None)
one_hot = process_config.get('one_hot', None)
if ('csv' == ext):
datapath = csv_rnn_data_folder
elif ('pkl' == ext):
datapath = pkl_rnn_data_folder
datapath += 'split_' + split_rate + '/'
datapath += 'A' + version + '/'
if (one_hot):
datapath += 'one_hot/'
else:
datapath += 'not_one_hot/'
datapath += method
datapath += '/'
if ('sliding_window' == method):
# 'default' or 'same_as_training' or 'overlapping_last_element' or 'partition'
test_format = process_config.get('test_format', None)
datapath += test_format
datapath += '/'
window_length = process_config.get('window_length', None)
datapath += 'window_'
datapath += str(window_length)
datapath += '_'
if (is_training):
datapath = datapath + 'train_'
else:
datapath = datapath + 'test_'
if (has_scaffolding):
datapath += '1'
else:
datapath += '0'
if (count_no_skill_id):
datapath += '1'
else:
datapath += '0'
if (has_test_mode):
datapath += '1'
else:
datapath += '0'
if (allow_multi_skills):
datapath += '1'
else:
datapath += '0'
datapath += '.'
datapath += ext
return datapath
def set2009Attr(self):
self.order_id = 'order_id'
self.assignment_id = 'assignment_id'
self.user_id = 'user_id'
self.assistment_id = 'assistment_id'
self.problem_id = 'problem_id'
self.original = 'origina | l'
self.correct = 'correct'
self.attempt_count = 'attempt_count'
self.ms_first_response = 'ms_first_response'
self.tutor_mode = 'tutor_mode'
| self.answer_type = 'answer_type'
self.sequence_id = 'sequence_id'
self.student_class_id = 'student_class_id'
self.position = 'position'
self.type = 'type'
self.base_sequence_id = 'base_sequence_id'
self.skill_id = 'skill_id'
self.skill_name = 'skill_name'
self.teacher_id = 'teacher_id'
self.school_id = 'school_id'
self.hint_count = 'hint_count'
self.hint_total = 'hint_total'
self.overlap_time = 'overlap_time'
self.template_id = 'template_id'
self.answer_id = 'answer_id'
self.answer_text = 'answer_text'
self.first_action = 'first_action'
self.bottom_hint = 'bottom_hint'
self.opportunity = 'opportunity'
self.opportunity_original = 'opportunity_original'
def set2012Attr(self):
self.problem_log_id = 'problem_log_id'
self.skill = 'skill'
self.problem_id = 'problem_id'
self.user_id = 'user_id'
self.assignment_id = 'assignment_id'
self.assistment_id = 'assistment_id'
self.start_time = 'start_time'
self.end_time = 'end_time'
self.problem_type = 'problem_type'
self.original = 'original'
self.correct = 'correct'
self.bottom_hint = 'bottom_hint'
self.hint_count = 'hint_count'
self.actions = 'actions'
self.attempt_count = 'attempt_count'
self.ms_first_response = 'ms_first_response'
self.tutor_mode = 'tutor_mode'
self.sequence_id = 'sequence_id'
self.student_class_id = 'student_class_id'
self.position = 'position'
self.type = 'type'
self.base_sequence_id = 'base_sequence_id'
self.skill_id = 'skill_id'
self.teacher_id = 'teacher_id'
self.school_id = 'school_id'
self.overlap_time = 'overlap_time'
sel |
Nzbuu/SensorMonitor | SensorMonitor/sensor.py | Python | mit | 1,304 | 0 | import SensorMonitor
class SensorFactory:
def __init__(self, sensor_cls, factory_if):
self.sensor_cls = sen | sor_cls
self.facto | ry_if = factory_if
def create(self, **kwargs):
sensor_if = self.factory_if.create(**kwargs)
return self.sensor_cls(sensor_if)
class Sensor:
def __init__(self, sensor_if):
self.sensor_if = sensor_if
def get_measurement(self):
return self.sensor_if.read_data()
class SensorInterfaceFactory:
def __init__(self, default_if=None, default_args=None):
self.__dict = {}
self.default_if = default_if
if default_args is None:
self.default_args = {}
else:
self.default_args = default_args
def register(self, interface, interface_cls):
self.__dict[interface] = interface_cls
if not self.default_if:
self.default_if = interface
def create(self, interface=None, **kwargs):
if interface:
interface_cls = self.__dict[interface]
return interface_cls(**kwargs)
else:
interface_cls = self.__dict[self.default_if]
return interface_cls(**self.default_args)
class SensorInterface:
def __init__(self):
pass
def read_data(self):
return None
|
jinzekid/codehub | python/py3_6venv/encryption/rsa_generate_keypair.py | Python | gpl-3.0 | 1,156 | 0.012911 | # Author: Jason Lu
# RSA 是 Ron Rivest、Adi Shamir、Len Adleman 于 1977 年发明的加密算法。
# 公钥加密系统在加密和解密时分别使用不同的密钥。RSA 等就是公钥加密算法
# 在公钥加密系 | 统中,加密使用公钥(Public key),解密使用私钥(Private ke | y)。
# 这两种密钥 都需要通过算法生成。
# 公钥和私钥的密钥对可以通过 ssh-keygen 命令或 openssl 命令来创 建,
# 不过我们这里要学习的是用 PyCrypto 生成密钥的方法。
from Crypto.PublicKey import RSA
from Crypto import Random
INPUT_SIZE = 1024
def main():
random_func = Random.new().read #产生随机数的函数
key_pair = RSA.generate(INPUT_SIZE, random_func) #生成密钥对
private_pem = key_pair.exportKey() #获取pem格式的私钥
public_pem = key_pair.publickey().exportKey() #获取pem格式的公钥
with open('master-public.pem', 'w') as f:
f.write(public_pem.decode())
with open('master-private.pem', 'w') as f:
f.write(private_pem.decode())
print(private_pem.decode())
print(public_pem.decode())
if __name__ == '__main__':
main()
|
partp/gtg-services | GTG/plugins/export/templates.py | Python | gpl-3.0 | 6,174 | 0 | # -*- coding: utf-8 -*-
# Copyright (c) 2010 - Luca Invernizzi <invernizzi.l@gmail.com>
# 2012 - Izidor Matušov <izidor.matusov@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
""" Module for discovering templates and work with templates """
from glob | import glob
import os.path
import subprocess
import sys
import tempfile
import threading
from Cheetah.Template import Template as CheetahTemplate
from xdg.BaseDirectory import xdg_config_home
from gi.repository import GObject
TEMPLATE_PATHS = [
os.path.join(xdg_config_home, "gtg/plugins/export/export_templates"),
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "export_templates"),
]
def get_templates_paths():
""" Returns a list containing the full path f | or all the
available templates. """
template_list = []
for a_dir in TEMPLATE_PATHS:
template_list += glob(os.path.join(a_dir, "template_*"))
return template_list
class Template:
""" Representation of a template """
def __init__(self, path):
self._template = path
self._document_path = None
self._image_path = self._find_file("thumbnail_")
self._script_path = self._find_file("script_")
self._title, self._description = self._load_description()
def _find_file(self, prefix, suffix=""):
""" Find a file for the template given prefix and suffix """
basename = os.path.basename(self._template)
basename = basename.replace("template_", prefix)
path = os.path.join(os.path.dirname(self._template), basename)
path = os.path.splitext(path)[0] + '*' + suffix
possible_filles = glob(path)
if len(possible_filles) > 0:
return possible_filles[0]
else:
return None
def _load_description(self):
""" Returns title and description of the template
template description are stored in python module for easier l10n.
thus, we need to import the module given its path """
path = self._find_file("description_", ".py")
if not path:
return "", ""
dir_path = os.path.dirname(path)
if dir_path not in sys.path:
sys.path.append(dir_path)
module_name = os.path.basename(path).replace(".py", "")
try:
module = __import__(module_name, globals(), locals(),
['description'], 0)
return module.title, module.description
except (ImportError, AttributeError):
return "", ""
def _get_suffix(self):
""" Return suffix of the template """
return os.path.splitext(self._template)[1]
def get_path(self):
""" Return path to the template """
return self._template
def get_image_path(self):
""" Return path to the image """
return self._image_path
def get_title(self):
""" Return title of the template """
return self._title
def get_description(self):
""" Return description of the template """
return self._description
def get_document_path(self):
""" Return path to generated document.
Return None until generate() was successful."""
return self._document_path
def generate(self, tasks, plugin_api, callback):
""" Fill template and run callback when finished.
Created files are saved with the same suffix as the template. Opening
the final file determines its type based on suffix. """
document = CheetahTemplate(file=self.get_path(),
searchList=[{'tasks': tasks,
'plugin_api': plugin_api}])
suffix = ".%s" % self._get_suffix()
output = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
output.write(str(document))
self._document_path = output.name
output.close()
if self._script_path:
self._run_script(callback)
else:
callback()
def _run_script(self, callback):
""" Run script in its own thread and in other thread wait
for the result. """
document_ready = threading.Event()
def script():
""" Run script using the shebang of the script
The script gets path to a document as it only argument and
this thread expects resulting file as the only output of
the script. """
with open(self._script_path, 'r') as script_file:
first_line = script_file.readline().strip()
if first_line.startswith('#!'):
cmd = [first_line[2:], self._script_path,
self._document_path]
else:
cmd = None
self._document_path = None
if cmd is not None:
try:
self._document_path = subprocess.Popen(
args=cmd, shell=False,
stdout=subprocess.PIPE).communicate()[0]
except Exception:
pass
if self._document_path and not os.path.exists(self._document_path):
self._document_path = None
document_ready.set()
def wait_for_document():
""" Wait for the completion of the script and finish generation """
document_ready.wait()
GObject.idle_add(callback)
threading.Thread(target=script).start()
threading.Thread(target=wait_for_document).start()
|
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/core/cache/backends/base.py | Python | artistic-2.0 | 9,677 | 0.00155 | "Base Cache class."
from __future__ import unicode_literals
import time
import warnings
from django.core.exceptions import DjangoRuntimeWarning, ImproperlyConfigured
from django.utils.module_loading import import_string
class InvalidCacheBackendError(ImproperlyConfigured):
    """Raised when the configured cache backend cannot be imported or used."""
    pass
class CacheKeyWarning(DjangoRuntimeWarning):
    """Warning issued for cache keys that may not work on all backends
    (e.g. keys with whitespace/control characters or over-long keys)."""
    pass
# Sentinel object: lets callers distinguish "no timeout argument passed"
# (use the backend's default timeout) from an explicit timeout=None
# (cache forever). Compared against by identity.
DEFAULT_TIMEOUT = object()

# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
    """
    Default key-building function.

    Produces the final cache key in the form ``<key_prefix>:<version>:<key>``.
    Projects can swap in their own behavior via the KEY_FUNCTION setting.
    """
    parts = ['%s' % key_prefix, '%s' % version, '%s' % key]
    return ':'.join(parts)
def get_key_func(key_func):
    """
    Resolve which key function to use.

    Accepts a callable (returned unchanged), a dotted import path string
    (imported and returned), or ``None`` (falls back to
    ``default_key_func``).
    """
    if key_func is None:
        return default_key_func
    if callable(key_func):
        return key_func
    return import_string(key_func)
class BaseCache(object):
    def __init__(self, params):
        """Initialize the backend from the CACHES settings dict *params*.

        Each numeric option is parsed defensively: lowercase legacy keys
        take precedence over the canonical uppercase ones, and any value
        that cannot be coerced to int falls back to a hard-coded default.
        """
        # TIMEOUT may legitimately be None (= cache forever), so only
        # coerce to int when a value was provided.
        timeout = params.get('timeout', params.get('TIMEOUT', 300))
        if timeout is not None:
            try:
                timeout = int(timeout)
            except (ValueError, TypeError):
                timeout = 300
        self.default_timeout = timeout

        options = params.get('OPTIONS', {})
        # Maximum number of entries before culling kicks in.
        max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300

        # 1/N of entries are removed when the cache is culled.
        cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3

        self.key_prefix = params.get('KEY_PREFIX', '')
        self.version = params.get('VERSION', 1)
        self.key_func = get_key_func(params.get('KEY_FUNCTION'))
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Returns the timeout value usable by this backend based upon the provided
timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
elif timeout == 0:
# ticket 21147 - avoid time.time() related precision issues
timeout = -1
return None if timeout is None else time.time() + timeout
def make_key(self, key, version=None):
"""Constructs the key used by all other methods. By default it
uses the key_func to generate a key (which, by default,
prepends the `key_prefix' and 'version'). A different key
function can be provided at the time of cache construction;
alternatively, you can subclass the cache backend to provide
custom key making behavior.
"""
if version is None:
version = self.version
new_key = self.key_func(key, self.key_prefix, version)
return new_key
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.

        Returns True if the value was stored, False otherwise.

        Abstract: concrete backends must override this.
        """
        raise NotImplementedError('subclasses of BaseCache must provide an add() method')
    def get(self, key, default=None, version=None):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.

        Abstract: concrete backends must override this.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a get() method')
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.

        Abstract: concrete backends must override this.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a set() method')
    def delete(self, key, version=None):
        """
        Delete a key from the cache, failing silently if it is absent.

        Abstract: concrete backends must override this.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a delete() method')
def get_many(self, keys, version=None):
"""
Fetch a bunch of keys from the cache. For certain backends (memcached,
pgsql) this can be *much* faster when fetching multiple values.
Returns a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
"""
d = {}
for k in keys:
val = self.get(k, version=version)
if val is not None:
d[k] = val
return d
    def get_or_set(self, key, default=None, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Fetch a given key from the cache. If the key does not exist,
        the key is added and set to the default value. The default value can
        also be any callable. If timeout is given, that timeout will be used
        for the key; otherwise the default cache timeout will be used.

        Returns the value of the key stored or retrieved on success,
        False on error (i.e. when add() reports the value was not stored,
        e.g. lost a race against a concurrent writer).
        """
        # None cannot be cached meaningfully because get() uses None as its
        # "missing" marker.
        if default is None:
            raise ValueError('You need to specify a value.')
        val = self.get(key, version=version)
        if val is None:
            # Lazily evaluate callable defaults only when actually needed.
            if callable(default):
                default = default()
            val = self.add(key, default, timeout=timeout, version=version)
            if val:
                # Re-fetch so the caller sees exactly what the cache stored.
                return self.get(key, version=version)
        return val
    def has_key(self, key, version=None):
        """
        Returns True if the key is in the cache and has not expired.

        Note: relies on get() returning None for missing keys, so a stored
        literal None is indistinguishable from "absent".
        """
        return self.get(key, version=version) is not None
def incr(self, key, delta=1, version=None):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
value = self.get(key, version=version)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
self.set(key, new_value, version=version)
return new_value
    def decr(self, key, delta=1, version=None):
        """
        Subtract delta from value in the cache. If the key does not exist, raise
        a ValueError exception (propagated from incr()).
        """
        # Implemented as a negative increment so subclasses only need to
        # override incr() to change both behaviors.
        return self.incr(key, -delta, version=version)
    def __contains__(self, key):
        """
        Returns True if the key is in the cache and has not expired.

        Enables the ``key in cache`` syntax.
        """
        # This is a separate method, rather than just a copy of has_key(),
        # so that it always has the same functionality as has_key(), even
        # if a subclass overrides it.
        return self.has_key(key)
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. For certain backends (memcached), this is much more efficient
than calling set() multiple times.
If timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
"""
for key, value in data.items():
self.set(key, value, timeout=timeout, version=version)
    def delete_many(self, keys, version=None):
        """
        Delete a bunch of keys from the cache at once. For certain backends
        (memcached), this is much more efficient than calling delete() multiple
        times.
        """
        for key in keys:
            self.delete(key, version=version)
    def clear(self):
        """Remove *all* values from the cache at once.

        Abstract: concrete backends must override this.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a clear() method')
def validate_key(self, key):
|
kingland/go-v8 | v8-3.28/tools/testrunner/local/testsuite.py | Python | mit | 7,417 | 0.010382 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import os
from . import statusfile
from . import utils
class TestSuite(object):
    """A collection of test cases plus the status-file rules that classify
    them (skip/flaky/slow/pass-fail).  Concrete suites are loaded from a
    per-directory ``testcfg.py`` module and override the NotImplementedError
    hooks below."""

    @staticmethod
    def LoadTestSuite(root):
        """Import ``<root>/testcfg.py`` and ask it for the suite object."""
        # The suite's name is the last path component of its directory.
        name = root.split(os.path.sep)[-1]
        f = None
        try:
            (f, pathname, description) = imp.find_module("testcfg", [root])
            module = imp.load_module("testcfg", f, pathname, description)
            suite = module.GetSuite(name, root)
        finally:
            # find_module returns an open file handle; always close it.
            if f:
                f.close()
        return suite

    def __init__(self, name, root):
        self.name = name  # string
        self.root = root  # string containing path
        self.tests = None  # list of TestCase objects
        self.rules = None  # dictionary mapping test path to list of outcomes
        self.wildcards = None  # dictionary mapping test paths to list of outcomes
        self.total_duration = None  # float, assigned on demand

    def shell(self):
        # Default executable used to run tests; suites may override.
        return "d8"

    def suffix(self):
        # Default test-file suffix; suites may override.
        return ".js"

    def status_file(self):
        return "%s/%s.status" % (self.root, self.name)

    # Used in the status file and for stdout printing.
    def CommonTestName(self, testcase):
        if utils.IsWindows():
            return testcase.path.replace("\\", "/")
        else:
            return testcase.path

    def ListTests(self, context):
        # Abstract: concrete suites enumerate their TestCase objects here.
        raise NotImplementedError

    def VariantFlags(self, testcase, default_flags):
        # Tests marked "only standard variant" run once with no extra flags.
        if testcase.outcomes and statusfile.OnlyStandardVariant(testcase.outcomes):
            return [[]]
        return default_flags

    def DownloadData(self):
        # Hook for suites that fetch external test data; default is a no-op.
        pass

    def ReadStatusFile(self, variables):
        (self.rules, self.wildcards) = \
            statusfile.ReadStatusFile(self.status_file(), variables)

    def ReadTestCases(self, context):
        self.tests = self.ListTests(context)

    # The three _Filter* helpers answer "should this test be dropped?"
    # given its flag and the run mode ("run"/"skip"/"dontcare").
    @staticmethod
    def _FilterFlaky(flaky, mode):
        return (mode == "run" and not flaky) or (mode == "skip" and flaky)

    @staticmethod
    def _FilterSlow(slow, mode):
        return (mode == "run" and not slow) or (mode == "skip" and slow)

    @staticmethod
    def _FilterPassFail(pass_fail, mode):
        return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)

    def FilterTestCasesByStatus(self, warn_unused_rules,
                                flaky_tests="dontcare",
                                slow_tests="dontcare",
                                pass_fail_tests="dontcare"):
        """Drop tests according to status-file rules and the given modes,
        then optionally report rules that matched no test."""
        filtered = []
        used_rules = set()
        for t in self.tests:
            flaky = False
            slow = False
            pass_fail = False
            testname = self.CommonTestName(t)
            # Exact-path rules take effect first.
            if testname in self.rules:
                used_rules.add(testname)
                # Even for skipped tests, as the TestCase object stays around and
                # PrintReport() uses it.
                t.outcomes = self.rules[testname]
                if statusfile.DoSkip(t.outcomes):
                    continue  # Don't add skipped tests to |filtered|.
                flaky = statusfile.IsFlaky(t.outcomes)
                slow = statusfile.IsSlow(t.outcomes)
                pass_fail = statusfile.IsPassOrFail(t.outcomes)
            # Wildcard rules ("prefix*") can add to or override the above.
            skip = False
            for rule in self.wildcards:
                assert rule[-1] == '*'
                if testname.startswith(rule[:-1]):
                    used_rules.add(rule)
                    t.outcomes = self.wildcards[rule]
                    if statusfile.DoSkip(t.outcomes):
                        skip = True
                        break  # "for rule in self.wildcards"
                    flaky = flaky or statusfile.IsFlaky(t.outcomes)
                    slow = slow or statusfile.IsSlow(t.outcomes)
                    pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
            if (skip or self._FilterFlaky(flaky, flaky_tests)
                or self._FilterSlow(slow, slow_tests)
                or self._FilterPassFail(pass_fail, pass_fail_tests)):
                continue  # "for t in self.tests"
            filtered.append(t)
        self.tests = filtered

        if not warn_unused_rules:
            return
        for rule in self.rules:
            if rule not in used_rules:
                print("Unused rule: %s -> %s" % (rule, self.rules[rule]))
        for rule in self.wildcards:
            if rule not in used_rules:
                print("Unused rule: %s -> %s" % (rule, self.wildcards[rule]))

    def FilterTestCasesByArgs(self, args):
        """Keep only the tests matching the command-line selectors in *args*
        (each of the form "<suite>/<path...>", optionally ending in '*')."""
        filtered = []
        filtered_args = []
        for a in args:
            argpath = a.split(os.path.sep)
            if argpath[0] != self.name:
                continue
            if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
                return  # Don't filter, run all tests in this suite.
            path = os.path.sep.join(argpath[1:])
            if path[-1] == '*':
                path = path[:-1]
            filtered_args.append(path)
        for t in self.tests:
            for a in filtered_args:
                if t.path.startswith(a):
                    filtered.append(t)
                    break
        self.tests = filtered

    def GetFlagsForTestCase(self, testcase, context):
        # Abstract: concrete suites build the command-line flags here.
        raise NotImplementedError

    def GetSourceForTest(self, testcase):
        return "(no source available)"

    def IsFailureOutput(self, output, testpath):
        return output.exit_code != 0

    def IsNegativeTest(self, testcase):
        # Negative tests are *expected* to fail; suites override as needed.
        return False

    def HasFailed(self, testcase):
        execution_failed = self.IsFailureOutput(testcase.output, testcase.path)
        if self.IsNegativeTest(testcase):
            # For negative tests a failing run counts as success.
            return not execution_failed
        else:
            return execution_failed

    def GetOutcome(self, testcase):
        """Classify a finished test as CRASH, TIMEOUT, FAIL or PASS."""
        if testcase.output.HasCrashed():
            return statusfile.CRASH
        elif testcase.output.HasTimedOut():
            return statusfile.TIMEOUT
        elif self.HasFailed(testcase):
            return statusfile.FAIL
        else:
            return statusfile.PASS

    def HasUnexpectedOutput(self, testcase):
        outcome = self.GetOutcome(testcase)
        # Tests without explicit outcomes are expected to PASS.
        return not outcome in (testcase.outcomes or [statusfile.PASS])

    def StripOutputForTransmit(self, testcase):
        # Drop stdout/stderr of expected results to keep network payloads small.
        if not self.HasUnexpectedOutput(testcase):
            testcase.output.stdout = ""
            testcase.output.stderr = ""

    def CalculateTotalDuration(self):
        self.total_duration = 0.0
        for t in self.tests:
            self.total_duration += t.duration
        return self.total_duration
|
woobe/h2o | py/testdir_single_jvm/test_frame_split_iris.py | Python | apache-2.0 | 1,612 | 0.007444 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_import as h2i, h2o_jobs, h2o_exec as h2e
DO_POLL = False
class Basic(unittest.TestCase):
    """Smoke test for H2O's frame_split on the small iris dataset.

    Note: Python 2 code (uses the `print` statement)."""

    def tearDown(self):
        # Fail the test if H2O logged errors during the run.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Build a local single-node cloud, or a multi-host cloud otherwise.
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(1,java_heap_GB=4, base_port=54323)
        else:
            h2o_hosts.build_cloud_with_hosts(base_port=54323)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_frame_split(self):
        """Repeatedly 50/50-split the iris frame and check nothing blows up."""
        h2o.beta_features = True

        csvFilename = 'iris22.csv'
        csvPathname = 'iris/' + csvFilename
        hex_key = "iris.hex"

        # Parse result is unused; the parse itself loads the frame under hex_key.
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, schema='local', timeoutSecs=10)
        print "Just split away and see if anything blows up"
        splitMe = hex_key
        # don't split
        for s in range(10):
            fs = h2o.nodes[0].frame_split(source=splitMe, ratios=0.5)
            split0_key = fs['split_keys'][0]
            split1_key = fs['split_keys'][1]
            split0_rows = fs['split_rows'][0]
            split1_rows = fs['split_rows'][1]
            split0_ratio = fs['split_ratios'][0]
            split1_ratio = fs['split_ratios'][1]
            print "Iteration", s, "split0_rows:", split0_rows, "split1_rows:", split1_rows
            # Keep halving the first split until it gets too small.
            splitMe = split0_key
            if split0_rows<=2:
                break
# Allow running this test module directly outside a test runner.
if __name__ == '__main__':
    h2o.unit_main()
|
ThreatConnect-Inc/tcex | tcex/pleb/env_path.py | Python | apache-2.0 | 1,603 | 0.000624 | """ENV Str"""
# standard library
import os
import re
from pathlib import Path
from typing import Any, Dict, Union
# type(Path()) resolves to the concrete platform class (PosixPath or
# WindowsPath), which is required to subclass Path instantiably.
class _EnvPath(type(Path()), Path):  # pylint: disable=E0241
    """A stub of Path with additional attribute."""

    # Store for the original (pre-expansion) value passed to EnvPath.
    original_value = None
class EnvPath(Path):
    """EnvPath custom pydantic model type.

    Validates string/Path inputs, expanding ``${env:VAR}`` references and
    ``~`` before producing a Path-like value."""

    @classmethod
    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
        """Mark this field as a file path in the generated JSON schema."""
        field_schema.update(format='file-path')

    @classmethod
    def __get_validators__(cls) -> 'CallableGenerator':  # noqa: F821
        """Yield the validators pydantic should run for this type."""
        yield cls.validate

    @classmethod
    def validate(cls, value: Union[str, Path]) -> Path:
        """Replace any environment variables in the tcex.json file.

        Path inputs are returned unchanged.  String inputs have every
        ``${env:KEY}`` occurrence replaced with ``os.getenv(KEY)`` (when
        set), then are user-expanded and wrapped in _EnvPath, which keeps
        the original input in ``original_value``.
        """
        if isinstance(value, Path):
            return value

        string = str(value)
        for m in re.finditer(r'\${(env|envs|local|remote):(.*?)}', string):
            try:
                full_match = m.group(0)
                env_type = m.group(1)
                env_key = m.group(2)

                # Only ${env:...} is supported here; other recognized
                # prefixes (envs/local/remote) are rejected outright.
                if env_type != 'env':
                    raise ValueError(f'Invalid environment type found ({env_type})')

                env_value = os.getenv(env_key)
                if env_value is not None:
                    string = string.replace(full_match, env_value)
            # NOTE(review): this handler looks unreachable -- the fixed
            # regex groups cannot raise IndexError, and the ValueError
            # above is not caught. If it ever fired it would return a plain
            # str, not a Path. Presumably legacy; confirm before relying on it.
            except IndexError:
                return string

        # convert value to Path and keep the original value alongside it
        p = _EnvPath(os.path.expanduser(string))
        p.original_value = value
        return p
|
liquidkarma/pyneat | examples/xor/xor_neat.py | Python | gpl-2.0 | 1,835 | 0.016894 | #!/usr/bin/python
"""
pyNEAT
Copyright (C) 2007-2008 Brian Greer
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
## Simple XOR experiment. This should be the minimum amount of effort required
## to create an experiment with pyNEAT.
import pyNEAT
import math
#import profile
class XORTest(pyNEAT.Experiment):
    """XOR experiment: evolve a network reproducing the XOR truth table."""

    def __init__(self):
        pyNEAT.Experiment.__init__(self, 'XOR', 'xorstartgenes')
        # Each input row is [bias, a, b]; the leading 1.0 is the bias unit.
        self.inputs = [[1.0, 0.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]]
        self.targets = [0.0, 1.0, 1.0, 0.0]

    def evaluate(self, network):
        """Activate *network* on all four XOR cases and score it.

        Returns (fitness, outputs, error, winner): fitness is
        (4 - total_error)**2, error is the summed absolute error, and
        winner is True iff every output lands on the correct side of 0.5.
        """
        outputs = network.activate(inputs=self.inputs)
        errorSum = 0
        winner = True
        # Pair each target with its network output row instead of the
        # original index-based range(len(...)) loop.
        for target, row in zip(self.targets, outputs):
            output = row[0]
            errorSum += math.fabs(output - target)
            if (target > 0.5 and output < 0.5) or \
               (target < 0.5 and output > 0.5):
                winner = False
        fitness = (4.0 - errorSum) ** 2
        error = errorSum
        return fitness, outputs, error, winner
if __name__ == '__main__':
    # Load NEAT parameters, then run the experiment with the GUI enabled.
    pyNEAT.loadConfiguration('xor.ne')
    xorTest = XORTest()
    xorTest.run(useGUI=True)
    #profile.run("xorTest.run()")
|
snapcore/snapcraft | tests/unit/build_providers/test_base_provider.py | Python | gpl-3.0 | 25,876 | 0.000966 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018-2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import os
import pathlib
import platform
import tempfile
from textwrap import dedent
from unittest.mock import Mock, call, patch
import fixtures
import pytest
from testtools.matchers import DirExists, EndsWith, Equals, Not
from snapcraft.internal import steps
from snapcraft.internal.build_providers import errors
from snapcraft.internal.meta.snap import Snap
from snapcraft.project import Project
from . import (
BaseProviderBaseTest,
MacBaseProviderWithBasesBaseTest,
ProviderImpl,
get_project,
)
class BaseProviderTest(BaseProviderBaseTest):
    def test_initialize(self):
        """A freshly constructed provider derives names/paths from the project."""
        provider = ProviderImpl(project=self.project, echoer=self.echoer_mock)

        self.assertThat(provider.project, Equals(self.project))
        self.assertThat(provider.instance_name, Equals(self.instance_name))
        self.assertThat(
            provider.provider_project_dir,
            EndsWith(
                os.path.join("snapcraft", "projects", "project-name", "stub-provider")
            ),
        )
        # Without an explicit version the snap filename uses the default "1.0".
        self.assertThat(
            provider.snap_filename,
            Equals("project-name_1.0_{}.snap".format(self.project.deb_arch)),
        )
    def test_context(self):
        """The context manager creates on entry and destroys on exit."""
        with ProviderImpl(project=self.project, echoer=self.echoer_mock) as provider:
            provider.shell()
            # Keep a reference so the mocks can be inspected after exit.
            fake_provider = provider

        fake_provider.create_mock.assert_called_once_with("create")
        fake_provider.destroy_mock.assert_called_once_with("destroy")
    def test_context_fails_create(self):
        """destroy() still runs when create() raises inside the context manager."""
        create_mock = Mock()
        destroy_mock = Mock()

        class BadProviderImpl(ProviderImpl):
            # create() records the call and then fails deliberately.
            def create(self):
                super().create()
                create_mock("create bad")
                raise errors.ProviderBaseError()

            def destroy(self):
                super().destroy()
                destroy_mock("destroy bad")

        # The provider error is expected; suppress it so the assertions run.
        with contextlib.suppress(errors.ProviderBaseError):
            with BadProviderImpl(project=self.project, echoer=self.echoer_mock):
                pass

        create_mock.assert_called_once_with("create bad")
        destroy_mock.assert_called_once_with("destroy bad")
    def test_initialize_snap_filename_with_version(self):
        """snap_filename embeds the snap version when the project declares one."""
        self.project._snap_meta.version = "test-version"
        provider = ProviderImpl(project=self.project, echoer=self.echoer_mock)

        self.assertThat(
            provider.snap_filename,
            Equals("project-name_test-version_{}.snap".format(self.project.deb_arch)),
        )
    def test_launch_instance(self):
        """Launching a new instance provisions it end-to-end.

        start() raising ProviderInstanceNotFoundError forces the launch
        path; the test then pins the exact provisioning command sequence.
        The "/var/tmp/L..." names are the base64-encoded destination paths
        of files pushed into the instance before being moved into place.
        """
        self.useFixture(fixtures.EnvironmentVariable("SNAP_VERSION", "4.0"))

        provider = ProviderImpl(project=self.project, echoer=self.echoer_mock)
        # No existing instance -> launch_instance() must create one.
        provider.start_mock.side_effect = errors.ProviderInstanceNotFoundError(
            instance_name=self.instance_name
        )

        provider.launch_instance()

        provider.launch_mock.assert_any_call()
        provider.start_mock.assert_any_call()

        # Instance metadata is persisted for later reuse checks.
        provider.save_info_mock.assert_called_once_with(
            {
                "data": {
                    "base": "core20",
                    "created-by-snapcraft-version": "4.0",
                    "host-project-directory": self.project._project_dir,
                }
            }
        )

        self.assertThat(
            provider.run_mock.mock_calls,
            Equals(
                [
                    call(["mv", "/var/tmp/L3Jvb3QvLmJhc2hyYw==", "/root/.bashrc"]),
                    call(["chown", "root:root", "/root/.bashrc"]),
                    call(["chmod", "0600", "/root/.bashrc"]),
                    call(
                        [
                            "mv",
                            "/var/tmp/L2Jpbi9fc25hcGNyYWZ0X3Byb21wdA==",
                            "/bin/_snapcraft_prompt",
                        ]
                    ),
                    call(["chown", "root:root", "/bin/_snapcraft_prompt"]),
                    call(["chmod", "0755", "/bin/_snapcraft_prompt"]),
                    call(
                        [
                            "mv",
                            "/var/tmp/L2V0Yy9hcHQvc291cmNlcy5saXN0",
                            "/etc/apt/sources.list",
                        ]
                    ),
                    call(["chown", "root:root", "/etc/apt/sources.list"]),
                    call(["chmod", "0644", "/etc/apt/sources.list"]),
                    call(
                        [
                            "mv",
                            "/var/tmp/L2V0Yy9hcHQvc291cmNlcy5saXN0LmQvZGVmYXVsdC5zb3VyY2Vz",
                            "/etc/apt/sources.list.d/default.sources",
                        ]
                    ),
                    call(
                        [
                            "chown",
                            "root:root",
                            "/etc/apt/sources.list.d/default.sources",
                        ]
                    ),
                    call(["chmod", "0644", "/etc/apt/sources.list.d/default.sources"]),
                    call(
                        [
                            "mv",
                            "/var/tmp/L2V0Yy9hcHQvc291cmNlcy5saXN0LmQvZGVmYXVsdC1zZWN1cml0eS5zb3VyY2Vz",
                            "/etc/apt/sources.list.d/default-security.sources",
                        ]
                    ),
                    call(
                        [
                            "chown",
                            "root:root",
                            "/etc/apt/sources.list.d/default-security.sources",
                        ]
                    ),
                    call(
                        [
                            "chmod",
                            "0644",
                            "/etc/apt/sources.list.d/default-security.sources",
                        ]
                    ),
                    call(
                        [
                            "mv",
                            "/var/tmp/L2V0Yy9hcHQvYXB0LmNvbmYuZC8wMC1zbmFwY3JhZnQ=",
                            "/etc/apt/apt.conf.d/00-snapcraft",
                        ]
                    ),
                    call(["chown", "root:root", "/etc/apt/apt.conf.d/00-snapcraft"]),
                    call(["chmod", "0644", "/etc/apt/apt.conf.d/00-snapcraft"]),
                    call(["apt-get", "update"]),
                    call(["apt-get", "dist-upgrade", "--yes"]),
                    call(["apt-get", "install", "--yes", "apt-transport-https"]),
                    call(["snap", "unset", "system", "proxy.http"]),
                    call(["snap", "unset", "system", "proxy.https"]),
                ]
            ),
        )
        self.assertThat(provider.provider_project_dir, DirExists())
    def test_launch_instance_with_proxies(self):
        """Configured proxies are pushed into snapd via `snap set system`."""
        provider = ProviderImpl(
            project=self.project,
            echoer=self.echoer_mock,
            build_provider_flags={
                "http_proxy": "http://1.2.3.4:8080",
                "https_proxy": "http://2.3.4.5:8080",
            },
        )
        provider.launch_instance()

        provider.run_mock.assert_has_calls(
            [
                call(["snap", "set", "system", "proxy.http=http://1.2.3.4:8080"]),
                call(["snap", "set", "system", "proxy.https=http://2.3.4.5:8080"]),
            ]
        )
def test_launch_instance_with_cert_file(self):
test_certs = pathlib.Pat |
Mariaanisimova/pythonintask | INBa/2015/Sarocvashin_M/task_8_23.py | Python | apache-2.0 | 1,816 | 0.02439 | #Задача 8. Вариант 23.
#1-50. Доработайте игру "Анаграммы" (см. М.Доусон Программируем на Python. Гл.4) так, чтобы к каждому слову полагалась подсказка. Игрок должен получать право на подсказку в том случае, если у него нет никаких предположений. Разработайте систему начисления очков, по которой бы игроки, отгадавшие слово без подсказки, получ | али больше тех, кто запросил подсказку.#Чинкиров Валентин Владимирович
#20.05.2016
#Сароквашин Максим
import random
score = 10
words = ("Девятка ", "Шаха", "Семерка", "Москвич")
wor | d = random.choice(words)
letters = len(word)
print ("Я загадал некоторое слово связаное с машинами русского автопрома. В нём ", letters, " букв(/-ы)." )
ls = list(word)
random.shuffle(ls)
anagram = ls
i = 0
print(anagram)
answer = ""
while(answer!=word):
print("Назовёте слово сразу?(да/нет)")
answer = str(input())
if (answer == str("да")):
print("Введите свой ответ: ")
answer = str(input())
if (answer == word):
if (score < 0):
score == 0
print("Красава. Твой счет: ", str(score))
else:
print("Хочешь подсказку(да/нет)")
answer = str(input())
if (answer == str("да")):
print("Подсказка!", i+1, "буква: ", word[i])
score -= 2
else:
print("\nУжааассс??!")
break
input ("\nНажмите Enter для выхода.")
|
pedesen/pyligadb | pyligadb.py | Python | mit | 11,013 | 0.003632 | #!/usr/bin/env python
"""
The pyligadb module is a small python wrapper for the OpenLigaDB webservice.
The pyligadb module has been released as open source under the MIT License.
Copyright (c) 2014 Patrick Dehn
Due to suds, the wrapper is very thin, but the docstrings may be helpful.
Most of the methods of pyligadb return a list containing the requested data as
objects. So the attributes of the list items are accessible via the dot notation
(see example below). For a more detailed description of the return values see
the original documentation: http://www.openligadb.de/Webservices/Sportsdata.asmx
Example use (prints all matches at round 14 in season 2010 from the Bundesliga):
>>> from pyligadb.pyligadb import API
>>> matches = API().getMatchdataByGroupLeagueSaison(14, 'bl1', 2010)
>>> for match in matches:
...     print u"{} vs. {}".format(match.nameTeam1, match.nameTeam2)
1. FSV Mainz 05 vs. 1. FC Nuernberg
1899 Hoffenheim vs. Bayer Leverkusen
...
...
"""
__version__ = "0.1.1"
try:
from suds.client import Client
except ImportError:
raise Exception("pyligadb requires the suds library to work. "
"https://fedorahosted.org/suds/")
class API:
    def __init__(self):
        """Create the suds SOAP client bound to the OpenLigaDB web service."""
        self.client = Client('http://www.openligadb.de/Webservices/'
                             'Sportsdata.asmx?WSDL').service
    def getAvailGroups(self, leagueShortcut, leagueSaison):
        """
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @param leagueSaison: A specific season (i.e. the date 2011 as integer)
        @return: A list of available groups (half-final, final, etc.) for the
            specified league and season.  The trailing [0] unwraps the suds
            SOAP array wrapper.
        """
        return self.client.GetAvailGroups(leagueShortcut, leagueSaison)[0]
    def getAvailLeagues(self):
        """
        @return: A list of all leagues available in OpenLigaDB (unwrapped
            from the suds SOAP array wrapper via [0]).
        """
        return self.client.GetAvailLeagues()[0]
    def getAvailLeaguesBySports(self, sportID):
        """
        @param sportID: The id related to a specific sport.
            Use getAvailSports() to get all IDs.
        @return: A list of all available leagues of the specified sport.
        """
        return self.client.GetAvailLeaguesBySports(sportID)[0]
    def getAvailSports(self):
        """
        @return: A list of all sports available in OpenLigaDB.
        """
        return self.client.GetAvailSports()[0]
    def getCurrentGroup(self, leagueShortcut):
        """
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @return: An object containing information about the current group for
            the specified league (i.e. the round ("Spieltag") of the German
            Bundesliga).  Returned as-is (a single object, no unwrapping).
        """
        return self.client.GetCurrentGroup(leagueShortcut)
    def getCurrentGroupOrderID(self, leagueShortcut):
        """
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @return: The current group-ID for the specified league
            (see getCurrentGroup()) as an int value.
        """
        return self.client.GetCurrentGroupOrderID(leagueShortcut)
    def getGoalGettersByLeagueSaison(self, leagueShortcut, leagueSaison):
        """
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @param leagueSaison: A specific season (i.e. the date 2011 as integer).
        @return: A list of scorers from the specified league and season,
            sorted by goals scored.
        """
        return self.client.GetGoalGettersByLeagueSaison(leagueShortcut,
                                                        leagueSaison)[0]
    def getGoalsByLeagueSaison(self, leagueShortcut, leagueSaison):
        """
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @param leagueSaison: A specific season (i.e. the date 2011 as integer).
        @return: A list of all goals from the specified league and season.
        """
        return self.client.GetGoalsByLeagueSaison(leagueShortcut,
                                                  leagueSaison)[0]
    def getGoalsByMatch(self, matchID):
        """
        @param matchID: The ID of a specific Match. Use i.e. getLastMatch()
            to obtain an ID.
        @return: A list of all goals from the specified match, or None when
            the service returns an empty string (no goals recorded).
        """
        result = self.client.GetGoalsByMatch(matchID)
        if result == "":
            return None
        else:
            return result[0]
    def getLastChangeDateByGroupLeagueSaison(self, groupOrderID, leagueShortcut,
                                             leagueSaison):
        """
        @param groupOrderID: The id of a specific group.
            Use i.e. getCurrentGroupOrderID() to obtain an ID.
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @param leagueSaison: A specific season (i.e. the date 2011 as integer).
        @return: The date of the last change as datetime object.
        """
        return self.client.GetLastChangeDateByGroupLeagueSaison(groupOrderID,
            leagueShortcut, leagueSaison)
    def getLastChangeDateByLeagueSaison(self, leagueShortcut, leagueSaison):
        """
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @param leagueSaison: A specific season (i.e. the date 2011 as integer).
        @return: The date of the last change as datetime object.
        """
        return self.client.GetLastChangeDateByLeagueSaison(leagueShortcut,
                                                           leagueSaison)
    def getLastMatch(self, leagueShortcut):
        """
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @return: An object containing information about the last match from
            the specified league.
        """
        return self.client.GetLastMatch(leagueShortcut)
    def getLastMatchByLeagueTeam(self, leagueID, teamID):
        """
        @param leagueID: The ID of a specific league.
            Use getAvailLeagues() to get all IDs.
        @param teamID: The ID of a team, which can be obtained by using
            getTeamsByLeagueSaison().
        @return: An object containing information about the last played match.
        """
        return self.client.GetLastMatchByLeagueTeam(leagueID, teamID)
    def getMatchByMatchID(self, matchID):
        """
        @param matchID: The ID of a specific Match. Use i.e. getNextMatch()
            to obtain an ID.
        @return: An object containing information about the specified match.
        """
        return self.client.GetMatchByMatchID(matchID)
    def getMatchdataByGroupLeagueSaison(self, groupOrderID, leagueShortcut,
                                        leagueSaison):
        """
        @param groupOrderID: The ID of a specific group.
            Use i.e. getCurrentGroupOrderID() to obtain an ID.
        @param leagueShortcut: Shortcut for a specific league.
            Use getAvailLeagues() to get all shortcuts.
        @param leagueSaison: A specific season (i.e. the date 2011 as integer).
        @return: A list of matches. Each list item is an object containing
            detailed information about the specified group/round.
        """
        return self.client.GetMatchdataByGroupLeagueSaison(groupOrderID,
            leagueShortcut, leagueSaison)[0]
def getMatchdataByGroupLeagueSaisonJSON(self, groupOrderID, leagueShortcut,
leagueSaison):
"""
@param groupOrderID: The ID of a specific group.
Use i.e. getCurrentGroupOrderID() to obtain an ID.
@param leagueShortcut: | Shortcut for a specific league.
Use getAvailLeagues() to get all shortcuts.
@param leagueSaison: A specific season (i.e. the date 2011 as integer).
@return: A JSON-Object containing detailed information about the
specified group/round.
"""
return |
dimagi/commcare-hq | corehq/form_processor/migrations/0020_rename_index_relationship.py | Python | bsd-3-clause | 352 | 0 | from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``CommCareCaseIndexSQL.relationship`` to ``relationship_id``.

    Pure schema rename (no data migration); generated in the usual Django
    style, so the operation list should not be edited by hand.
    """

    dependencies = [
        ('form_processor', '0019_allow_closed_by_null'),
    ]

    operations = [
        migrations.RenameField(
            model_name='commcarecaseindexsql',
            old_name='relationship',
            new_name='relationship_id',
        ),
    ]
Kagami/shitsu | shitsu/modules/animecal.py | Python | gpl-3.0 | 3,252 | 0.001538 | ##################################################
# shitsu - tiny and flexible xmpp bot framework
# Copyright (C) 2008-2012 Kagami Hiiragi <kagami@genshiken.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##################################################
import re
import datetime
from shitsu.utils.BeautifulSoup import BeautifulSoup
from shitsu import modules
from shitsu import utils
reload(utils)
class Animecal(modules.MessageModule):

    # Accepted argument counts for the bot command (0..3 positional args).
    args = (0, 1, 2, 3)

    def run(self, day=None, month=None, year=None):
        """[day [month [year]]]
        Show anime list airing today (timezone UTC+9) or at specified date.
        Source: http://animecalendar.net/
        """
        # Site schedule is in JST; shift UTC by +9 hours.
        now = datetime.datetime.utcnow() + datetime.timedelta(hours=9)
        try:
            if day: now = now.replace(day=int(day))
            if month: now = now.replace(month=int(month))
            if year: now = now.replace(year=int(year))
        except Exception:
            # Silently ignore an unparsable/invalid date, as before.
            return
        url = "http://animecalendar.net/%d/%d/%d" % (
            now.year, now.month, now.day)
        data = utils.get_url(url)
        if not data:
            return "can't get data"
        if day:
            info = "Anime for %02d.%02d.%d" % (now.day, now.month, now.year)
            time_str = ""
        else:
            info = "Anime for today"
            time_str = " now: %02d:%02d (UTC+9)" % (now.hour, now.minute)
        # Build plain-text and XHTML variants of the reply in parallel.
        anime_list = ["%s <%s>%s" % (info, url, time_str)]
        anime_list_xhtml = [
            "%s <<a href='%s'>%s</a>>%s" % (info, url, url, time_str)
        ]
        soup = BeautifulSoup(data)
        for div in soup.findAll("div", {"class": "ep_box"}):
            anime = div.h3.a.string.strip()
            ep = div.small.string.replace("\t", "").replace("\n", "")
            anime_list.append("%s (%s)" % (anime, ep))
            line = "%s <span style='font-style: italic;'>(%s)</span>" % (
                anime, ep)
            if not day:
                # Highlight shows that started within the last 30 minutes.
                (h, m) = re.search(r"at (\d{2}):(\d{2})", ep).groups()
                start_time = now.replace(hour=int(h), minute=int(m))
                delta = (now - start_time).seconds / 60
                if delta > 0 and delta < 30:
                    line = "<span style='font-weight: bold;'>%s</span>" % line
            anime_list_xhtml.append(line)
        if len(anime_list) == 1:
            s = "No anime :((("
            anime_list.append(s)
            anime_list_xhtml.append(s)
        return "\n".join(anime_list), "<br />".join(anime_list_xhtml)
if __name__ == "__main__":
    # Manual smoke test: render today's schedule in both output formats.
    (text, xhtml) = Animecal(None).run()
    print text
    print "==="
    print xhtml
acerlawson/my-shadowsocks-manage | ssexe.py | Python | mit | 3,352 | 0.062649 | #!/usr/bin/env python
# Module authorship metadata.
__author__ = 'acerlawson'
__email__ = 'acerlawson@gmail.com'
import sslib
import os
import time
from datetime import datetime,timedelta
import commands
import json
import ssmail
def TurnOn(usrd):
    """Mail *usrd* a notification that their service was turned on."""
    notification = ssmail.TurnOnMsg(usrd)
    ssmail.SendMail(usrd, notification)
def TurnOff(usrd):
    """Mail *usrd* a notification that their service was turned off."""
    notification = ssmail.TurnOffMsg(usrd)
    ssmail.SendMail(usrd, notification)
def Check():
    """Run one pass over every user: check their quota/state and turn the
    shadowsocks service on or off accordingly, logging each action."""
    # Mark the start of the routine check in the history log.
    sslib.Inhistory('Routine check')
    # Read the user list.
    usrlist = sslib.GetUsrList()
    # NOTE(review): nowdate is never used below -- kept for its (possible)
    # side effect in sslib; confirm whether it can be dropped.
    nowdate = sslib.nowdate()
    for usrname in usrlist:
        # Wrap the raw dict in a MyUsr helper.
        usr = sslib.MyUsr(usrlist[usrname])
        # usr.check() returns 'turnon', 'turnoff', or something else (no-op).
        Result = usr.check()
        print(Result)
        if Result == 'turnon':
            sslib.Inhistory('Turn on ' + usrname + '\'s service')
            TurnOn(usrlist[usrname])
        if Result == 'turnoff':
            sslib.Inhistory('Turn off ' + usrname + '\'s service')
            TurnOff(usrlist[usrname])
    # Mark the end of the routine check.
    sslib.Inhistory('End check')
def Start():
    """Fork into the background and run Check() forever at the configured
    sleep interval, recording the daemon pid in ssrun.pid."""
    sslib.Inhistory('Command: ' + 'Start')
    # Resolve the pid-file directory; fall back to /tmp on any failure.
    try:
        piddir = sslib.GetEtc()['piddir']
    except:
        piddir = '/tmp'
    pidpos = os.path.join(piddir, 'ssrun.pid')
    # Daemonize: the parent returns immediately, the child keeps running.
    if os.fork():
        return
    os.setsid()
    # Record our pid so Stop() can find and kill this daemon later.
    with open(pidpos, 'a') as pidfile:
        pidfile.write(str(os.getpid()) + '\n')
    sslib.Inhistory('Start Run')
    while True:
        Check()
        # Re-read the configured interval each cycle; default to an hour.
        try:
            sleep = sslib.GetEtc()['sleep']
        except:
            sleep = 3600
        time.sleep(sleep)
def Stop():
    """Turn off every user's service, then kill the Start() daemon."""
    sslib.Inhistory('Command: ' + 'Stop')
    # First pass: take every user's shadowsocks process offline.
    usrlist = sslib.GetUsrList()
    nowdate = sslib.nowdate()
    for usrname in usrlist:
        usr = sslib.MyUsr(usrlist[usrname])
        if usr.offline():
            sslib.Inhistory('Turn off ' + usrname + '\'s service')
            TurnOff(usrlist[usrname])
    sslib.Success('Kill all service')
    # Second pass: kill the daemon pids recorded in ssrun.pid.
    try:
        piddir = sslib.GetEtc()['piddir']
    except:
        piddir = '/tmp'
    pidpos = os.path.join(piddir, 'ssrun.pid')
    if not os.path.exists(pidpos):
        # No pid file means there is nothing to stop.
        sslib.Error(2, 'No such file')
        return
    with open(pidpos, 'r') as pidfile:
        pids = pidfile.read().split('\n')
    for pid in pids:
        if len(pid) > 0:
            (status, output) = commands.getstatusoutput('kill ' + pid)
            if status == 0:
                sslib.Inhistory('Kill ' + pid)
    # Remove the stale pid file once its processes are gone.
    os.remove(pidpos)
    sslib.Success('Stop')
def View():
    """Show the recorded daemon pids (with elapsed run time) and each
    user's shadowsocks service status."""
    # Resolve the pid-file location the same way Start()/Stop() do.
    try:
        piddir = sslib.GetEtc()['piddir']
    except:
        piddir = '/tmp'
    pidpos = os.path.join(piddir, 'ssrun.pid')
    # Report every recorded daemon pid together with its elapsed time.
    if os.path.exists(pidpos):
        with open(pidpos, 'r') as pidfile:
            pids = pidfile.read().split('\n')
        for pid in pids:
            if len(pid) > 0:
                (status, output) = commands.getstatusoutput('ps -eo pid,etime |grep ' + pid)
                print(output)
    # Then show the per-user service status.
    usrlist = sslib.GetUsrList()
    for usrname in usrlist:
        sslib.MyUsr(usrlist[usrname]).view()
AdaHeads/Hosted-Telephone-Reception-System | use-cases/.patterns/callee_phone_rings/test.py | Python | gpl-3.0 | 56 | 0.071429 | self.Step (Message = "Callee phone rings.")
| ||
nkgilley/home-assistant | homeassistant/components/mycroft/notify.py | Python | apache-2.0 | 908 | 0 | """Mycroft AI notification platform."""
import logging
from mycroftapi import MycroftAPI
from homeassistant.components.notify import BaseNotificationService
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
    """Get the Mycroft notification service.

    hass.data["mycroft"] holds the Mycroft instance's IP, stored by the
    component setup (not visible in this file).
    """
    return MycroftNotificationService(hass.data["mycroft"])
class MycroftNotificationService(BaseNotificationService):
    """The Mycroft Notification Service."""

    def __init__(self, mycroft_ip):
        """Initialize the service with the Mycroft instance's IP address."""
        self.mycroft_ip = mycroft_ip

    def send_message(self, message="", **kwargs):
        """Send a message for Mycroft to speak on the instance."""
        text = message
        mycroft = MycroftAPI(self.mycroft_ip)
        if mycroft is not None:
            mycroft.speak_text(text)
        else:
            # Logger.log() requires a level as its first argument; calling it
            # with only a message raises TypeError. Use error() instead.
            _LOGGER.error("Could not reach this instance of mycroft")
lhillber/qops | figure2.py | Python | mit | 5,048 | 0.002181 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from figure3 import select, ket, exp
from matrix import ops
from measures import local_entropies_from_rhos, local_exp_vals_from_rhos
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib import rc
rc("text", usetex=True)
font = {"size": 11, "weight": "normal"}
mpl.rc(*("font",), **font)
mpl.rcParams["pdf.fonttype"] = 42
mpl.rcParams["text.latex.preamble"] = [
r"\usepackage{amsmath}",
r"\usepackage{sansmath}", # sanserif math
r"\sansmath",
]
if __name__ == "__main__":
    # Label/style lookup for each measure plotted below.
    names = {
        "c1_f0": {"name": ket("010"), "ls": "-", "c": "C5", "m": "v"},
        "exp-z": {"name": exp("\hat{\sigma_j}^z"), "ls": "-", "c": "C5", "m": "v"},
        "exp-x": {"name": exp("\hat{\sigma_j}^x"), "ls": "-", "c": "C5", "m": "v"},
        "s-2": {"name": " $s^{(2)}_j$", "ls": "-", "c": "C5", "m": "v"},
    }
    cmaps = ["inferno_r", "inferno"]
    plot_fname = "figures/figure2/figure2_V5.pdf"

    fig = plt.figure(figsize=(4.75, 3.7))
    # Rows: one measure each; columns: the five rules listed in Skey
    # ("N.S" encodes the rule family N and rule number S).
    Skey = ["3.6", "3.13", "3.14", "5.4", "5.2"]
    measures = ["exp-z", "s-2"]
    IC = "c1_f0"
    L = 18
    T = (L - 1) * 3 + 1  # plot ylim
    letts1 = [
        r"$\mathrm{A}$",
        r"$\mathrm{C}$",
        r"$\mathrm{E}$",
        r"$\mathrm{G}$",
        r"$\mathrm{I}$",
    ]
    letts2 = [
        r"$\mathrm{B}$",
        r"$\mathrm{D}$",
        r"$\mathrm{F}$",
        r"$\mathrm{H}$",
        r"$\mathrm{J}$",
    ]
    clett1 = ["w", "w", "w", "w", "w"]
    clett2 = ["k", "k", "k", "w", "k"]
    letts = [letts1, letts2]
    cletts = [clett1, clett2]

    for row, (meas, letti, cli) in enumerate(zip(measures, letts, cletts)):
        grid = ImageGrid(
            fig,
            int("21" + str(1 + row)),
            nrows_ncols=(1, 5),
            direction="row",
            axes_pad=0.1,
            add_all=True,
            cbar_mode="single",
            cbar_location="right",
            cbar_size="20%",
            cbar_pad=0.05,
        )
        for col, (S, lett, cl) in enumerate(zip(Skey, letti, cli)):
            N, S = map(int, S.split("."))
            ax = grid[col]
            if N == 3:
                # Rule-family-3 data comes from the simulation database.
                sim = select(L=L, S=S, IC=IC, V="H", BC="0")
                if sim is None:
                    print("No sim!")
                    continue
                S = sim["S"]
                L = sim["L"]
                IC = sim["IC"]
                h5file = sim["h5file"]
                if meas[0] == "e":
                    ticks = [-1, 1]
                    ticklabels = ["↑", "↓"]
                else:
                    ticks = [0, 1]
                    ticklabels = ["$0$", "$1$"]
                vmin, vmax = ticks
                d = h5file[meas]
            elif N == 5:
                # Rule-family-5 data comes from precomputed one-site density
                # matrices on disk; sample every 10th time step.
                der = "/home/lhillber/documents/research/cellular_automata/qeca/qops"
                der = os.path.join(der, f"qca_output/hamiltonian/rule{S}/rho_i.npy")
                one_site = np.load(der)
                one_site = one_site.reshape(2000, 22, 2, 2)
                one_site = one_site[::, 2:-2, :, :]
                T5, L5, *_ = one_site.shape
                d = np.zeros((T5, L5))
                ti = 0
                for t, rhoi in enumerate(one_site):
                    if t % 10 == 0:
                        if meas == "exp-z":
                            d[ti, :] = local_exp_vals_from_rhos(rhoi, ops["Z"])
                        elif meas == "s-2":
                            d[ti, :] = local_entropies_from_rhos(rhoi, order=2)
                        ti += 1
                # NOTE(review): ticks/vmin/vmax are reused here from the last
                # N == 3 column -- confirm that is intended.
            I = ax.imshow(
                d[0:T],
                origin="lower",
                interpolation=None,
                cmap=cmaps[row],
                vmin=vmin,
                vmax=vmax,
            )
            ax.cax.colorbar(I)
            ax.cax.set_yticks(ticks)
            ax.cax.set_yticklabels(ticklabels)
            ax.set_xticks([0, 8, 17])
            ax.set_yticks([i * (L - 1) for i in range(4)])
            ax.set_yticklabels([])
            ax.set_xticklabels([])
            ax.text(0.5, 46, lett, color=cl, family="sans-serif", weight="bold")
            if col == len(Skey) - 1:
                ax.cax.text(
                    1.6,
                    0.5,
                    names[meas]["name"],
                    rotation=0,
                    transform=ax.transAxes,
                    ha="left",
                    va="center",
                )
            if row == 0 and col < 3:
                ax.set_title(r"$T_{%d}$" % S)
            elif row == 0 and col > 2:
                ax.set_title(r"${F_{%d}}$" % S)
            ax.tick_params(direction="out")
        # Only the leftmost panel keeps axis tick labels.
        grid[0].set_yticklabels(["$" + str(i * (L - 1)) + "$" for i in range(4)])
        grid[0].set_xticklabels(["$0$", "$8$", "$17$"])
        grid[0].set_xlabel("$j$", labelpad=0)
        grid[0].set_ylabel("$t$", labelpad=0)

    fig.subplots_adjust(hspace=0.1, left=0.05, top=0.93)
    plt.savefig(plot_fname, dpi=300)
    print("plot saved to ", plot_fname)
miketheprogrammer/dkxyz14 | hello-python/web.py | Python | mit | 296 | 0.013514 | import cherrypy
# Listen on all interfaces, port 9090.
cherrypy.config.update({'server.socket_port': 9090})
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
class Root(object):
    """CherryPy application root: a single hello-world page."""
    @cherrypy.expose
    def index(self):
        return "Hello World! From Python"
if __name__ == '__main__':
    # Mount the Root application at the site root and start serving.
    cherrypy.quickstart(Root(), '/')
percolate/redset | redset/locks.py | Python | bsd-2-clause | 2,908 | 0 | """
Locks used to synchronize mutations on queues.
"""
import time
from redset.exceptions import LockTimeout
__all__ = (
'Lock',
)
# redis or redis-py truncates timestamps to the hundredth
REDIS_TIME_PRECISION = 0.01
class Lock(object):
    """
    Context manager that implements a distributed lock with redis.

    Based on Chris Lamb's version
    (https://chris-lamb.co.uk/posts/distributing-locking-python-and-redis)
    """
    def __init__(self,
                 redis,
                 key,
                 expires=None,
                 timeout=None,
                 poll_interval=None,
                 ):
        """
        Distributed locking using Redis SETNX and GETSET.

        Usage::

            with Lock('my_lock'):
                print "Critical section"

        :param redis: the redis client
        :param key: the key the lock is labeled with
        :param timeout: If another client has already obtained the lock,
            sleep for a maximum of ``timeout`` seconds before
            giving up. A value of 0 means we never wait. Defaults to 10.
        :param expires: We consider any existing lock older than
            ``expires`` seconds to be invalid in order to
            detect crashed clients. This value must be higher
            than it takes the critical section to execute. Defaults to 20.
        :param poll_interval: How often we should poll for lock acquisition.
            Note that poll intervals below 0.01 don't make sense since
            timestamps stored in redis are truncated to the hundredth.
            Defaults to 0.2.
        :raises: LockTimeout
        """
        self.redis = redis
        self.key = key
        self.timeout = timeout or 10
        self.expires = expires or 20
        self.poll_interval = poll_interval or 0.2

    def __enter__(self):
        timeout = self.timeout
        while timeout >= 0:
            # Value stored under the key is the lock's expiry timestamp.
            expires = time.time() + self.expires

            if self.redis.setnx(self.key, expires):
                # We gained the lock; enter critical section
                return

            current_value = self.redis.get(self.key)

            # We found an expired lock and nobody raced us to replacing it
            has_expired = (
                current_value and
                # bump the retrieved time by redis' precision so that we don't
                # erroneously consider a recently acquired lock as expired
                (float(current_value) + REDIS_TIME_PRECISION) < time.time() and
                self.redis.getset(self.key, expires) == current_value
            )
            if has_expired:
                return

            timeout -= self.poll_interval
            time.sleep(self.poll_interval)
        raise LockTimeout("Timeout while waiting for lock '%s'" % self.key)

    def __exit__(self, exc_type, exc_value, traceback):
        self.redis.delete(self.key)
puttarajubr/commcare-hq | custom/ilsgateway/tanzania/test/delivered.py | Python | bsd-3-clause | 3,569 | 0.005043 | from corehq.apps.commtrack.models import StockState
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusValues, SupplyPointStatusTypes
from custom.ilsgateway.tanzania.reminders import DELIVERY_PARTIAL_CONFIRM, NOT_DELIVERED_CONFIRM, \
DELIVERY_CONFIRM_DISTRICT, DELIVERY_CONFIRM_CHILDREN
from custom.ilsgateway.tanzania.test.utils import ILSTestScript
class ILSDeliveredTest(ILSTestScript):
    """SMS-script tests for delivery confirmations at facility and
    district level (keywords: delivered / nimepokea / sijapokea)."""

    def setUp(self):
        super(ILSDeliveredTest, self).setUp()

    def test_delivery_facility_received_no_quantities_reported(self):
        script = """
          5551234 > delivered
          5551234 < {0}
        """.format(DELIVERY_PARTIAL_CONFIRM)
        self.run_script(script)

        sps = SupplyPointStatus.objects.filter(location_id=self.loc1.get_id,
                                               status_type="del_fac").order_by("-status_date")[0]

        self.assertEqual(SupplyPointStatusValues.RECEIVED, sps.status_value)
        self.assertEqual(SupplyPointStatusTypes.DELIVERY_FACILITY, sps.status_type)

    def test_delivery_facility_received_quantities_reported(self):
        script = """
          5551234 > delivered jd 400 mc 569
          5551234 < {0}
        """.format("received stock report for loc1(Test Facility 1) R jd400 mc569")
        self.run_script(script)
        self.assertEqual(2, StockState.objects.count())
        for ps in StockState.objects.all():
            self.assertEqual(self.loc1.linked_supply_point().get_id, ps.case_id)
            self.assertTrue(0 != ps.stock_on_hand)

    def test_delivery_facility_not_received(self):
        script = """
          5551234 > sijapokea
          5551234 < {0}
        """.format(NOT_DELIVERED_CONFIRM)
        self.run_script(script)
        sps = SupplyPointStatus.objects.filter(location_id=self.loc1.get_id,
                                               status_type="del_fac").order_by("-status_date")[0]
        self.assertEqual(SupplyPointStatusValues.NOT_RECEIVED, sps.status_value)
        self.assertEqual(SupplyPointStatusTypes.DELIVERY_FACILITY, sps.status_type)

    def test_delivery_district_received(self):
        # A district confirmation also notifies the district's facilities.
        script = """
          555 > nimepokea
          555 < {0}
          5551234 < {1}
          5555678 < {1}
        """.format(DELIVERY_CONFIRM_DISTRICT % dict(contact_name="{0} {1}".format(self.user_dis.first_name,
                                                                                  self.user_dis.last_name),
                                                    facility_name=self.dis.name),
                   DELIVERY_CONFIRM_CHILDREN % dict(district_name=self.dis.name))
        self.run_script(script)
        sps = SupplyPointStatus.objects.filter(location_id=self.dis.get_id,
                                               status_type="del_dist").order_by("-status_date")[0]
        self.assertEqual(SupplyPointStatusValues.RECEIVED, sps.status_value)
        self.assertEqual(SupplyPointStatusTypes.DELIVERY_DISTRICT, sps.status_type)

    def test_delivery_district_not_received(self):
        script = """
          555 > sijapokea
          555 < {0}
        """.format(NOT_DELIVERED_CONFIRM)
        self.run_script(script)
        sps = SupplyPointStatus.objects.filter(location_id=self.dis.get_id,
                                               status_type="del_dist").order_by("-status_date")[0]
        self.assertEqual(SupplyPointStatusValues.NOT_RECEIVED, sps.status_value)
        self.assertEqual(SupplyPointStatusTypes.DELIVERY_DISTRICT, sps.status_type)
Apreche/Presentoh | utils/jinja2/debug.py | Python | mit | 9,931 | 0.000302 | # -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from jinja2.utils import CodeType, missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
# how does the raise helper look like?
try:
exec "raise TypeError, 'foo'"
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
def _set_tb_next(self, next):
if tb_set_next is not None:
tb_set_next(self.tb, next and next.tb or None)
self._tb_next = next
def _get_tb_next(self):
return self._tb_next
tb_next = property(_get_tb_next, _set_tb_next)
del _get_tb_next, _set_tb_next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for priting or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
def chain_frames(self):
"""Chains the frames. Requires ctypes or the speedups extension."""
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.tb_next = tb
prev_tb = tb
prev_tb.tb_next = None
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
return self.exc_type, self.exc_value, self.frames[0].tb
def make_traceback(exc_info, source_hint=None):
    """Creates a processed traceback object from the exc_info."""
    exc_type, exc_value, tb = exc_info
    if isinstance(exc_value, TemplateSyntaxError):
        # Syntax errors carry their own template location; rebuild the
        # exc_info to point at the template source and keep every frame.
        exc_info = translate_syntax_error(exc_value, source_hint)
        initial_skip = 0
    else:
        # For runtime errors, drop the first (internal) frame.
        initial_skip = 1
    return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
    """Rewrites a syntax error to please traceback systems."""
    # Attach the template source so debug renderers can show context.
    error.source = source
    error.translated = True
    exc_info = (error.__class__, error, None)
    filename = error.filename
    if filename is None:
        filename = '<unknown>'
    # Fabricate a traceback whose frame points at the template file/line.
    return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in xrange(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lin | eno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(TracebackFrameProxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# | reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
raise exc_info[0], exc_info[1], exc_info[2]
traceback = ProcessedTraceback(exc_info[0], exc_info[1], frames)
if tb_set_next is not None:
traceback.chain_frames()
return traceback
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
real_locals = tb.tb_frame.f_locals.copy()
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
for name, value in real_locals.iteritems():
if name.startswith('l_') and value is not missing:
locals[name[2:]] = value
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
# assamble fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except:
pass
# execute the code and catch the new traceback
try:
exec code in globals, locals
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object.
"""
import ctypes
from types import TracebackType
# figure out side of _Py_ssize_t
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
|
opendoor/django-comlink | comlink/tests/__init__.py | Python | agpl-3.0 | 25 | 0.04 | from list_tests import | * | |
zakandrewking/cobrapy | cobra/manipulation/modify.py | Python | lgpl-2.1 | 9,924 | 0 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from ast import NodeTransformer
from itertools import chain
from six import iteritems
from warnings import warn
from cobra.core import Gene, Metabolite, Reaction
from cobra.core.gene import ast2str
from cobra.manipulation.delete import get_compiled_gene_reaction_rules
from cobra.util.solver import set_objective
_renames = (
(".", "_DOT_"),
("(", "_LPAREN_"),
(")", "_RPAREN_"),
("-", "__"),
("[", "_LSQBKT"),
("]", "_RSQBKT"),
(",", "_COMMA_"),
(":", "_COLON_"),
(">", "_GT_"),
("<", "_LT"),
("/", "_FLASH"),
("\\", "_BSLASH"),
("+", "_PLUS_"),
("=", "_EQ_"),
(" ", "_SPACE_"),
("'", "_SQUOT_"),
('"', "_DQUOT_"),
)
def _escape_str_id(id_str):
    """Return *id_str* rewritten so that it is SBML compliant."""
    # Drop a single pair of enclosing quotes (interior quotes are kept and
    # escaped by the rename table below).
    for quote in ("'", '"'):
        wrapped = (id_str.startswith(quote) and id_str.endswith(quote)
                   and id_str.count(quote) == 2)
        if wrapped:
            id_str = id_str.strip(quote)
    # Substitute every SBML-unsafe character with its escape token.
    for unsafe, escape_token in _renames:
        id_str = id_str.replace(unsafe, escape_token)
    return id_str
class _GeneEscaper(NodeTransformer):
    # AST transformer that rewrites every gene name in a parsed
    # gene-reaction rule via _escape_str_id (same table as other ids).
    def visit_Name(self, node):
        node.id = _escape_str_id(node.id)
        return node
def escape_ID(cobra_model):
    """makes all ids SBML compliant"""
    # Escape the model id plus every metabolite, reaction and gene id.
    for x in chain([cobra_model],
                   cobra_model.metabolites,
                   cobra_model.reactions,
                   cobra_model.genes):
        x.id = _escape_str_id(x.id)
    cobra_model.repair()
    # Gene names also appear inside gene-reaction rule expressions, so
    # rewrite each parsed rule with the same escaping and re-serialize it.
    gene_renamer = _GeneEscaper()
    for rxn, rule in iteritems(get_compiled_gene_reaction_rules(cobra_model)):
        if rule is not None:
            rxn._gene_reaction_rule = ast2str(gene_renamer.visit(rule))
def rename_genes(cobra_model, rename_dict):
    """renames genes in a model from the rename_dict"""
    recompute_reactions = set()  # need to recompute related genes
    remove_genes = []
    for old_name, new_name in iteritems(rename_dict):
        # undefined if a value matches a different key
        # because dict is unordered
        try:
            gene_index = cobra_model.genes.index(old_name)
        except ValueError:
            gene_index = None
        old_gene_present = gene_index is not None
        new_gene_present = new_name in cobra_model.genes
        if old_gene_present and new_gene_present:
            # Both exist: merge old into new by removing the old gene and
            # recomputing the reactions that referenced it.
            old_gene = cobra_model.genes.get_by_id(old_name)
            remove_genes.append(old_gene)
            recompute_reactions.update(old_gene._reaction)
        elif old_gene_present and not new_gene_present:
            # rename old gene to new gene
            gene = cobra_model.genes[gene_index]
            # trick DictList into updating index
            cobra_model.genes._dict.pop(gene.id)  # ugh
            gene.id = new_name
            cobra_model.genes[gene_index] = gene
        elif not old_gene_present and new_gene_present:
            pass
        else:  # not old_gene_present and not new_gene_present
            # the new gene's _model will be set by repair
            cobra_model.genes.append(Gene(new_name))
    cobra_model.repair()

    # Apply the same renaming inside every gene-reaction rule expression.
    class Renamer(NodeTransformer):
        def visit_Name(self, node):
            node.id = rename_dict.get(node.id, node.id)
            return node
    gene_renamer = Renamer()
    for rxn, rule in iteritems(get_compiled_gene_reaction_rules(cobra_model)):
        if rule is not None:
            rxn._gene_reaction_rule = ast2str(gene_renamer.visit(rule))

    for rxn in recompute_reactions:
        rxn.gene_reaction_rule = rxn._gene_reaction_rule
    for i in remove_genes:
        cobra_model.genes.remove(i)
def convert_to_irreversible(cobra_model):
"""Split reversible reactions into two irreversible reactions
These two reactions will proceed in opposite directions. This
guarentees that all reactions in the model will only allow
positive flux values, which is useful for some modeling problems.
cobra_model: A Model object which will be modified in place.
"""
warn("deprecated, not applicable for optlang solvers", DeprecationWarning)
reactions_to_add = []
coefficients = {}
for reaction in cobra_model.reactions:
# If a reaction is reverse only, the forward reaction (which
# will be constrained to 0) will be left in the model.
if reaction.lower_bound < 0:
reverse_reaction = Reaction(reaction.id + "_reverse")
reverse_reaction.lower_bound = max(0, -reaction.upper_bound)
reverse_reaction.upper_bound = -reaction.lower_bound
coefficients[
reverse_reaction] = reaction.objective_coefficient * -1
reaction.lower_bound = max(0, reaction.lower_bound)
reaction.upper_bound = max(0, reaction.upper_bound)
# Make the directions aware of each other
reaction.notes["reflection"] = reverse_reaction.id
reverse_reaction.notes["reflection"] = reaction.id
reaction_dict = {k: v * -1
for k, v in iteritems(reaction._metabolites)}
reverse_reaction.add_metabolites(reaction_dict)
reverse_reaction._model = reaction._model
reverse_reaction._genes = reaction._genes
for gene in reaction._genes:
gene._reaction.add(reverse_reaction)
reverse_reaction.subsystem = reaction.subsystem
reverse_reaction._gene_reaction_rule = reaction._gene_reaction_rule
reactions_to_add.append(reverse_reaction)
cobra_model.add_reactions(reactions_to_add)
set_objective(cobra_model, coefficients, additive=True)
def revert_to_reversible(cobra_model, update_solution=True):
"""This function will convert an irreversible model made by
convert_to_irreversible into a reversible model.
cobra_model : cobra.Model
A model which will be modified in place.
update_solution: bool
This option is ignored since `model.solution` was removed.
"""
warn("deprecated, not applicable for optlang solvers", DeprecationWarning)
reverse_reactions = [x for x in cobra_model.reactions
if "reflection" in x.notes and
x.id.endswith('_reverse')]
# If there are no reverse reactions, then there is nothing to do
if len(reverse_reactions) == 0:
return
for reverse in reverse_reactions:
forward_id = reverse.notes.pop("reflection")
forward = cobra_model.reactions.get_by_id(forward_id)
forward.lower_bound = -reverse.upper_bound
if forward.upper_bound == 0:
forward.upper_bound = -reverse.lower_bound
if "reflection" in forward.notes:
forward.notes.pop("reflection")
# Since the metabolites and genes are all still in
# use we can do this faster removal step. We can
# probably speed things up here.
cobra_model.remove_reactions(reverse_reactions)
def canonical_form(model, objective_sense='maximize',
already_irreversible=False, copy=True):
"""Return a model (problem in canonical_form).
Converts a minimization problem to a maximization, makes all variables
positive by making reactions irreversible, and converts all constraints to
<= constraints.
model: class:`~cobra.core.Model`. The model/problem to convert.
objective_sense: str. The objective sense of the starting problem, either
'maximize' or 'minimize'. A minimization problems will be converted to a
maximization.
already_irreversible: bool. If the model is already irreversible, then pass
True.
copy: bool. Copy the model before making any modifications.
"""
warn("deprecated, not applicable for optlang solvers", DeprecationWarning)
if copy:
model = model.copy()
if not already_irreversible:
convert_to_irreversible(model)
if objective_sense == "minimize":
# if converting min to max, reverse all the objective coefficients
for reaction in model.reactions:
reaction.objective_coefficient = - reaction.objective_coefficient
elif objective_sense != |
MathieuDuponchelle/meson | mesonbuild/mesonlib.py | Python | apache-2.0 | 42,764 | 0.003648 | # Copyright 2012-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
import functools
import sys
import stat
import time
import platform, subprocess, operator, os, shutil, re
import collections
from enum import Enum
from functools import lru_cache
from mesonbuild import mlog
have_fcntl = False
have_msvcrt = False
# {subproject: project_meson_version}
project_meson_versions = {}
try:
import fcntl
have_fcntl = True
except Exception:
pass
try:
import msvcrt
have_msvcrt = True
except Exception:
pass
from glob import glob
if os.path.basename(sys.executable) == 'meson.exe':
# In Windows and using the MSI installed executable.
python_command = [sys.executable, 'runpython']
else:
python_command = [sys.executable]
meson_command = None
def set_meson_command(mainfile):
    """Record, in the module-global ``meson_command``, the argv needed to
    re-invoke this meson from ``mainfile`` (the entry point we started from).

    On UNIX-like systems ``meson`` is a Python script; on Windows ``meson``
    and ``meson.exe`` are wrapper executables that can be run directly.
    """
    global python_command
    global meson_command
    if not mainfile.endswith('.py'):
        # A native wrapper executable: run it as-is.
        command = [mainfile]
    elif mainfile.endswith('mesonmain.py') and os.path.isabs(mainfile):
        # mesonmain.py cannot be executed via an absolute path; it must be
        # launched as a module: python -m mesonbuild.mesonmain
        command = python_command + ['-m', 'mesonbuild.mesonmain']
    else:
        # Either running uninstalled, or a full path to meson-script.py.
        command = python_command + [mainfile]
    meson_command = command
    # The unit tests read this value from our output.
    if 'MESON_COMMAND_TESTS' in os.environ:
        mlog.log('meson_command is {!r}'.format(meson_command))
def is_ascii_string(astring):
    """Return True if *astring* (str or bytes) contains only ASCII characters.

    Objects that are neither str nor bytes are not inspected and are
    reported as ASCII, matching the original behaviour.
    """
    try:
        if isinstance(astring, str):
            astring.encode('ascii')
        if isinstance(astring, bytes):
            astring.decode('ascii')
    except UnicodeError:
        # str.encode() raises UnicodeEncodeError while bytes.decode() raises
        # UnicodeDecodeError; UnicodeError is the common base of both.
        # The previous code caught only UnicodeDecodeError, so a non-ASCII
        # str leaked an uncaught UnicodeEncodeError to the caller.
        return False
    return True
def check_direntry_issues(direntry_array):
    """Warn about non-ASCII directory entries when the locale is not UTF-8.

    Accepts a single entry or a list of entries; each non-ASCII entry
    produces one warning on stderr. Windows is exempt (its filesystem API
    is not affected by the preferred encoding in the same way).
    """
    import locale
    # Warn if the locale is not UTF-8. This can cause various unfixable issues
    # such as os.stat not being able to decode filenames with unicode in them.
    # There is no way to reset both the preferred encoding and the filesystem
    # encoding, so we can just warn about it.
    e = locale.getpreferredencoding()
    if e.upper() != 'UTF-8' and not is_windows():
        # Normalize a single entry into a one-element list.
        if not isinstance(direntry_array, list):
            direntry_array = [direntry_array]
        for de in direntry_array:
            if is_ascii_string(de):
                continue
            mlog.warning('''You are using {!r} which is not a Unicode-compatible '
locale but you are trying to access a file system entry called {!r} which is
not pure ASCII. This may cause problems.
'''.format(e, de), file=sys.stderr)
# Put this in objects that should not get dumped to pickle files
# by accident.
import threading
an_unpicklable_object = threading.Lock()
class MesonException(Exception):
    '''Base class for all exceptions raised by Meson.'''

    def get_msg_with_context(self):
        '''Return the message, prefixed with its source location when known.

        The 'file' and 'lineno' attributes are attached to the exception
        externally when the error location is available; both must be
        present for the prefix to be emitted.
        '''
        if hasattr(self, 'lineno') and hasattr(self, 'file'):
            prefix = get_error_location_string(self.file, self.lineno) + ' '
        else:
            prefix = ''
        return prefix + str(self)
class EnvironmentException(MesonException):
    '''Exceptions thrown while detecting, processing and creating the build environment'''
class FileMode:
    """Symbolic install permissions plus an optional owner and group.

    Permissions use the 9-character symbolic notation: three triads of
    three characters for owner, group and others.  Within a triad:

    * 1st char: 'r' (readable) or '-'
    * 2nd char: 'w' (writable) or '-'
    * 3rd char: 'x' (executable) or '-'.  In the owner/group triads,
      's' means executable with setuid/setgid set and 'S' means setuid/
      setgid without execute; in the others triad 't'/'T' play the same
      roles for the sticky bit.

    For the notation (setuid/setgid/sticky bits) see:
    https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
    The meaning of 'rwx' on directories is not obvious; see:
    https://www.hackinglinuxexposed.com/articles/20030424.html
    """

    # One owner triad, one group triad and one "others" triad.
    symbolic_perms_regex = re.compile('[r-][w-][xsS-]' # Owner perms
                                      '[r-][w-][xsS-]' # Group perms
                                      '[r-][w-][xtT-]') # Others perms

    def __init__(self, perms=None, owner=None, group=None):
        # Keep both the symbolic string and its numeric st_mode translation.
        self.perms_s = perms
        self.perms = self.perms_s_to_bits(perms)
        self.owner = owner
        self.group = group

    def __repr__(self):
        template = '<FileMode: {!r} owner={} group={}'
        return template.format(self.perms_s, self.owner, self.group)

    @classmethod
    def perms_s_to_bits(cls, perms_s):
        '''
        Convert a symbolic permission string such as 'rwxr-xr-x' into the
        st_mode bits accepted by os.chmod() -- the inverse of
        stat.filemode().  Returns -1 when perms_s is None, meaning the
        permissions should not be touched at all.
        '''
        if perms_s is None:
            # No perms specified, we will not touch the permissions.
            return -1
        eg = 'rwxr-xr-x'
        if not isinstance(perms_s, str):
            msg = 'Install perms must be a string. For example, {!r}'
            raise MesonException(msg.format(eg))
        if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
            msg = 'File perms {!r} must be exactly 9 chars. For example, {!r}'
            raise MesonException(msg.format(perms_s, eg))
        # Per-position lookup: character -> st_mode bits.  '-' contributes
        # nothing and is simply absent from each mapping.
        char_bits = (
            {'r': stat.S_IRUSR},
            {'w': stat.S_IWUSR},
            {'x': stat.S_IXUSR, 'S': stat.S_ISUID,
             's': stat.S_IXUSR | stat.S_ISUID},
            {'r': stat.S_IRGRP},
            {'w': stat.S_IWGRP},
            {'x': stat.S_IXGRP, 'S': stat.S_ISGID,
             's': stat.S_IXGRP | stat.S_ISGID},
            {'r': stat.S_IROTH},
            {'w': stat.S_IWOTH},
            {'x': stat.S_IXOTH, 'T': stat.S_ISVTX,
             't': stat.S_IXOTH | stat.S_ISVTX},
        )
        perms = 0
        for ch, mapping in zip(perms_s, char_bits):
            perms |= mapping.get(ch, 0)
        return perms
class File:
    def __init__(self, is_built, subdir, fname):
        # is_built: True when the file lives in the build directory rather
        # than the source tree; subdir/fname locate it relative to that root.
        self.is_built = is_built
        self.subdir = subdir
        self.fname = fname
        assert(isinstance(self.subdir, str))
        assert(isinstance(self.fname, str))
    def __str__(self):
        # relative_name() is defined further down in this class (not shown
        # in this excerpt); it joins subdir and fname.
        return self.relative_name()
    def __repr__(self):
        # Builds '<File: path>' or '<File: path (not built)>'; the closing
        # '>' is appended after the optional marker.
        ret = '<File: {0}'
        if not self.is_built:
            ret += ' (not built)'
        ret += '>'
        return ret.format(self.relative_name())
    @staticmethod
    @lru_cache(maxsize=None)
    def from_source_file(source_root, subdir, fname):
        # Construct a File for an existing source-tree file.  Results are
        # memoized per (source_root, subdir, fname) tuple for the lifetime
        # of the process; raises MesonException if the file is missing.
        if not os.path.isfile(os.path.join(source_root, subdir, fname)):
            raise MesonException('File %s does not exist.' % fname)
        return File(False, subdir, fname)
@staticmethod
def from_built_file(subdir, fname):
return Fil |
insertnamehere1/maraschino | mobile.py | Python | mit | 32,117 | 0.008562 | # -*- coding: utf-8 -*-
"""Ressources to use Maraschino on mobile devices"""
import jsonrpclib
from flask import render_template
from maraschino import app, logger
from maraschino.tools import *
from maraschino.noneditable import *
global sabnzbd_history_slots
sabnzbd_history_slots = None
@app.route('/mobile/')
@requires_auth
def mobile_index():
    """Render the mobile landing page.

    XBMC controls are only offered when at least one XBMC server has been
    configured.
    """
    available_modules = Module.query.order_by(Module.position)
    servers = XbmcServer.query.order_by(XbmcServer.position)
    xbmc_available = servers.count() != 0
    return render_template(
        'mobile/index.html',
        available_modules=available_modules,
        xbmc=xbmc_available,
        search=get_setting_value('search') == '1',
    )
from modules.recently_added import get_recently_added_episodes, get_recently_added_movies, \
get_recently_added_albums, get_recent_xbmc_api_url
@app.route('/mobile/recent_episodes/')
@requires_auth
def recently_added_episodes():
    """List recently added TV episodes from the configured XBMC server."""
    api_url = get_recent_xbmc_api_url('recently_added_server')
    xbmc = jsonrpclib.Server(api_url)
    # Helper returns (episodes, using_db) -- index to stay tolerant of shape.
    result = get_recently_added_episodes(xbmc, mobile=True)
    return render_template(
        'mobile/xbmc/recent_episodes.html',
        recently_added_episodes=result[0],
        using_db=result[1],
    )
@app.route('/mobile/recent_movies/')
@requires_auth
def recently_added_movies():
    """List recently added movies from the configured XBMC server."""
    api_url = get_recent_xbmc_api_url('recently_added_movies_server')
    xbmc = jsonrpclib.Server(api_url)
    result = get_recently_added_movies(xbmc, mobile=True)
    return render_template(
        'mobile/xbmc/recent_movies.html',
        recently_added_movies=result[0],
        using_db=result[1],
    )
@app.route('/mobile/recent_albums/')
@requires_auth
def recently_added_albums():
    """List recently added music albums from the configured XBMC server."""
    api_url = get_recent_xbmc_api_url('recently_added_albums_server')
    xbmc = jsonrpclib.Server(api_url)
    result = get_recently_added_albums(xbmc, mobile=True)
    return render_template(
        'mobile/xbmc/recent_albums.html',
        recently_added_albums=result[0],
        using_db=result[1],
    )
@app.route('/mobile/xbmc/')
@requires_auth
def xbmc():
    """Render the XBMC server-selection page for mobile clients."""
    current = int(get_setting_value('active_server'))
    servers = XbmcServer.query.order_by(XbmcServer.position)
    return render_template(
        'mobile/xbmc/xbmc.html',
        servers=servers,
        active_server=current,
    )
@app.rout | e('/mobile/movie_library/')
@requires_auth
def movie_library():
try:
xbmc = jsonrpclib.Server(server_api_address())
sort = {'method': 'label', 'ignorearticle': True}
movies = xbmc.VideoLibrary.GetMovies(sort=sort, properties=['title', 'rating', 'year', 'thumbnail', 'tagline', 'playcount'])['movies']
except:
logger.log('Mobile :: XBMC :: Could not retrieve movie library', 'WARNING')
return render_template('mobile/xbmc/movie_library.html',
movies=movies,
)
@app.route('/mobile/tv_library/')
@requires_auth
def tv_library():
    """Render the XBMC TV show library.

    On RPC failure the page is rendered with an empty show list; before,
    ``TV`` was left unbound and rendering raised NameError.
    """
    TV = []  # default so the template still renders if the RPC call fails
    try:
        xbmc = jsonrpclib.Server(server_api_address())
        sort = {'method': 'label', 'ignorearticle': True}
        TV = xbmc.VideoLibrary.GetTVShows(sort=sort, properties=['thumbnail'])['tvshows']
    except Exception as e:
        logger.log('Mobile :: XBMC :: Could not retrieve TV Shows: %s' % e, 'WARNING')
    return render_template('mobile/xbmc/tv_library.html',
        TV=TV,
    )
@app.route('/mobile/tvshow/<int:id>/')
@requires_auth
def tvshow(id):
    """Render the season list for one TV show.

    On RPC failure the page is rendered with an empty season list; before,
    ``show`` was left unbound and rendering raised NameError.
    """
    show = []  # default so the template still renders if the RPC call fails
    try:
        xbmc = jsonrpclib.Server(server_api_address())
        show = xbmc.VideoLibrary.GetSeasons(tvshowid=id, properties=['tvshowid', 'season', 'showtitle', 'playcount'], sort={'method': 'label'})['seasons']
    except Exception as e:
        logger.log('Mobile :: XBMC :: Could not retrieve TV Show [id: %i - %s]' % (id, e), 'WARNING')
    return render_template('mobile/xbmc/tvshow.html',
        show=show,
    )
@app.route('/mobile/tvshow/<int:id>/<int:season>/')
@requires_auth
def season(id, season):
    """Render the episode list for one season of a show.

    On RPC failure the page is rendered with an empty episode list; before,
    ``episodes`` was left unbound and rendering raised NameError.
    """
    episodes = []  # default so the template still renders on RPC failure
    try:
        xbmc = jsonrpclib.Server(server_api_address())
        episodes = xbmc.VideoLibrary.GetEpisodes(tvshowid=id, season=season, sort={'method': 'episode'}, properties=['tvshowid', 'season', 'showtitle', 'playcount'])['episodes']
    except Exception as e:
        logger.log('Mobile :: XBMC :: Could not retrieve TV Show [id: %i, season: %i - %s]' % (id, season, e), 'WARNING')
    return render_template('mobile/xbmc/season.html',
        season=season,
        episodes=episodes,
    )
@app.route('/mobile/artist_library/')
@requires_auth
def artist_library():
    """Render the XBMC music artist list; empty when the RPC call fails."""
    try:
        xbmc = jsonrpclib.Server(server_api_address())
        artists = xbmc.AudioLibrary.GetArtists(sort={'ignorearticle': True})['artists']
    except:
        logger.log('Mobile :: XBMC :: Could not retrieve artists from audio library', 'WARNING')
        artists = []
    return render_template(
        'mobile/xbmc/artists.html',
        artists=artists,
    )
@app.route('/mobile/artist_library/<int:artistid>/')
@requires_auth
def album_library(artistid):
    """Render the album list for one artist.

    Handles both the Eden (< v12) and Frodo (>= v12) JSON-RPC APIs: Eden
    filters by a top-level 'artistid' parameter, Frodo by a 'filter' dict.
    Falls back to an empty album list on any RPC failure.
    """
    try:
        xbmc = jsonrpclib.Server(server_api_address())
        # Major XBMC version decides which parameter shape GetAlbums accepts.
        version = xbmc.Application.GetProperties(properties=['version'])['version']['major']
        params = {'sort': {'ignorearticle': True}, 'properties': ['year']}
        if version < 12: # Eden
            params['artistid'] = artistid
            params['properties'].extend(['artistid', 'artist'])
        else: # Frodo
            params['filter'] = {'artistid': artistid}
        albums = xbmc.AudioLibrary.GetAlbums(**params)['albums']
        if version > 11: # Frodo
            # Frodo's GetAlbums no longer embeds the artist fields, so fetch
            # the artist label once and attach it to every album.
            artist = xbmc.AudioLibrary.GetArtistDetails(artistid=artistid)['artistdetails']['label']
            for album in albums:
                album['artistid'] = artistid
                album['artist'] = artist
    except:
        logger.log('Mobile :: XBMC :: Could not retrieve albums from audio library', 'WARNING')
        albums = []
    return render_template('mobile/xbmc/albums.html',
        albums=albums,
    )
@app.route('/mobile/artist_library/<int:artistid>/<int:albumid>/')
@requires_auth
def song_library(artistid, albumid):
    """Render the song list for one album.

    Handles both the Eden (< v12) and Frodo (>= v12) JSON-RPC APIs: Eden
    takes top-level artistid/albumid parameters, Frodo a 'filter' dict.
    Falls back to an empty song list on any RPC failure.
    """
    try:
        xbmc = jsonrpclib.Server(server_api_address())
        # Major XBMC version decides which parameter shape GetSongs accepts.
        version = xbmc.Application.GetProperties(properties=['version'])['version']['major']
        params = {'sort': {'ignorearticle': True}, 'properties': ['album', 'track', 'title']}
        if version < 12: # Eden
            params['artistid'] = artistid
            params['albumid'] = albumid
        else: # Frodo
            params['filter'] = {
                'albumid': albumid
            }
        songs = xbmc.AudioLibrary.GetSongs(**params)['songs']
    except:
        logger.log('Mobile :: XBMC :: Could not retrieve songs from audio library', 'WARNING')
        songs = []
    return render_template('mobile/xbmc/songs.html',
        songs=songs,
    )
@app.route('/mobile/movie/<int:id>/info/')
@requires_auth
def movie_info(id):
    """Render the details page for one movie.

    On RPC failure the page is rendered with empty details; before,
    ``movie`` was left unbound and rendering raised NameError.
    """
    movie = {}  # default so the template still renders if the RPC call fails
    try:
        xbmc = jsonrpclib.Server(server_api_address())
        properties = ['thumbnail', 'rating', 'director', 'genre', 'plot', 'year', 'trailer']
        movie = xbmc.VideoLibrary.GetMovieDetails(movieid=id, properties=properties)['moviedetails']
    except Exception as e:
        logger.log('Mobile :: XBMC :: Could not retrieve movie details [id: %i - %s]' % (id, e), 'WARNING')
    return render_template('mobile/xbmc/movie-details.html',
        movie=movie
    )
@app.route('/mobile/tvshow/<int:id>/info/')
@requires_auth
def tvshow_info(id):
    """Render the details page for one TV show.

    On RPC failure the page is rendered with empty details; before,
    ``show`` was left unbound and rendering raised NameError.
    """
    show = {}  # default so the template still renders if the RPC call fails
    try:
        xbmc = jsonrpclib.Server(server_api_address())
        properties = ['thumbnail', 'rating', 'studio', 'genre', 'plot']
        show = xbmc.VideoLibrary.GetTVShowDetails(tvshowid=id, properties=properties)['tvshowdetails']
    except Exception as e:
        logger.log('Mobile :: XBMC :: Could not retrieve TV Show details [id: %i - %s]' % (id, e), 'WARNING')
    return render_template('mobile/xbmc/tvshow-details.html',
        show=show,
        banners=get_setting_value('library_use_bannerart') == '1'
    )
@app.route('/mobile/episode/<int:id>/info/')
@requires_ |
fjcaetano/pelican_admin | pelican_admin/modules.py | Python | bsd-3-clause | 757 | 0.002642 | __author__ = 'flaviocaetano'
from django.utils.translation import ugettext_lazy as _
from admin_tools.dashboar | d import modules
import psutil
class PelicanAdmin(modules.DashboardModule):
    """Dashboard module for Pelican service administration.

    NOTE: Python 2 code (old-style 'except X, e' syntax below).
    """
    title = 'Pelican Admin'
    template = 'pelican_admin.html'
    def __init__(self, *args, **kwargs):
        super(PelicanAdmin, self).__init__(*args, **kwargs)
        # Scan the process table once at construction time: pelican_status
        # is True iff some running process mentions "pelican" in its
        # command line.
        self.pelican_status = False
        for p in psutil.process_iter():
            try:
                # NOTE(review): str(p.cmdline) stringifies whatever cmdline
                # is; in psutil versions where cmdline is a method this
                # matches its repr, not the argv -- confirm the psutil
                # version this was written against.
                if "pelican" in str(p.cmdline).lower():
                    self.pelican_status = True
                    break
            except psutil.AccessDenied, e:
                # Processes owned by other users may be unreadable; skip them.
                pass
    def is_empty(self):
        # The dashboard module always has content to show.
        return False
n-witt/EconstorCorpus | Han_the_Converter/processingPdfFiles/processingPdfFiles.py | Python | gpl-3.0 | 4,598 | 0.005437 | import time
import os
from pdfLib import PdfLib
import langdetect
from filter import Filter
import json
class ProcessWorker():
    """Worker that extracts, normalizes and persists plaintext from one PDF.

    NOTE: Python 2 code (dict.has_key, unicode(), e.message below).
    Reads <wd>/<filename>, writes <od>/<filename>.json and reports progress
    tuples on the update queue: ('complete', name) or ('broken', name, msg).
    """
    def __init__(self, filename, wd, od, logger, uq, fileExtension = u'.json'):
        """
        wd -> working dir
        od -> output dir
        uq -> update queue
        """
        self.logger = logger
        self.filename = filename
        self.wd = wd
        self.od = od
        self.uq = uq
        self.fileExtension = fileExtension
        # Output JSON file name derived from the input file name.
        self.outFilename = self.filename + self.fileExtension
        # Keys used in the persisted JSON document.
        self.langKey = u'lang'
        self.plaintextKey = u'plaintext'
        self.filenameKey = u'filename'
    def process_data(self):
        """
        This method is the entry point for the worker processes
        """
        i = 0
        self.logger.info(u"start processing {}.".format(self.filename))
        start = time.time()
        content = {}
        try:
            content = self.__loadFile()
            if content.has_key(self.plaintextKey) and \
                content.has_key(self.langKey) and \
                content.has_key(self.filenameKey):
                # does the file already have all the properties we want
                # to create? if so, let's assume the file has already been
                # processed and skip it
                self.logger.warning(u"{} not written. Information already present. skipped".format(self.outFilename))
                self.uq.put(('complete', self.filename))
                return
            # create or update the file with the new information
            result = self.__getPlaintext()
            with open(self.od + os.sep + self.outFilename, "w+") as f:
                content.update(result)
                # Python 2: json.dumps returns a byte str; decode to unicode
                # before writing.
                f.write(json.dumps(content).decode("utf8"))
            self.uq.put(('complete', self.filename))
        except Exception as e:
            # Any failure (missing file, too-short document, parse error)
            # is reported as 'broken' on the queue.  e.message is Python 2
            # only.
            self.logger.error(unicode(e))
            self.uq.put(('broken', self.filename, e.message))
        stop = time.time()
        self.logger.info(u"Took {:.2f}s.".format(stop-start))
        i += 1
    '''
    gets plaintext from file at path "self.filename", does some normalization
    and saves it into "outfile".
    '''
    def __getPlaintext(self):
        # extract plaintext from pdf
        paper = PdfLib(self.wd + os.sep + self.filename)
        textBeginning = self.__guessDocBegining(self.filename)
        plaintext = paper.pdf2txt(textBeginning, "max")
        # normalize text: the Filter pipeline lower-cases, strips noise
        # characters, enumerations, digits and short tokens, and collapses
        # whitespace.
        f = Filter(asString=plaintext)
        plaintext = f.substitutions() \
            .oneCharPerLine() \
            .normalizeCaracters() \
            .lower() \
            .uselessCharacters() \
            .multipleDots() \
            .listEnum() \
            .digits() \
            .shortTokens() \
            .multipleSpaces() \
            .getResult()
        # experience shows, that less than 6000 characters is mostly waste
        if len(plaintext) > 6000:
            result = {}
            result[self.langKey] = self.__guessLang(plaintext)
            result[self.plaintextKey] = plaintext
            result[self.filenameKey] = self.filename
            return result
        else:
            raise Exception(u"Document is too short.")
    def __persist(self, text, filename):
        # NOTE(review): apparently unused in this class; encodes where
        # process_data decodes -- confirm which behaviour is intended.
        with open(filename, "w") as f:
            f.write(json.dumps(text).encode("utf8"))
        self.logger.info(u"{} written.".format(filename))
    def __guessDocBegining(self, filename):
        if os.path.exists(self.wd + os.sep + filename):
            """
            inspect the first 5 pages. when a page consists of more than 1300 characters,
            or mentions "abstract"/"introduction", assume this is the beginning of the
            text. Those values are based on experience, not science ;)
            """
            maxPages = 5
            threshold = 1300
            for p in range(1, maxPages):
                paper = PdfLib(self.wd + os.sep + filename)
                text = paper.pdf2txt(p)
                numChar = len(text)
                textLower = text.lower()
                if numChar > threshold or textLower.find("abstract") != -1 or textLower.find("introduction") != -1:
                    return p
            return maxPages
        else:
            # Missing input file: log and implicitly return None.
            self.logger.info(u"{} does not exist.".format(filename))
    def __guessLang(self, text):
        # Detect the document language code (e.g. 'en', 'de') via langdetect.
        return langdetect.detect(text)
    def __loadFile(self):
        # Load the previously written JSON document for this input file;
        # raises IOError when it does not exist yet (caught by process_data).
        with open(self.od + os.sep + self.outFilename, "r") as f:
            return json.loads(f.read())
alexland/levenshtein-in-cython | levpy/levenshtein.py | Python | mit | 791 | 0.025284 | #!/usr/local/bin/python3.4
# encoding: utf-8
import os
import sys
import string
import warnings
import numpy as NP
from functools import partial
warnings.filterwarnings('ignore')
def levenshtein_dist(w1, w2, LuT):
	'''
	Count the positions at which two equal-length words differ
	(for equal-length strings the Levenshtein distance reduces to the
	Hamming distance, which is what is computed here).

	pass in: two words as python strings and a char->int lookup table;
	returns: the distance as an int, or an error string when the word
	lengths differ.
	'''
	if len(w1) != len(w2):
		return 'the two words do not have equal length'
	# Map each (lower-cased) character to its integer code, then count
	# the positions where the codes differ.
	codes1 = NP.array([LuT[c] for c in w1.lower()])
	codes2 = NP.array([LuT[c] for c in w2.lower()])
	return NP.where(codes2 - codes1 == 0, 0, 1).sum()
# Pre-bind the lookup table (a->0 ... z->25) so callers pass only the words.
p_levenshtein_dist = partial(levenshtein_dist,
		LuT = {k:v for v, k in enumerate(string.ascii_lowercase)})
# Smoke test at import time: 'pistol' vs 'piston' differ in one position.
print(p_levenshtein_dist('pistol', 'piston'))
|
Titan-C/learn-dmft | examples/twosite/plot_dop_A.py | Python | gpl-3.0 | 1,345 | 0 | # -*- coding: utf-8 -*-
"""
================================================
Following the Metal to Mott insulator Transition
================================================
Sequence of plots showing the transfer of spectral weight for a Hubbard
Model in the Bethe Lattice as the local dopping is increased.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
from __future__ import division, absolute_import, print_function
import matplotlib.pyplot as plt
import numpy as np
from slaveparticles.quantum import dos
axis = 'real'
u = 8.0
beta = 1e3
dop = [0.25, 0.5, 0.75, 0.9, 0.99]
out_file = axis+'_dop_b{}_U{}'.format(beta, u)
res = np.load(out_file+'.npy')
f, axes = plt.subplots(len(dop), sharex=True)
axes[0].set_title(r'$A(\omega)$ under doping U={} at '
'$\\beta=${}'.format(u, beta))
axes[-1].set_xlabel('$\\omega / t$')
f.subplots_adjust(hspace=0)
for ax | , n in zip(axes, dop):
ind = np.abs(res[:, 0] - n).argmin()
sim = res[ind, 1]
w = sim.omega
s = sim.GF[r'$\Sigma$']
ra = w + sim.mu - s
rho = dos.bethe_lattice(ra, sim.t)
ax.plot(w, rho,
label='n={:.2f}'.format(sim.ocupations().sum()))
ax | .set_xlim([-6, 6])
ax.set_ylim([0, 0.36])
ax.set_yticks([])
ax.set_ylabel('n={:.2f}'.format(sim.ocupations().sum()))
ax.legend(loc=0, handlelength=0)
|
antoinecarme/pyaf | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_LinearTrend_BestCycle_MLP.py | Python | bsd-3-clause | 151 | 0.046358 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_m | odel( ['BoxCox'] , ['LinearTrend'] , ['BestCycle'] , ['MLP'] | ); |
flexi-framework/hopr | tools/blockgridgenerator/main.py | Python | gpl-3.0 | 603 | 0.023217 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys,os
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from model import MainModel
from view import MainView
class App(QtWidgets.QApplication):
    def __init__(self, scriptpath, sys_argv):
        # Build the MVC triple: model first, then the main view bound to it.
        super(App, self).__init__(sys_argv)
        self.model = MainModel()
        self.main_view = MainView(self.model, scriptpath)
        self.main_view.show() #Maximized()
        # Fire an initial gridChanged so the view renders the starting grid.
        self.model.gridChanged.emit()
|
if __name__ == '__main__':
    # Resolve the directory containing this script (so resources can be
    # located relative to it), then hand control to the Qt event loop.
    scriptpath = os.path.dirname(os.path.abspath(sys.argv[0]))
    app = App(scriptpath, sys.argv)
    sys.exit(app.exec_())
|
geggo/pyface | pyface/wizard/i_wizard.py | Python | bsd-3-clause | 5,057 | 0.001186 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" The interface for all pyface wizards. """
# Enthought library imports.
from traits.api import Bool, Instance, List, Unicode
from pyface.i_dialog import IDialog
# Local imports.
from .i_wizard_controller import IWizardController
from .i_wizard_page import IWizardPage
class IWizard(IDialog):
    """ The interface for all pyface wizards. """
    #### 'IWizard' interface ##################################################
    # The pages in the wizard.
    pages = List(IWizardPage)
    # The wizard controller provides the pages displayed in the wizard, and
    # determines when the wizard is complete etc.
    controller = Instance(IWizardController)
    # Should the 'Cancel' button be displayed?
    show_cancel = Bool(True)
    #### 'IWindow' interface ##################################################
    # The dialog title.
    title = Unicode('Wizard')
    ###########################################################################
    # 'IWizard' interface.
    ###########################################################################
    # These are interface stubs: bodies are intentionally empty and are
    # implemented by toolkit-specific classes (see MWizard below).
    def next(self):
        """ Advance to the next page in the wizard. """
    def previous(self):
        """ Return to the previous page in the wizard. """
class MWizard(object):
    """ The mixin class that contains common code for toolkit specific
    implementations of the IWizard interface.

    Implements: next(), previous()
    Reimplements: _create_contents()
    """
    ###########################################################################
    # 'IWizard' interface.
    ###########################################################################
    def next(self):
        """ Advance to the next page in the wizard. """
        page = self.controller.get_next_page(self.controller.current_page)
        self._show_page(page)
        return
    def previous(self):
        """ Return to the previous page in the wizard. """
        page = self.controller.get_previous_page(self.controller.current_page)
        self._show_page(page)
        return
    ###########################################################################
    # Protected 'IWindow' interface.
    ###########################################################################
    def _create_contents(self, parent):
        """ Creates the window contents. """
        # This creates the dialog and button areas.
        super(MWizard, self)._create_contents(parent)
        # Wire up the controller.
        self._initialize_controller(self.controller)
        # Show the first page.
        self._show_page(self.controller.get_first_page())
        return
    ###########################################################################
    # Protected MWizard interface.
    ###########################################################################
    def _show_page(self, page):
        """ Show the specified page. """
        # Set the current page in the controller.
        #
        # fixme: Shouldn't this interface be reversed? Maybe calling
        # 'next_page' on the controller should cause it to set its own current
        # page?
        self.controller.current_page = page
    def _update(self):
        """ Enables/disables buttons depending on the state of the wizard. """
        # No-op here; toolkit subclasses override this to toggle buttons.
        pass
    ###########################################################################
    # Private interface.
    ###########################################################################
    def _initialize_controller(self, controller):
        """ Initializes the wizard controller. """
        # Re-evaluate button state whenever completeness or the current
        # page changes.
        controller.on_trait_change(self._update, 'complete')
        controller.on_trait_change(
            self._on_current_page_changed, 'current_page'
        )
        return
    #### Trait event handlers #################################################
    def _on_current_page_changed(self, obj, trait_name, old, new):
        """ Called when the current page is changed. """
        # Move the 'complete' listener from the old page to the new one so
        # only the visible page drives the button state.
        if old is not None:
            old.on_trait_change(self._update, 'complete', remove=True)
        if new is not None:
            new.on_trait_change(self._update, 'complete')
        self._update()
        return
    def _on_closed_changed(self):
        """ Called when the wizard is closed. """
        # Give the controller a chance to clean up its pages' resources.
        self.controller.dispose_pages()
        return
|
CompMusic/essentia | src/examples/python/show_algo_dependencies.py | Python | agpl-3.0 | 2,198 | 0.00455 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distribut | ed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import | subprocess
def find_dependencies(mode, algo):
    # NOTE: Python 2 script (print statements below).
    # Instantiate the given algorithm in a throwaway subprocess with factory
    # debug logging enabled, then scrape its stdout for the algorithms the
    # factory created along the way -- those are the dependencies.
    code = """
import essentia.%s as es
import essentia
essentia.log.infoActive = True
essentia.log.debugLevels += essentia.EFactory
loader = es.%s()
""" % (mode, algo)
    proc = subprocess.Popen(["python", "-c", code], stdout=subprocess.PIPE)
    stdout = proc.communicate()[0].split('\n')
    algos = []
    lines = []
    for line in stdout:
        # Factory log lines announce each created algorithm with its mode.
        if line.startswith("[Factory ] "):
            if line.count("Streaming: Creating algorithm: "):
                a = line.split("Streaming: Creating algorithm: ")[-1]
                m = "streaming"
                lines.append(line)
                algos.append((m, a))
            if line.count("Standard : Creating algorithm: "):
                a = line.split("Standard : Creating algorithm: ")[-1]
                m = "standard"
                lines.append(line)
                algos.append((m, a))
    print "---------- %s : %s ----------" % (mode, algo)
    # Deduplicate and drop the algorithm under inspection itself.
    algos = sorted(list(set(algos) - set([(mode, algo)])))
    print "Dependencies:"
    for m,a in algos:
        print m + '\t' + a
    print
    print '\n'.join(lines)
    print
    print
# Enumerate dependencies for every algorithm, first in standard mode, then
# in streaming mode (the name 'es' is deliberately rebound for the second
# pass).
import essentia.standard as es
for algo in es.algorithmNames():
    find_dependencies('standard', algo)
import essentia.streaming as es
for algo in es.algorithmNames():
    find_dependencies('streaming', algo)
|
krishauser/Klampt | Python/klampt/math/symbolic_klampt.py | Python | bsd-3-clause | 38,043 | 0.016797 | """ Defines many functions of Klampt as symbolic Functions.
Currently implemented:
- so3, se3
- ik
- some collide functions
- RobotModel kinematics
TODO:
- RobotModel dynamics
- Trajectories
- Geometries
- support polygons
"""
from .symbolic import *
from .symbolic_linalg import *
from .. import *
from . import so3,se3
from ..model import ik,collide
from ..io import loader
import weakref
def _so3_rotation(axis,angle):
    """Symbolic version of so3.rotation.

    Builds the Rodrigues rotation matrix R = c*I + (1-c)*r*r^T + s*[r]x
    (c=cos(angle), s=sin(angle), [r]x the cross-product matrix of the
    axis) as a flattened 9-vector expression.  cos/sin/mul/expr here are
    the symbolic operations of this package, not math functions.
    """
    cm = cos(angle)
    sm = sin(angle)
    #m = s[r]-c[r][r]+rrt = s[r]-c(rrt-I)+rrt = cI + rrt(1-c) + s[r]
    # s*[r]x term:
    cp = so3.cross_product(axis)
    R = mul(cp,sm)
    # (1-c)*r*r^T term, with c*I folded into the diagonal entries below.
    R2 = [0]*9
    for i in range(3):
        for j in range(3):
            R2[i*3+j] = axis[i]*axis[j]*(1.-cm)
    R2[0] += cm
    R2[4] += cm
    R2[8] += cm
    # NOTE(review): relies on symbolic elementwise '+' between the two
    # 9-vector expressions -- confirm against the symbolic module.
    return R + expr(R2)
class SO3Context(Context):
"""Defines some functions in the so3 mod | ule:
- identity, matrix, inv, mul, apply, rotation, err | or, distance
- from_matrix, from_rpy, rpy, from_quaternion, quaternion, from_rotation_vector, rotation_vector
- eq_constraint: equality constraint necessary for SO3 variables
- quaternion_constraint: equality constraint necessary for quaternion variables
Completeness table
================= ============= ==============
Function Derivative Simplification
================= ============= ==============
identity N/A N/A
matrix Y Y
inv Y Y
mul
apply Y,Y
rotation N,Y
from_matrix Y
from_rpy
rpy
from_quaternion Y
quaternion
from_rotation_v
rotation_vector
axis
angle Y
error
distance
eq_constraint Y
quaternion_cons Y
================= ============= ==============
"""
def __init__(self):
Context.__init__(self)
self.type = Type('V',9)
Rvar = Variable("R",self.type)
Rsymb = VariableExpression(Rvar)
R1 = Variable("R1",self.type)
R2 = Variable("R2",self.type)
V3type = Type('V',3)
q = Variable('q',Type('V',4))
pointvar = Variable("point",V3type)
pointsymb = VariableExpression(pointvar)
self.identity = self.declare(expr(so3.identity()),"identity",[])
self.identity.returnType = self.type
self.identity.description = "The identity rotation"
M = Variable("M",Type('M',(3,3)))
self.from_matrix = self.declare(flatten(transpose(M)),"from_matrix",['M'])
self.from_matrix.returnType = self.type
self.from_matrix.argTypes = [M.type]
self.from_matrix.description = "Converts from a 3x3 matrix"
self.from_matrix.setDeriv(0,lambda M,dM:self.from_matrix(dM),asExpr=True)
self.matrix = self.declare(expr(so3.matrix(Rsymb)),"matrix",["R"])
self.matrix.returnType = self.from_matrix.argTypes[0]
self.matrix.argTypes = [self.from_matrix.returnType]
self.matrix.addSimplifier(['so3.identity'],(lambda R:eye(3)),pre=True)
self.matrix.description = "Converts to a 3x3 matrix"
self.matrix.setDeriv(0,lambda R,dR:self.matrix(dR),asExpr=True)
self.from_matrix.properties['inverse'] = weakref.proxy(self.matrix)
self.matrix.properties['inverse'] = weakref.proxy(self.from_matrix)
self.inv = self.declare(expr(so3.inv(Rsymb)),"inv",["R"])
self.inv.description = "Inverts a rotation"
self.inv.setDeriv(0,lambda R,dR:self.inv(dR),asExpr=True)
self.inv.properties['inverse'] = weakref.proxy(self.inv)
self.inv.returnType = self.type
self.inv.argTypes = [self.type]
self.inv.addSimplifier(['so3.identity'],lambda R:R)
self.mul = self.declare(so3.mul,"mul")
self.mul.description = "Inverts a rotation"
self.mul.returnType = self.type
self.mul.argTypes = [self.type,self.type]
self.mul.setDeriv(0,lambda R1,R2,dR1:self.mul(dR1,R2),asExpr=True)
self.mul.setDeriv(1,lambda R1,R2,dR2:self.mul(R1,dR2),asExpr=True)
self.mul.addSimplifier(['so3.identity',None],(lambda R1,R2:R2),pre=True)
self.mul.addSimplifier([None,'so3.identity'],(lambda R1,R2:R1),pre=True)
self.mul.properties['associative'] = True
self.apply = self.declare(expr(so3.apply(Rsymb,pointsymb)),"apply",["R","point"])
self.apply.addSimplifier(['so3.identity',None],(lambda R,point:point),pre=True)
self.apply.addSimplifier([None,'zero'],(lambda R,point:point),pre=True)
self.apply.returnType = V3type
self.apply.argTypes = [self.type,V3type]
self.apply.autoSetJacobians()
self.rotation = self.declare(so3.rotation,"rotation")
self.rotation.returnType = self.type
self.rotation.argTypes = [V3type,Numeric]
self.rotation.setDeriv(1,lambda axis,angle:so3.cross_product(axis))
self.rotation.addSimplifier([None,'zero'],(lambda axis,angle:self.identity),pre=True)
self.from_rpy = self.declare(so3.from_rpy,"from_rpy")
self.from_rpy.returnType = self.type
self.from_rpy.argTypes = [V3type]
self.rpy = self.declare(so3.rpy,"rpy")
self.rpy.returnType = self.from_rpy.argTypes[0]
self.rpy.argTypes = [self.from_rpy.returnType]
self.from_rpy.properties['inverse'] = weakref.proxy(self.rpy)
self.rpy.properties['inverse'] = weakref.proxy(self.from_rpy)
self.from_quaternion = self.declare(expr(so3.from_quaternion([q[0],q[1],q[2],q[3]])),"from_quaternion",["q"])
self.from_quaternion.returnType = self.type
self.from_quaternion.argTypes = [Type('V',4)]
self.quaternion = self.declare(so3.quaternion,"quaternion")
self.quaternion.returnType = self.from_quaternion.argTypes[0]
self.quaternion.argTypes = [self.from_quaternion.returnType]
self.from_quaternion.properties['inverse'] = weakref.proxy(self.quaternion)
self.quaternion.properties['inverse'] = weakref.proxy(self.from_quaternion)
self.from_rotation_vector = self.declare(so3.from_rotation_vector,"from_rotation_vector")
self.from_rotation_vector.returnType = self.type
self.from_rotation_vector.argTypes = [V3type]
self.rotation_vector = self.declare(so3.rotation_vector,"rotation_vector")
self.rotation_vector.returnType = self.from_rotation_vector.argTypes[0]
self.rotation_vector.argTypes = [self.from_rotation_vector.returnType]
self.from_rotation_vector.properties['inverse'] = weakref.proxy(self.rotation_vector)
self.rotation_vector.properties['inverse'] = weakref.proxy(self.from_rotation_vector)
self.axis = self.declare(unit(self.rotation_vector(Rvar)),"rotation",["R"])
self.axis.returnType = V3type
self.axis.argTypes = [self.type]
self.angle = self.declare(so3.angle,"angle")
self.angle.returnType = Numeric
self.angle.argTypes = [self.type]
def angle_jacobian(R):
cosangle = (R[0]+R[4]+R[8]-1)*0.5
angle = arccos(cosangle)
#dangle / dR[0] = -1.0/sqrt(1-cosangle**2) * dcosangle/dR[0]
dacos = -1.0/sqrt(1-cosangle**2)
x = 0.5*dacos
return expr([[x,0,0,0,x,0,0,0,x]])
self.angle.setJacobian(0,angle_jacobian,asExpr=True)
self.error = self.declare(so3.error,"error")
self.error.returnType = V3type
self.error.argTypes = [self.type,self.type]
self.dis |
kumar303/addons-server | conftest.py | Python | bsd-3-clause | 7,121 | 0 | """
pytest hooks and fixtures used for our unittests.
Please note that there should not be any Django/Olympia related imports
on module-level, they should instead be added to hooks or fixtures directly.
"""
import os
import uuid
import warnings
import pytest
import responses
import six
@pytest.fixture(autouse=True)
def unpin_db(request):
    """Unpin the database from master in the current DB.
    The `multidb` middleware pins the current thread to master for 15 seconds
    after any POST request, which can lead to unexpected results for tests
    of DB slave functionality."""
    # Imported lazily so Django app setup has happened before multidb loads.
    from multidb import pinning
    # Registered as a finalizer: runs as teardown after every test (autouse).
    request.addfinalizer(pinning.unpin_this_thread)
@pytest.fixture(autouse=True)
def mock_elasticsearch():
    """Mock ElasticSearch in tests by default.
    Tests that do need ES should inherit from ESTestCase, which will stop the
    mock at setup time."""
    from olympia.amo.tests import start_es_mocks, stop_es_mocks
    # Mocks are active for the whole test; everything after the yield is
    # teardown.
    start_es_mocks()
    yield
    stop_es_mocks()
@pytest.fixture(autouse=True)
def start_responses_mocking(request):
    """Activate ``responses`` HTTP mocking for every test by default.

    Tests that genuinely need the network must opt out explicitly with the
    ``allow_external_http_requests`` marker.
    """
    allow_external = request.node.get_closest_marker(
        'allow_external_http_requests') is not None
    if not allow_external:
        responses.start()
    yield
    if not allow_external:
        try:
            responses.stop()
            responses.reset()
        except RuntimeError:
            # responses patcher was already uninstalled
            pass
@pytest.fixture(autouse=True)
def mock_basket(settings):
    """Register canned Basket API responses for every test.

    Tests that do need basket to work should disable `responses`
    and add a passthrough.
    """
    USER_TOKEN = u'13f64f64-1de7-42f6-8c7f-a19e2fae5021'
    # (HTTP method, path, JSON payload) for each Basket endpoint we stub.
    endpoints = (
        (responses.GET, '/news/lookup-user/',
         {'status': 'ok', 'newsletters': [], 'token': USER_TOKEN}),
        (responses.POST, '/news/subscribe/',
         {'status': 'ok', 'token': USER_TOKEN}),
        (responses.POST, '/news/unsubscribe/{}/'.format(USER_TOKEN),
         {'status': 'ok', 'token': USER_TOKEN}),
    )
    for method, path, payload in endpoints:
        responses.add(method, settings.BASKET_URL + path, json=payload)
def pytest_configure(config):
    """pytest hook: re-run Django setup and prefix the ES indexes."""
    import django
    # Forcefully call `django.setup`, pytest-django tries to be very lazy
    # and doesn't call it if it has already been setup.
    # That is problematic for us since we overwrite our logging config
    # in settings_test and it can happen that django get's initialized
    # with the wrong configuration. So let's forcefully re-initialize
    # to setup the correct logging config since at this point
    # DJANGO_SETTINGS_MODULE should be `settings_test` every time.
    django.setup()
    # Imported after setup so app registry/logging are configured first.
    from olympia.amo.tests import prefix_indexes
    prefix_indexes(config)
@pytest.fixture(autouse=True, scope='session')
def instrument_jinja():
    """Session-wide patch so Jinja2 renders emit Django's signal.

    Django's test client fills the "templates" list on responses from the
    ``template_rendered`` signal; stock Jinja2 never sends it, so we wrap
    ``jinja2.Template.render`` to do it ourselves.
    """
    import jinja2
    from django import test

    original_render = jinja2.Template.render

    def signalling_render(self, *args, **kwargs):
        context = dict(*args, **kwargs)
        test.signals.template_rendered.send(
            sender=self, template=self, context=context)
        return original_render(self, *args, **kwargs)

    jinja2.Template.render = signalling_render
def default_prefixer(settings):
    """Make sure each test starts with a default URL prefixer."""
    from django import http
    from olympia import amo
    # Build a minimal fake request: the Prefixer only needs SCRIPT_NAME.
    request = http.HttpRequest()
    request.META['SCRIPT_NAME'] = ''
    prefixer = amo.urlresolvers.Prefixer(request)
    # Reset app/locale to the configured defaults so URL generation is
    # deterministic regardless of what a previous test set.
    prefixer.app = settings.DEFAULT_APP
    prefixer.locale = settings.LANGUAGE_CODE
    amo.urlresolvers.set_url_prefix(prefixer)
@pytest.yield_fixture(autouse=True)
def test_pre_setup(request, tmpdir, settings):
    """Per-test setup/teardown: isolated caches, translations and storage.

    Everything before the ``yield`` is setup, everything after is teardown.
    The statement order matters (caches must be cleared before the new
    KEY_PREFIX is applied) — do not reorder.
    """
    from django.core.cache import caches
    from django.utils import translation
    from olympia import amo, core
    from olympia.translations.hold import clean_translations
    from waffle.utils import get_cache as waffle_get_cache
    from waffle import models as waffle_models
    # Ignore ResourceWarning for now. It's a Python 3 thing so it's done
    # dynamically here.
    if six.PY3:
        warnings.filterwarnings('ignore', category=ResourceWarning) # noqa
    # Clear all cache-instances. They'll be re-initialized by Django
    # This will make sure that our random `KEY_PREFIX` is applied
    # appropriately.
    # This is done by Django too whenever `settings` is changed
    # directly but because we're using the `settings` fixture
    # here this is not detected correctly.
    caches._caches.caches = {}
    # Randomize the cache key prefix to keep
    # tests isolated from each other.
    prefix = uuid.uuid4().hex
    settings.CACHES['default']['KEY_PREFIX'] = 'amo:{0}:'.format(prefix)
    # Reset global django-waffle cache instance to make sure it's properly
    # using our new key prefix
    waffle_models.cache = waffle_get_cache()
    translation.trans_real.deactivate()
    # Django fails to clear this cache.
    translation.trans_real._translations = {}
    translation.trans_real.activate(settings.LANGUAGE_CODE)
    # Helper: join path segments and create the directory if missing.
    def _path(*args):
        path = str(os.path.join(*args))
        if not os.path.exists(path):
            os.makedirs(path)
        return path
    # Point every storage-related setting at this test's tmpdir so tests
    # never touch real on-disk state.
    settings.STORAGE_ROOT = storage_root = _path(str(tmpdir.mkdir('storage')))
    settings.SHARED_STORAGE = shared_storage = _path(
        storage_root, 'shared_storage')
    settings.ADDONS_PATH = _path(storage_root, 'files')
    settings.GUARDED_ADDONS_PATH = _path(storage_root, 'guarded-addons')
    settings.GIT_FILE_STORAGE_PATH = _path(storage_root, 'git-storage')
    settings.MEDIA_ROOT = _path(shared_storage, 'uploads')
    settings.TMP_PATH = _path(shared_storage, 'tmp')
    # Reset the prefixer and urlconf after updating media root
    default_prefixer(settings)
    from django.urls import clear_url_caches, set_urlconf
    def _clear_urlconf():
        clear_url_caches()
        set_urlconf(None)
    _clear_urlconf()
    request.addfinalizer(_clear_urlconf)
    yield
    core.set_user(None)
    clean_translations(None) # Make sure queued translations are removed.
    # Make sure we revert everything we might have changed to prefixers.
    amo.urlresolvers.clean_url_prefixes()
@pytest.fixture
def admin_group(db):
    """Create the Admins group."""
    from olympia.access.models import Group
    # NOTE(review): '*:*' looks like a wildcard granting every permission —
    # confirm against olympia.access rule semantics.
    return Group.objects.create(name='Admins', rules='*:*')
@pytest.fixture
def mozilla_user(admin_group, settings):
    """Create a "Mozilla User" that is a member of the Admins group.

    The user gets ``settings.TASK_USER_ID`` as primary key so code paths
    that look up the task user resolve to this account.
    """
    from olympia.access.models import GroupUser
    from olympia.users.models import UserProfile
    # `objects.create()` already persists the row; the original called
    # user.save() again for no reason.
    user = UserProfile.objects.create(pk=settings.TASK_USER_ID,
                                      email='admin@mozilla.com',
                                      username='admin')
    GroupUser.objects.create(user=user, group=admin_group)
    return user
|
thisisshi/cloud-custodian | tests/test_sns.py | Python | apache-2.0 | 26,225 | 0.000801 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import json
from .common import BaseTest, functional
from c7n.resources.aws import shape_validate
from c7n.utils import yaml_load
class TestSNS(BaseTest):
    @functional
    def test_sns_remove_matched(self):
        """remove-statements with ``matched`` removes only the statements
        flagged by the cross-account filter, keeping whitelisted ones.

        Runs against recorded AWS traffic (replay flight data), so the
        request sequence must stay exactly as recorded.
        """
        session_factory = self.replay_flight_data("test_sns_remove_matched")
        client = session_factory().client("sns")
        name = "test-sns-remove-matched"
        topic_arn = client.create_topic(Name=name)["TopicArn"]
        self.addCleanup(client.delete_topic, TopicArn=topic_arn)
        # Seed the topic with one account-scoped and one public statement.
        client.set_topic_attributes(
            TopicArn=topic_arn,
            AttributeName="Policy",
            AttributeValue=json.dumps(
                {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "SpecificAllow",
                            "Effect": "Allow",
                            "Principal": {"AWS": "arn:aws:iam::644160558196:root"},
                            "Action": ["SNS:Subscribe"],
                            "Resource": topic_arn,
                        },
                        {
                            "Sid": "Public",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": ["SNS:GetTopicAttributes"],
                            "Resource": topic_arn,
                        },
                    ],
                }
            ),
        )
        p = self.load_policy(
            {
                "name": "sns-rm-matched",
                "resource": "sns",
                "filters": [
                    {"TopicArn": topic_arn},
                    {"type": "cross-account", "whitelist": ["123456789012"]},
                ],
                "actions": [{"type": "remove-statements", "statement_ids": "matched"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual([r["TopicArn"] for r in resources], [topic_arn])
        # Only the non-matched (whitelisted-account) statement should remain.
        data = json.loads(
            client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
                "Attributes"
            ][
                "Policy"
            ]
        )
        self.assertEqual(
            [s["Sid"] for s in data.get("Statement", ())], ["SpecificAllow"]
        )
    @functional
    def test_sns_remove_named(self):
        """remove-statements with explicit statement_ids deletes exactly the
        named statement and leaves the others untouched.

        Runs against recorded AWS traffic (replay flight data).
        """
        session_factory = self.replay_flight_data("test_sns_remove_named")
        client = session_factory().client("sns")
        name = "test-sns-remove-named"
        topic_arn = client.create_topic(Name=name)["TopicArn"]
        self.addCleanup(client.delete_topic, TopicArn=topic_arn)
        client.set_topic_attributes(
            TopicArn=topic_arn,
            AttributeName="Policy",
            AttributeValue=json.dumps(
                {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "SpecificAllow",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": ["SNS:Subscribe"],
                            "Resource": topic_arn,
                        },
                        {
                            "Sid": "RemoveMe",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": ["SNS:GetTopicAttributes"],
                            "Resource": topic_arn,
                        },
                    ],
                }
            ),
        )
        p = self.load_policy(
            {
                "name": "sns-rm-named",
                "resource": "sns",
                "filters": [{"TopicArn": topic_arn}],
                "actions": [
                    {"type": "remove-statements", "statement_ids": ["RemoveMe"]}
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # The named statement must be gone from the resulting policy.
        data = json.loads(
            client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
                "Attributes"
            ][
                "Policy"
            ]
        )
        self.assertTrue("RemoveMe" not in [s["Sid"] for s in data.get("Statement", ())])
    @functional
    def test_sns_modify_replace_policy(self):
        """modify-policy with remove-statements '*' plus add-statements
        replaces the whole topic policy with the new statements.

        Runs against recorded AWS traffic (replay flight data).
        """
        session_factory = self.replay_flight_data("test_sns_modify_replace_policy")
        client = session_factory().client("sns")
        name = "test_sns_modify_replace_policy"
        topic_arn = client.create_topic(Name=name)["TopicArn"]
        self.addCleanup(client.delete_topic, TopicArn=topic_arn)
        client.set_topic_attributes(
            TopicArn=topic_arn,
            AttributeName="Policy",
            AttributeValue=json.dumps(
                {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "SpecificAllow",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": ["SNS:Subscribe"],
                            "Resource": topic_arn,
                        }
                    ],
                }
            ),
        )
        p = self.load_policy(
            {
                "name": "sns-modify-replace-policy",
                "resource": "sns",
                "filters": [{"TopicArn": topic_arn}],
                "actions": [
                    {
                        "type": "modify-policy",
                        "add-statements": [
                            {
                                "Sid": "ReplaceWithMe",
                                "Effect": "Allow",
                                "Principal": "*",
                                "Action": ["SNS:GetTopicAttributes"],
                                "Resource": topic_arn,
                            }
                        ],
                        # '*' wipes all pre-existing statements.
                        "remove-statements": "*",
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        data = json.loads(
            client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
                "Attributes"
            ][
                "Policy"
            ]
        )
        self.assertTrue(
            "ReplaceWithMe" in [s["Sid"] for s in data.get("Statement", ())]
        )
@functional
def test_sns_account_id_template(self):
session_factory = self.replay_flight_data("test_sns_account_id_template")
client = session_factory().client("sns")
name = "test_sns_account_id_template"
topic_arn = client.create_topic(Name=name)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
client.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:Subscribe"],
"Resource": topic_arn,
}
],
}
),
)
p = self.load_policy(
{
"name": "sns-modify-replace-policy",
"resource": "sns",
"filters": [{"TopicArn": topic_arn}],
"actions": [
{
"type": "modify-policy",
"add-statements": [
{
"Sid": "__default_statement_ID_{account_id}",
"Effect": "Allow",
"Principal": {"Service": "s3.amazo |
domino14/Webolith | scripts/gen_firewall.py | Python | gpl-3.0 | 2,398 | 0.001251 | template = """# Generated on {{dt}}
*filter
:INPUT DROP
:FORWARD ACCEPT
:OUTPUT ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT
-A INPUT -i eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
{{#rule}}{{#tcprule}}
-A INPUT -s {{source}}/32 -p tcp -m tcp --dport {{dport}} -m state --state NEW,ESTABLISHED -j ACCEPT
{{/tcprule}}{{#allrule}}
-A INPUT -p {{protocol}} -m {{protocol}} --dport {{dport}} -j ACCEPT
{{/allrule}}{{/rule}}
COMMIT
"""
import pystache
import datetime
# securityGroups maps each "security group" name to the list of droplet
# (server) names that belong to it.
securityGroups = {'Database': ['aerolith-pg'],
                  'Web': ['aerolith-web'],
                  'Wordpress': ['AerolithWP'],
                  'Dev': ['ubuntu-512mb-sfo1-01']
                  }
# groupRules maps each security group to the (source, port) pairs allowed to
# connect to it.  A source of 'all' opens the port to the world; any other
# source is a security-group name whose members' private IPs are allowed.
# Note: all groups additionally have port 22 (ssh) open by default (see the
# iptables template above).
# NOTE(review): 'Redis' appears here but has no entry in securityGroups —
# gen_firewall('Redis', ...) would still work, but no 'Redis' members can be
# referenced as a source.  Confirm whether the group was removed on purpose.
groupRules = {'Web': [('all', 80), ('all', 443), ('all', 21), ('all', 20),
                      ('all', '61052:61057'), ('all', 8080)],
              'Redis': [('Web', 6379), ('all', 80)],
              'Database': [('Web', 5432)],
              'Dev': [('all', 80), ('all', 443)]
              }
def gen_firewall(securityGroup, servers):
    """Render iptables rules for one security group and write them to disk.

    Args:
        securityGroup: name of a key in ``groupRules``.
        servers: list of server dicts (DigitalOcean API shape); each needs
            ``name`` and ``networks.v4[0].ip_address``.

    Returns:
        The rendered rules as a string (also written to
        ``iptables.<securityGroup>.rules``).
    """
    context = {'rule': {'tcprule': [], 'allrule': []},
               'dt': str(datetime.datetime.now())}
    rule = groupRules[securityGroup]
    for subrule in rule:
        if subrule[0] == 'all':
            # Open the port to the world.
            port = subrule[1]
            context['rule']['allrule'].append({'dport': port,
                                               'protocol': 'tcp'})
        else:
            # Open the port only to members of the named security group,
            # identified by their first private IPv4 address.
            for server in servers:
                if server['name'] in securityGroups[subrule[0]]:
                    port = subrule[1]
                    context['rule']['tcprule'].append(
                        {'source': server['networks']['v4'][0]['ip_address'],
                         'dport': port
                         })
    res = pystache.render(template, context)
    # Text mode + context manager: pystache returns text (writing it with
    # 'wb' fails on Python 3), and the handle is closed even if the write
    # raises (the original leaked the handle on error).
    with open('iptables.' + securityGroup + '.rules', 'w') as f:
        f.write(res)
    return res
|
pferreir/indico-backup | indico/MaKaC/plugins/Collaboration/Vidyo/pages.py | Python | gpl-3.0 | 11,041 | 0.003713 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from MaKaC.plugins.Collaboration.base import WCSPageTemplateBase, WJSBase, WCSCSSBase
from MaKaC.webinterface.common.tools import strip_ml_tags, unescape_html
from MaKaC.i18n import _
from MaKaC.fossils.user import IAvatarFossil
from MaKaC.plugins.Collaboration.pages import WAdvancedTabBase
from MaKaC.plugins.Collaboration.Vidyo.common import VidyoTools, getVidyoOptionValue
from datetime import timedelta
from MaKaC.common import info
from MaKaC.plugins.Collaboration.Vidyo.indexes import EventEndDateIndex
class WNewBookingForm(WCSPageTemplateBase):
    """Template wrapper for the "create Vidyo booking" form."""
    def getVars(self):
        variables = WCSPageTemplateBase.getVars(self)
        # Pre-fill the form with defaults derived from the event.
        variables["EventTitle"] = VidyoTools.defaultRoomName(self._conf)
        variables["EventDescription"] = unescape_html(strip_ml_tags(self._conf.getDescription())).strip()
        return variables
class WAdvancedTab(WAdvancedTabBase):
    """Vidyo-specific advanced tab; currently adds nothing to the base vars."""
    def getVars(self):
        variables = WAdvancedTabBase.getVars(self)
        return variables
class WMain (WJSBase):
    """Main JS template: exposes the current user and room-name limit."""
    def getVars(self):
        variables = WJSBase.getVars(self)
        variables["LoggedInUser"] = self._user.fossilize(IAvatarFossil)
        variables["MaxNameLength"] = VidyoTools.maxRoomNameLength()
        return variables
class WExtra (WJSBase):
    """Extra JS template; the override adds nothing beyond the base vars."""
    def getVars(self):
        variables = WJSBase.getVars(self)
        return variables
class WIndexing(WJSBase):
    # Marker subclass: inherits all behaviour from WJSBase unchanged.
    pass
class WStyle (WCSCSSBase):
    # Marker subclass: inherits all behaviour from WCSCSSBase unchanged.
    pass
class WInformationDisplay(WCSPageTemplateBase):
    """Template wrapper rendering a booking's details for display."""
    def __init__(self, booking, displayTz):
        WCSPageTemplateBase.__init__(self, booking.getConference(), 'Vidyo', None)
        self._booking = booking
        # Timezone used when formatting dates for display.
        self._displayTz = displayTz
    def getVars(self):
        variables = WCSPageTemplateBase.getVars(self)
        variables["Booking"] = self._booking
        variables["PhoneNumbers"] = getVidyoOptionValue("phoneNumbers")
        return variables
class XMLGenerator(object):
    """Serializes a Vidyo booking to XML for event display pages.

    The exact tag names and emission order are the output contract of
    ``getCustomBookingXML`` — consumers parse this structure.
    """
    @classmethod
    def getDisplayName(cls):
        return _("Vidyo public room")
    @classmethod
    def getFirstLineInfo(cls, booking, displayTz):
        return booking.getBookingParamByName('roomName')
    @classmethod
    def getCustomBookingXML(cls, booking, displayTz, out):
        # The launch block is only emitted when the room can be joined.
        if (booking.canBeStarted()):
            out.openTag("launchInfo")
            out.writeTag("launchText", _("Join Now!"))
            out.writeTag("launchLink", booking.getURL())
            out.writeTag("launchTooltip", _('Click here to join the Vidyo room!'))
            out.closeTag("launchInfo")
        out.writeTag("firstLineInfo", booking.getBookingParamByName("roomName"))
        out.openTag("information")
        out.openTag("section")
        out.writeTag("title", _('Room name:'))
        out.writeTag("line", booking.getBookingParamByName("roomName"))
        out.closeTag("section")
        out.openTag("section")
        out.writeTag("title", _('Extension:'))
        out.writeTag("line", booking.getExtension())
        out.closeTag("section")
        if booking.getHasPin():
            out.openTag("section")
            out.writeTag("title", _('Meeting PIN:'))
            # Only show the actual PIN if the booking allows displaying it.
            if booking.getBookingParamByName("displayPin"):
                out.writeTag("line", booking.getPin())
            else:
                out.writeTag("line", _('This Vidyo room is protected by a PIN'))
            out.closeTag("section")
        out.openTag("section")
        out.writeTag("title", _('Owner:'))
        out.writeTag("line", booking.getOwnerObject().getFullName())
        out.closeTag("section")
        if booking.getBookingParamByName("displayURL"):
            out.openTag("section")
            out.writeTag("title", _('Auto-join URL:'))
            out.openTag("linkLine")
            out.writeTag("href", booking.getURL())
            out.writeTag("caption", booking.getURL())
            out.closeTag("linkLine")
            out.closeTag("section")
        if booking.getBookingParamByName("displayPhoneNumbers") and getVidyoOptionValue("phoneNumbers"):
            out.openTag("section")
            out.writeTag("title", _('Phone access:'))
            out.writeTag("line", ', '.join(getVidyoOptionValue("phoneNumbers")))
            out.closeTag("section")
        out.openTag("section")
        out.writeTag("title", _('Description:'))
        out.writeTag("line", booking.getBookingParamByName("roomDescription"))
        out.closeTag("section")
        out.closeTag("information")
class ServiceInformation(object):
    """Dict-based counterpart of XMLGenerator for newer display pages.

    ``getInformation`` returns an ordered list of sections, each a dict with
    a ``title`` and either ``lines`` (plain text) or ``linkLines``
    ((href, caption) pairs); templates render them in order.
    """
    @classmethod
    def getDisplayName(cls):
        return _("Vidyo public room")
    @classmethod
    def getFirstLineInfo(cls, booking, displayTz=None):
        return booking.getBookingParamByName('roomName')
    @classmethod
    def getLaunchInfo(cls, booking, displayTz=None):
        launchInfo = {
            "launchText": _("Join Now!"),
            "launchLink": ""
        }
        # The join link is only exposed when the room is actually startable.
        if (booking.canBeStarted()):
            launchInfo["launchLink"] = booking.getURL()
            launchInfo["launchTooltip"] = _('Click here to join the Vidyo room!')
        else:
            launchInfo["launchTooltip"] = _('You cannot join to the Vidyo room because the room does not exist')
        return launchInfo
    @classmethod
    def getInformation(cls, booking, displayTz=None):
        sections = []
        sections.append({
            "title": _('Room name'),
            'lines': [booking.getBookingParamByName("roomName")],
        })
        sections.append({
            "title": _('Extension'),
            'lines': [booking.getExtension()],
        })
        if booking.getHasPin():
            pinSection = {}
            pinSection['title'] = _('Meeting PIN')
            # Only reveal the PIN itself when the booking permits it.
            if booking.getBookingParamByName("displayPin"):
                pinSection['lines'] = [booking.getPin()]
            else:
                pinSection['lines'] = [_('This Vidyo room is protected by a PIN')]
            sections.append(pinSection)
        sections.append({
            "title": _('Moderator'),
            'lines': [booking.getOwnerObject().getStraightFullName()],
        })
        if booking.getBookingParamByName("displayPhoneNumbers") and getVidyoOptionValue("phoneNumbers"):
            sections.append({
                "title": _('Phone access numbers'),
                'lines': [', '.join(getVidyoOptionValue("phoneNumbers"))],
            })
        sections.append({
            "title": _('Description'),
            'lines': [booking.getBookingParamByName("roomDescription")],
        })
        if booking.getBookingParamByName("displayURL"):
            autojoinSection = {}
            autojoinSection['title'] = _('Auto-join URL')
            autojoinSection['linkLines'] = [(booking.getURL(), booking.getURL())]
            sections.append(autojoinSection)
        return sections
## Vidyo custom classes for action results
class WShowOldRoomIndexActionResult(WCSPageTemplateBase):
def __init__(self, maxDate):
WCSPageTemplateBase.__init__(self, None, "Vidyo", None)
self._maxDate = maxDate
def _postProcessingClones(self, oldBookingsPerConfIterator, newBookingsPerConfIterator):
oldBookingsPerConf = {}
newBookingsPerConf = {}
for booking in newBookingsPerConfIterator:
key = EventEndDateIndex._bookingToKey(booking)
newBookingsPerConf[key][booking.getConference()] = newBookingsPerConf.setdefault(key, {}).setdef |
futurice/futurice-ldap-user-manager | fum/servers/views.py | Python | bsd-3-clause | 3,137 | 0.009882 | from django.shortcuts import redirect
from django.views.generic import TemplateView
from d | jango.views.generic import ListView, DetailView
from django.views.generic.edit import FormView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from fum.models import Users, Groups, Servers
from fum.util import append_crud_urls
from fum.common.views import filter_by_permissions
from fum.servers.forms import Se | rversForm
import json
NAME = 'servers'
MODEL = Servers
class ListView(ListView):
    """List all servers, ordered case-insensitively by name."""
    model = MODEL
    template_name = '%s/%s_list.html'%(NAME,NAME)
    # Order by case-insensitive name (because Postgres ORDER BY is
    # case-sensitive by default); done via an extra lower(name) column.
    def get_queryset(self):
        return self.model.objects.all().extra(select={'lower_name': 'lower(name)'}).order_by('lower_name')
    def get_context_data(self, **kwargs):
        context = super(ListView, self).get_context_data(**kwargs)
        # Inject the standard create/update/delete URLs for the templates.
        append_crud_urls(context, NAME)
        return context
class DetailView(DetailView):
    """Read-only detail page for a single server, looked up by its name.

    The original defined a ``get_context_data`` that only called ``super``
    and returned the result unchanged — a pure pass-through, removed here.
    """
    template_name = '%s/%s_detail.html'%(NAME,NAME)
    model = MODEL
    # Look servers up by name (the URL slug) instead of primary key.
    slug_field = 'name'
class View(FormView):
    """Generic form view for servers; redirects to the list on success."""
    success_url = '/%s/'%NAME
    def form_valid(self, form):
        # This method is called when valid form data has been POSTed.
        # It should return an HttpResponse.
        #form.send_email()
        return super(View, self).form_valid(form)
class Create(CreateView):
    """Create a new server via ServersForm."""
    template_name = '%s/%s_form.html'%(NAME,NAME)
    model = MODEL
    form_class = ServersForm
class Update(UpdateView):
    """Edit a server; also handles the 'join' / 'join_sudoers' buttons.

    When the POST contains 'join' or 'join_sudoers', the remote user is
    added to the corresponding relation and the form itself is NOT saved.
    """
    template_name = '%s/%s_form.html'%(NAME,NAME)
    model = MODEL
    def form_valid(self, form):
        if 'join' in self.request.POST:
            # REMOTE_USER is the web-server-authenticated username.
            user = Users.objects.get(username = self.request.META['REMOTE_USER'])
            self.object.users.add(user)
        elif 'join_sudoers' in self.request.POST:
            user = Users.objects.get(username = self.request.META['REMOTE_USER'])
            self.object.sudoers.add(user)
        else:
            return super(Update, self).form_valid(form)
        return redirect(self.object)
def servers_json(request):
    """Return the servers visible to the requesting user as JSON.

    With a ``q`` GET parameter, servers whose name contains ``q``
    (case-insensitively) are returned as ``[{"name": ...}, ...]``; without
    it, a plain list of all visible server names is returned.  (The two
    shapes differ — preserved from the original API.)
    """
    user = Users.objects.get(username=request.META['REMOTE_USER'])
    # The original wrapped this whole body in try/except KeyError, which
    # silently swallowed KeyErrors from anywhere inside, not just the
    # missing GET parameter.  Use an explicit lookup instead.
    q = request.GET.get('q')
    if q is not None:
        q = q.strip()
        filtered = Servers.objects.filter(name__icontains=q)
        servers = [{'name': server.name}
                   for server in filter_by_permissions(request, user,
                                                       filtered.distinct())]
    else:
        servers = [server.name
                   for server in filter_by_permissions(request, user,
                                                       Servers.objects.all())]
    return HttpResponse(json.dumps(servers), content_type='application/json')
def server_detail_json(request, servername):
    """Return the usernames of a server's members as a JSON list.

    Fixes the original, which referenced the undefined name ``groupname``,
    queried the ``Groups`` model instead of ``Servers``, and used
    ``get_object_or_404`` without ever importing it — every call raised
    NameError.
    """
    from django.shortcuts import get_object_or_404
    server = get_object_or_404(Servers, name=servername)
    users = [user.username for user in server.users.all()]
    return HttpResponse(json.dumps(users), content_type='application/json')
|
skodapetr/lbvs-environment | methods/ecfc/ecfc2_tanimoto.py | Python | mit | 1,365 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rdkit
from rdkit.Chem import AllChem
from rdkit import DataStructs
__license__ = "X11"
METADATA = {
"id": "method_rdkit_ecfc2_tanimoto",
"representation": "ecfc2",
"similarity": "tanimoto"
}
def _compute_fingerprint(molecule):
    # Morgan fingerprint with radius 1, i.e. the ECFC2 representation.
    return AllChem.GetMorganFingerprint(molecule, 1)
def _compute_similarity(left, right):
    # Tanimoto similarity between two fingerprints, in [0, 1].
    return DataStructs.TanimotoSimilarity(left, right)
def create_model(train_ligands, train_decoys):
    """Build the similarity model: one fingerprint per training ligand.

    Decoys are accepted for interface compatibility but are not used.
    Returns (model, model_information) where model_information is empty.
    """
    model = [{"name": ligand.GetProp("_Name"),
              "fingerprint": _compute_fingerprint(ligand)}
             for ligand in train_ligands]
    return model, {}
def compute_score(model, molecule):
    """Score a molecule as its Tanimoto similarity to the nearest ligand.

    Returns a dict with the best similarity under "value" and the name of
    the closest training ligand under "info"/"closest".  Ties resolve to
    the first ligand in the model, as before.
    """
    query_fp = _compute_fingerprint(molecule)
    scores = [_compute_similarity(query_fp, entry["fingerprint"])
              for entry in model]
    best = max(scores)
    closest = model[scores.index(best)]
    return {
        "value": best,
        "info": {
            "closest": closest["name"]
        }
    }
def compute_similarity(left, right):
    # Pairwise Tanimoto similarity of two molecules' ECFC2 fingerprints.
    return _compute_similarity(_compute_fingerprint(left),
                               _compute_fingerprint(right))
|
berinhard/py-notify | notify/utils.py | Python | lgpl-2.1 | 11,952 | 0.011577 | # -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2006, 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
"""
A collection of utilities that can also be used from outside, if wanted. Functions and
classes here can be assumed public and won’t disappear in future Py-notify versions.
@var is_callable:
Determine if C{object} is callable. E.g. if it is a function, method, class, instance of
a class with C{__call__}, etc. This is the same as built-in function C{callable} does.
C{is_callable} is provided since C{callable} is going to disappear in Python 3000 and may
issue warnings in 2.6.
@var as_string:
Convert any attribute to its name as string. Main use of this utility object is to
perform Python ‘private’ identifier mangling. E.g. you can write::
class MyClass (object):
__slots__ = ('__x')
def get_x (self):
if hasattr (self, as_string.__x):
return self.__x
Advantage is that you don’t have to do mangling ‘by hands’ and hence there is less chance
for a typing error. Furthermore, this code does not require changes if you change
C{MyClass} name to anything else, whereas custom mangling does.
However, usefulness of ‘as_string’ is still doubtful. When I wrote it, I didn’t know one
could just write ``__slots__ = ('__x')``, I thought it needed to be
``__slots__ = ('_MyClass__x')``. Imagine...
"""
__docformat__ = 'epytext en'
__all__ = ('is_callable', 'is_valid_identifier', 'mangle_identifier',
'as_string',
'raise_not_implemented_exception',
'execute',
'frozendict', 'DummyReference', 'ClassTypes', 'StringType')
import re
import sys
import types
from keyword import iskeyword
# On Python < 2.6 just reuse the builtin; later versions define the same
# check via the __call__ protocol, since builtin callable() was slated for
# removal in Python 3000 (see module docstring).
if sys.version_info[:3] < (2, 6, 0):
    is_callable = callable
else:
    def is_callable (object):
        return hasattr (object, '__call__')
def is_valid_identifier (identifier):
    """
    Determine if C{identifier} is a valid Python identifier.  This function never raises
    any exceptions.  If C{identifier} is not a string, it simply returns C{False}.

    @param identifier: identifier to determine if it is valid

    @type  identifier: C{basestring}

    @rtype:            C{bool}
    """

    if not isinstance (identifier, StringType):
        return False
    if re.match ('^[_a-zA-Z][_a-zA-Z0-9]*$', identifier) is None:
        return False
    return not iskeyword (identifier)
def mangle_identifier (class_name, identifier):
    """
    Mangle C{identifier} as Python would if it appeared inside a class named
    C{class_name}.  Mimics the standard mangling of pseudo-private attributes:
    names starting with two underscores and not ending in two.  Names that are
    not private are returned unchanged.

    @param  class_name: name of Python class.
    @type   class_name: C{basestring}

    @param  identifier: name of an attribute of that class.
    @type   identifier: C{basestring}

    @rtype:             C{str}

    @raises ValueError: if either C{class_name} or C{identifier} is not valid from
                        Python's point of view.
    """

    for argument in (class_name, identifier):
        if not is_valid_identifier (argument):
            raise ValueError ("'class_name' and 'identifier' must be valid Python identifiers")

    is_private       = identifier.startswith ('__') and not identifier.endswith ('__')
    only_underscores = class_name == '_' * len (class_name)

    if is_private and not only_underscores:
        return '_%s%s' % (class_name.lstrip ('_'), identifier)
    return identifier
class _AsString (object):

    """
    Internal helper behind the C{L{as_string}} singleton: any attribute
    lookup evaluates to that attribute's own name, and attributes can be
    neither set nor deleted.  Don't use directly.
    """

    __slots__ = ()

    def __getattribute__(self, name):
        # Echo the requested attribute name back as the value.
        return name

    def __repr__(self):
        return 'notify.utils.as_string'

    def __setattr__(self, name, value):
        raise TypeError ("'as_string' attributes cannot be set")

    def __delattr__(self, name):
        raise TypeError ("'as_string' attributes cannot be deleted")
as_string = _AsString ()
def raise_not_implemented_exception (object = None, function_name = None):
    """
    Raise C{NotImplementedError} for a method invoked with C{object} as C{self}.  The
    function determines object class and method declaration class(es) itself and that's
    the whole point of it.

    It should be called like this:
        >>> raise_not_implemented_exception (self)

    And output might look like this::
       File ".../foo.py", line # in ?
         Foo ().bar ()
       File ".../foo.py", line #, in bar
         raise_not_implemented_exception (self)
       File ".../notify/utils.py", line #, in raise_not_implemented_exception
         raise exception
       NotImplementedError: bar() not implemented in class Foo (declared in AbstractFoo)

    Optionally, C{function_name} can be specified.  This argument mainly exists for C
    extension, since function name cannot be detected automatically in this case.  In
    Python code you should just leave this argument out.

    @param  object:        the object for which a non-implemented method is called.
    @type   object:        C{object}

    @param  function_name: name of the unimplemented function or method (inferred
                           automatically for non-extension functions).
    @type   function_name: C{basestring} or C{None}

    @raises NotImplementedError: always.
    """

    if function_name is None:
        # Infer the caller's name by raising and inspecting a throwaway
        # exception: the frame one level up from this one is the
        # unimplemented method itself.
        try:
            raise Exception
        except Exception:
            try:
                traceback     = sys.exc_info () [2]
                function_name = traceback.tb_frame.f_back.f_code.co_name
            except Exception:
                # We can do nothing, ignore.
                pass
    if function_name is not None:
        function_description = '%s()' % function_name
    else:
        function_description = 'UNKNOWN FUNCTION'
    # Everything below is best-effort decoration of the message; any failure
    # deliberately degrades to a plainer message rather than masking the
    # NotImplementedError we are about to raise.
    try:
        class_description = ' in class %s' % object.__class__.__name__
        if function_name is not None:
            declaration_classes = _find_declaration_classes (object.__class__, function_name)
            if len (declaration_classes) == 1:
                if declaration_classes[0] is not object.__class__:
                    class_description += ' (declared in %s)' % declaration_classes[0].__name__
            elif len (declaration_classes) > 1:
                class_description += (' (declared in %s)'
                                      % ', '.join ([_class.__name__
                                                    for _class in declaration_classes]))
    except Exception:
        class_description = ''
    exception = NotImplementedError ('%s not implemented%s'
                                     % (function_description, class_description))
    raise exception
d |
rsdk/labaccess | laborzugang/settings.py | Python | bsd-2-clause | 2,131 | 0.000469 | """
Django settings for laborzugang project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h830^$619skuv!ad&hm8_lw$aji=#*1vc+uw_j^9r%hg2*ep24'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'labaccess',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth. | middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'laborzugang.urls | '
WSGI_APPLICATION = 'laborzugang.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
|
google/ml-fairness-gym | core_test.py | Python | apache-2.0 | 4,658 | 0.005796 | # coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for fairness_gym.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import absltest
from absl.testing import parameterized
import attr
import core
import test_util
from agents import random_agents
from environments import attention_allocation
import gym
import numpy as np
from six.moves import range
@attr.s
class CoreTestParams(core.Params):
  """Trivial `core.Params` subclass used as a fixture in the tests below."""
  a = attr.ib(default=1)
  b = attr.ib(default=2)
  c = attr.ib(default=3)
# This class defines a state that is compatible with DummyEnv in test_util.py
@attr.s(cmp=False)
class CoreTestState(core.State):
  """Minimal `core.State` fixture (cmp=False: equality comes from core.State)."""
  # Scalar state value manipulated by the environment.
  x = attr.ib(default=0.)
  # Optional environment parameters (e.g. a CoreTestParams instance).
  params = attr.ib(default=None)
  # Per-state random number generator.
  rng = attr.ib(factory=np.random.RandomState)
class CoreApiTest(parameterized.TestCase):
  """Unit tests for the core fairness-gym API (env/agent/metric/state)."""
  def test_interactions(self):
    # With no arguments tests dummy implementations defined in test_util.
    test_util.run_test_simulation()
  def test_invalid_env_interactions(self):
    # Stepping with an action outside the action space must raise.
    env = test_util.DummyEnv()
    with self.assertRaises(gym.error.InvalidAction):
      env.step('not a real action')
    # Succeeds.
    env.step(0)
  def test_metric_multiple(self):
    # Two metrics attached to the same env must extract identical histories.
    env = attention_allocation.LocationAllocationEnv()
    agent = random_agents.RandomAgent(env.action_space, None,
                                      env.observation_space)
    env.seed(100)
    observation = env.reset()
    done = False
    for _ in range(2):
      action = agent.act(observation, done)
      observation, _, done, _ = env.step(action)
    metric1 = core.Metric(env)
    metric2 = core.Metric(env)
    history1 = metric1._extract_history(env)
    history2 = metric2._extract_history(env)
    self.assertEqual(history1, history2)
  def test_episode_done_raises_error(self):
    # Acting after the episode has finished is an error.
    env = test_util.DummyEnv()
    agent = random_agents.RandomAgent(env.action_space, None,
                                      env.observation_space)
    obs = env.reset()
    with self.assertRaises(core.EpisodeDoneError):
      agent.act(obs, done=True)
  def test_metric_realigns_history(self):
    # A metric's realign_fn rewrites the raw history before measurement.
    env = test_util.DummyEnv()
    agent = random_agents.RandomAgent(env.action_space, None,
                                      env.observation_space)
    env.set_scalar_reward(agent.reward_fn)
    def realign_fn(history):
      # Replace every recorded state with the constant 1.
      return [(1, action) for _, action in history]
    metric = test_util.DummyMetric(env, realign_fn=realign_fn)
    _ = test_util.run_test_simulation(env, agent, metric)
    history = metric._extract_history(env)
    self.assertCountEqual([1] * 10, [state for state, _ in history])
  def test_state_deepcopy_maintains_equality(self):
    state = CoreTestState(x=0., params=None, rng=np.random.RandomState())
    copied_state = copy.deepcopy(state)
    self.assertIsInstance(copied_state, CoreTestState)
    self.assertEqual(state, copied_state)
  def test_state_with_nested_numpy_serializes(self):
    # Numpy arrays nested inside containers must serialize to plain JSON.
    @attr.s
    class _TestState(core.State):
      x = attr.ib()
    state = _TestState(x={'a': np.zeros(2, dtype=int)})
    self.assertEqual(state.to_json(), '{"x": {"a": [0, 0]}}')
  def test_base_state_updater_raises(self):
    # The abstract StateUpdater base class must refuse to update.
    env = test_util.DummyEnv()
    state = env._get_state()
    with self.assertRaises(NotImplementedError):
      core.StateUpdater().update(state, env.action_space.sample())
  def test_noop_state_updater_does_nothing(self):
    env = test_util.DummyEnv()
    state = env._get_state()
    before = copy.deepcopy(state)
    core.NoUpdate().update(state, env.action_space.sample())
    self.assertEqual(state, before)
  def test_json_encode_function(self):
    # Function values serialize by (qualified) name.
    def my_function(x):
      return x
    self.assertIn('my_function',
                  core.to_json({'params': {
                      'function': my_function
                  }}))
  def test_to_json_with_indent(self):
    self.assertNotIn('\n', core.to_json({'a': 5, 'b': [1, 2, 3]}))
    self.assertIn('\n', core.to_json({'a': 5, 'b': [1, 2, 3]}, indent=4))
if __name__ == '__main__':
  # Run under the absl test runner when executed directly.
  absltest.main()
|
stumped2/school | CS480/milestone3/myreglexer.py | Python | apache-2.0 | 2,134 | 0.009372 | #!/usr/bin/env python
import collections
import sys
import re
def tokenize(stream):
    """Yield Token namedtuples ``(typ, value, line, column)`` for *stream*.

    Lines are 1-indexed and columns 0-indexed.  NEWLINE and SKIP matches
    advance the position but are not yielded.  If an unrecognized character
    is reached, an error is reported and the process exits with status 1.
    """
    Token = collections.namedtuple('Token', ['typ', 'value', 'line', 'column'])
    # Order matters: earlier alternatives win (e.g. REAL before INTEGER).
    tokenSpec = [
        ('REAL', r'[-]?(?=\d*[.eE])(?=\.?\d)\d*\.?\d*(?:[eE][+-]?\d+)?'),
        ('INTEGER', r'[-]?[0-9]+'),
        ('MINUS', r'\-'),
        ('STRING', r'\"(\\.|[^"])*\"'),
        ('BINOP', r'[\+\^\*/%]|([<>!]=?|=)|or|and'),
        ('UNOP', r'not|sin|cos|tan'),
        ('ASSIGN', r':='),
        ('STATEMENT', r'stdout|while|if|let'),
        ('TYPES', r'bool|int|float|string'),
        ('BOOL', r'true|false'),
        ('NAME', r'[a-zA-Z_]+?[a-zA-Z0-9_]+|[a-zA-Z]'),
        ('LBRACE', r'\['),
        ('RBRACE', r'\]'),
        ('NEWLINE', r'\n'),
        ('SKIP', r'[ \t]'),
    ]
    tokenRegex = '|'.join('(?P<%s>%s)' % pair for pair in tokenSpec)
    nextToken = re.compile(tokenRegex).match
    # Current scan position and offset of the start of the current line.
    pos = lineStart = 0
    # Current (1-indexed) line number.
    x = 1
    token = nextToken(stream)
    while token is not None:
        typ = token.lastgroup
        if typ == 'NEWLINE':
            # The next line starts right after the newline character.
            # (Previously this used `pos`, which at this point is still the
            # end of the token *before* the newline — columns on all lines
            # after the first came out too large.)
            lineStart = token.end()
            x += 1
        elif typ != 'SKIP':
            value = token.group(typ)
            yield Token(typ, value, x, token.start() - lineStart)
        pos = token.end()
        token = nextToken(stream, pos)
    if pos != len(stream):
        # Parenthesized so this works under both Python 2 and Python 3.
        print('Unexpected character %r on line %d' % (stream[pos], x))
        sys.exit(1)
def main(argv):
    """Read the file named by *argv* and print each lexed token on its own line."""
    with open(argv, 'r') as f:
        contents = f.read()
    for token in tokenize(contents):
        # Parenthesized print works under both Python 2 and Python 3.
        print(token)
if __name__ == "__main__":
    # Lex the file named by the first command-line argument.
    main(sys.argv[1])
|
googleapis/python-container | samples/generated_samples/container_v1_generated_cluster_manager_set_labels_async.py | Python | apache-2.0 | 1,476 | 0.000678 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License a | t
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for SetLabels
# NOTE: This snippet has been automatically generated for illus | trative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-container
# [START container_v1_generated_ClusterManager_SetLabels_async]
from google.cloud import container_v1
async def sample_set_labels():
    """Generated sample: call ClusterManager.SetLabels and print the response."""
    # Create a client
    client = container_v1.ClusterManagerAsyncClient()
    # Initialize request argument(s)
    request = container_v1.SetLabelsRequest(
        label_fingerprint="label_fingerprint_value",
    )
    # Make the request
    response = await client.set_labels(request=request)
    # Handle the response
    print(response)
# [END container_v1_generated_ClusterManager_SetLabels_async]
|
TeXitoi/navitia | source/sql/alembic/versions/2d86200bcb93_co2_emission_column_added.py | Python | agpl-3.0 | 527 | 0.00759 | """co2_emission column added
Revision ID: 2d86200bcb93
Revises | : 82749d34a18
Create Date: 2014-12-30 17:23:39.654559
"""
# revision identifiers, used by Alembic.
revision = '2d86200bcb93'
down_revision = '82749d34a18'
from alembic import op
import sqlalchemy as sa
import geoalchemy2 as ga
def upgrade():
    # Forward migration: add a NOT NULL float co2_emission to
    # navitia.physical_mode; server_default='0' backfills existing rows.
    op.add_column('physical_mode', sa.Column('co2_emission', sa.FLOAT(), nullable=False, server_default='0'), schema='navitia')
def downgrade():
    # Reverse migration: drop the column added by upgrade().
    op.drop_column('physical_mode', 'co2_emission', schema='navitia')
|
anthill-services/anthill-store | anthill/store/server.py | Python | mit | 4,175 | 0.00024 |
from anthill.common.options import options
from . import handler as h
from . import options as _opts
from anthill.common import server, database, access, keyvalue
from anthill.common.social.steam import SteamAPI
from anthill.common.social.xsolla import XsollaAPI
from anthill.common.social.mailru import MailRuAPI
from . import admin
from . model.store import StoreModel
from . model.item import ItemModel
from . model.category import CategoryModel
from . model.tier import TierModel, CurrencyModel
from . model.order import OrdersModel
from . model.campaign import CampaignsModel
class StoreServer(server.Server):
    """Anthill store service: wires DB/cache, payment APIs, models, handlers."""
    # noinspection PyShadowingNames
    def __init__(self):
        """Build connections, third-party payment clients and data models."""
        super(StoreServer, self).__init__()
        self.db = database.Database(
            host=options.db_host,
            database=options.db_name,
            user=options.db_username,
            password=options.db_password)
        self.cache = keyvalue.KeyValueStorage(
            host=options.cache_host,
            port=options.cache_port,
            db=options.cache_db,
            max_connections=options.cache_max_connections)
        # External payment/social backends share the key-value cache.
        self.steam_api = SteamAPI(self.cache)
        self.xsolla_api = XsollaAPI(self.cache)
        self.mailru_api = MailRuAPI(self.cache)
        self.items = ItemModel(self.db)
        self.categories = CategoryModel(self.db)
        self.tiers = TierModel(self.db)
        self.currencies = CurrencyModel(self.db)
        self.campaigns = CampaignsModel(self.db)
        # Aggregate models depend on the simple ones constructed above.
        self.stores = StoreModel(self.db, self.items, self.tiers, self.currencies, self.campaigns)
        self.orders = OrdersModel(self, self.db, self.tiers, self.campaigns)
        admin.init()
    def get_models(self):
        """Return the models whose schemas the framework should manage."""
        return [self.currencies, self.categories, self.stores,
                self.items, self.tiers, self.orders, self.campaigns]
    def get_admin(self):
        """Map admin panel route names to their controller classes."""
        return {
            "index": admin.RootAdminController,
            "stores": admin.StoresController,
            "store": admin.StoreController,
            "store_settings": admin.StoreSettingsController,
            "new_store_component": admin.NewStoreComponentController,
            "new_store": admin.NewStoreController,
            "categories": admin.CategoriesController,
            "category": admin.CategoryController,
            "new_category": admin.NewCategoryController,
            "category_common": admin.CategoryCommonController,
            "choose_category": admin.ChooseCategoryController,
            "new_item": admin.NewStoreItemController,
            "item": admin.StoreItemController,
            "tiers": admin.StoreTiersController,
            "new_tier_component": admin.NewTierComponentController,
            "tier": admin.StoreTierController,
            "new_tier": admin.NewStoreTierController,
            "currencies": admin.CurrenciesController,
            "currency": admin.CurrencyController,
            "new_currency": admin.NewCurrencyController,
            "orders": admin.OrdersController,
            "campaigns": admin.StoreCampaignsController,
            "new_campaign": admin.NewStoreCampaignController,
            "campaign": admin.StoreCampaignController,
            "new_campaign_item_select": admin.NewCampaignItemSelectController,
            "new_campaign_item": admin.NewCampaignItemController,
            "campaign_item": admin.CampaignItemController
        }
    def get_internal_handler(self):
        """Handler for service-to-service (internal) requests."""
        return h.InternalHandler(self)
    def get_metadata(self):
        """Service descriptor shown in the admin/discovery UI."""
        return {
            "title": "Store",
            "description": "In-App Purchasing, with server validation",
            "icon": "shopping-cart"
        }
    def get_handlers(self):
        """Public HTTP routes exposed by this service."""
        return [
            (r"/store/(.*)", h.StoreHandler),
            (r"/order/new", h.NewOrderHandler),
            (r"/orders", h.OrdersHandler),
            (r"/order/(.*)", h.OrderHandler),
            (r"/hook/([0-9]+)/(.*)/(.*)", h.WebHookHandler),
            (r"/front/xsolla", h.XsollaFrontHandler),
        ]
if __name__ == "__main__":
    # Bootstrap the anthill framework, allow anonymous (public) access
    # tokens, then start the store service event loop.
    stt = server.init()
    access.AccessToken.init([access.public()])
    server.start(StoreServer)
|
meymarce/overlord | overlord/templatetags/view_tag.py | Python | agpl-3.0 | 2,130 | 0.002817 | from django.template import Library, Node, TemplateSyntaxError, Variable
from django.conf import settings
from django.core import urlresolvers
import hashlib
import re
register = Library()
class ViewNode(Node):
    """Template node for ``{% view <url> ... %}``: embeds another view.

    Depending on ``settings.USE_AJAX_REQUESTS`` the referenced view is
    either resolved and rendered server-side (like an internal
    sub-request), or a placeholder ``<div>`` plus a jQuery snippet that
    loads it via AJAX is emitted.
    """
    def __init__(self, parser, token):
        """Parse positional and ``name=value`` arguments of the tag."""
        self.args = []
        self.kwargs = {}
        tokens = token.split_contents()
        if len(tokens) < 2:
            raise TemplateSyntaxError("%r tag requires one or more arguments" % token.contents.split()[0])
        tokens.pop(0)  # discard the tag name itself
        self.url_or_view = tokens.pop(0)
        for token in tokens:
            equals = token.find("=")
            if equals == -1:
                self.args.append(token)
            else:
                self.kwargs[str(token[:equals])] = token[equals + 1:]
    def render(self, context):
        """Render the referenced view inline, or emit the AJAX loader."""
        if 'request' not in context:
            return ""
        request = context['request']
        # Resolve the template variable to the url for the view.
        url = Variable(self.url_or_view).resolve(context)
        if not settings.USE_AJAX_REQUESTS:
            # Render server-side: resolve the url to a view callable and
            # inline only its content (not the whole template).
            urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
            resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
            view, args, kwargs = resolver.resolve(url)
            try:
                if callable(view):
                    ret = view(context['request'], *args, **kwargs).render()
                    return ret.rendered_content
                raise Exception("%r is not callable" % view)
            except Exception:
                # Swallow rendering errors in production; surface them
                # while developing.  (Was a bare ``except:``, which also
                # caught SystemExit/KeyboardInterrupt.)
                if settings.TEMPLATE_DEBUG:
                    raise
        else:
            return """<div id="%(div_id)s">loading ...</div>
            <script>
            $.get( "%(url)s", function( data ) {
            $( "#%(div_id)s" ).html( data );
            });
            </script>""" % {'div_id': url.replace("/", ""), 'url': url}
        return ""
register.tag('view', ViewNode)
|
Perlence/porcupy | tests/test_assign.py | Python | bsd-3-clause | 12,924 | 0.003018 | import pytest
from porcupy.compiler import compile as compile_
# Golden-output tests: each asserts the exact code emitted by the compiler.
def test_consts():
    # Constants compile away unless referenced; redefining one is an error.
    assert compile_('X = 4') == ''
    assert compile_('X = 4; y = X') == 'p1z 4'
    assert compile_('X = 4; Y = X; z = Y') == 'p1z 4'
    with pytest.raises(ValueError) as exc_info:
        compile_('X = 4; X = 5')
    assert 'cannot redefine a constant' in str(exc_info.value)
    with pytest.raises(TypeError) as exc_info:
        assert compile_('X = 4.5') == ''
    assert 'cannot define a constant' in str(exc_info.value)
def test_numbers():
    # Integer and float literal assignment (floats encode as scaled ints).
    assert compile_('x = 4') == 'p1z 4'
    assert compile_('x = 4.0') == 'p1z 4'
    assert compile_('x = 4.5') == 'p2z 9 p1z p2z/2'
    assert compile_('x = 4; y = 5') == 'p1z 4 p2z 5'
    assert compile_('x = 4; x = 5') == 'p1z 4 p1z 5'
def test_other_names():
    # Assigning one variable to another copies the slot value.
    assert compile_('x = 4; y = x') == 'p1z 4 p2z p1z'
    assert compile_('x = 4; y = x; z = y; y = 5') == 'p1z 4 p2z p1z p3z p2z p2z 5'
    assert compile_('x = 4; y = x; x = y') == 'p1z 4 p2z p1z p1z p2z'
def test_strings():
    # String variables are unsupported: no slot type for them.
    with pytest.raises(TypeError) as exc_info:
        compile_('s = "Hello World"')
    assert 'cannot allocate slot of type' in str(exc_info)
def test_bools():
    # Booleans compile to 0/1.
    assert compile_('x = False') == 'p1z 0'
    assert compile_('x = True') == 'p1z 1'
def test_binary_op():
    # Constant folding plus temporary-slot allocation for compound ops.
    assert compile_('x = 1+2') == 'p1z 3'
    assert compile_('x = 1+2+3') == 'p1z 6'
    assert compile_('x = 1+2*3') == 'p1z 7'
    assert compile_('x = 1; y = x+2') == 'p1z 1 p2z p1z+2'
    # assert compile_('x = 1; y = x+2+3') == 'p1z 1 p2z p1z+5'
    assert compile_('x = 1; y = x+2+3') == 'p1z 1 p3z p1z+2 p2z p3z+3'
    assert compile_('x = 1; y = x+2*3') == 'p1z 1 p2z p1z+6'
    assert compile_('x = 2; y = 1+x*3') == 'p1z 2 p3z p1z*3 p2z p3z+1'
    assert compile_('x = 1; y = 1-x; y = 1-x') == 'p1z 1 p3z 1 p2z p3z-p1z p3z 1 p2z p3z-p1z'
    assert compile_('x = 5; y = 1/x') == 'p1z 5 p3z 1 p2z p3z/p1z'
    assert compile_('x = 1; y = 1-x*5') == 'p1z 1 p3z 1 p4z p1z*5 p2z p3z-p4z'
    assert compile_('x = 1; y = 1-x*5/2') == 'p1z 1 p3z p1z*5 p4z 1 p5z p3z/2 p2z p4z-p5z'
    assert compile_('x = 1; y = 1-5*x/2') == 'p1z 1 p3z p1z*5 p4z 1 p5z p3z/2 p2z p4z-p5z'
    assert compile_('x = 4; z = x-(-1)') == 'p1z 4 p2z p1z+1'
    assert compile_('x = 4; Y = -1; z = x-Y') == 'p1z 4 p2z p1z+1'
def test_compare():
    # Comparisons lower to a conditional set of a 0/1 temporary slot.
    # assert compile_('x = 3 < 5') == 'p1z 1'
    # assert compile_('x = 3 < 5 < 6') == 'p1z 1'
    # assert compile_('x = 3 < 5 > 6') == 'p1z 0'
    assert compile_('x = 3; y = x < 5') == 'p1z 3 p3z 0 # p1z < 5 ( p3z 1 ) p2z p3z'
    assert compile_('x = 3; y = x < 5 < 6') == 'p1z 3 p3z 0 # p1z < 5 & 5 < 6 ( p3z 1 ) p2z p3z'
    assert compile_('x = 3; y = x < 5 < 6') == 'p1z 3 p3z 0 # p1z < 5 & 5 < 6 ( p3z 1 ) p2z p3z'
def test_bool_op():
    # and/or lower to conditional slot sets; nesting adds temporaries.
    # assert compile_('x = True and True') == 'p1z 1'
    # assert compile_('x = True or False') == 'p1z 1'
    # assert compile_('x = True; y = True; z = x and y') == 'p1z 1 p2z 1 p3z 0 # p1z ! 0 & p2z ! 0 ( p3z p2z ) p3z p3z'
    # assert compile_('x = True; y = False; z = x or y') == 'p1z 1 p2z 0 p3z 0 # p1z ! 0 | p2z ! 0 ( p3z p1z ) p3z p3z'
    assert compile_('x = True; y = True; z = x and y') == 'p1z 1 p2z 1 p4z 0 # p1z ! 0 & p2z ! 0 ( p4z 1 ) p3z p4z'
    assert compile_('x = True; y = False; z = x or y') == 'p1z 1 p2z 0 p4z 1 # p1z = 0 & p2z = 0 ( p4z 0 ) p3z p4z'
    assert compile_('x = 3; y = x < 5 and x < 6') == 'p1z 3 p3z 0 # p1z < 5 & p1z < 6 ( p3z 1 ) p2z p3z'
    assert (compile_('x = 11; y = x < 12 and (x < 13 or x < 14)') ==
            'p1z 11 '
            'p3z 1 # p1z >= 13 & p1z >= 14 ( p3z 0 ) '
            'p4z 0 # p1z < 12 & p3z ! 0 ( p4z 1 ) p2z p4z')
    assert (compile_('x = 11; y = x < 12 and (x < 13 or x < 14 or x < 15)') ==
            'p1z 11 '
            'p3z 1 # p1z >= 13 & p1z >= 14 & p1z >= 15 ( p3z 0 ) '
            'p4z 0 # p1z < 12 & p3z ! 0 ( p4z 1 ) p2z p4z')
    assert (compile_('x = 11; y = x < 12 and (x < 13 or (x < 14 or x < 15))') ==
            'p1z 11 '
            'p3z 1 # p1z >= 14 & p1z >= 15 ( p3z 0 ) '
            'p4z 1 # p1z >= 13 & p3z = 0 ( p4z 0 ) '
            'p5z 0 # p1z < 12 & p4z ! 0 ( p5z 1 ) p2z p5z')
    assert (compile_('x = 1; y = x == 1 or x == x and x == 1') ==
            'p1z 1 '
            'p3z 0 # p1z = p1z & p1z = 1 ( p3z 1 ) '
            'p4z 1 # p1z ! 1 & p3z = 0 ( p4z 0 ) p2z p4z')
    assert (compile_('x = 1; y = x == 1 or x == x == 1') ==
            'p1z 1 '
            'p3z 0 # p1z = p1z & p1z = 1 ( p3z 1 ) '
            'p4z 1 # p1z ! 1 & p3z = 0 ( p4z 0 ) p2z p4z')
def test_unary_op():
    # Unary +, -, ~ and `not` (constants fold; variables use temporaries).
    assert compile_('x = +4') == 'p1z 4'
    assert compile_('x = -4') == 'p1z -4'
    assert compile_('x = 4; y = -x') == 'p1z 4 p2z p1z*-1'
    assert compile_('x = ~5') == 'p1z -6'
    assert compile_('x = ~-6') == 'p1z 5'
    assert compile_('x = ~True') == 'p1z -2'
    assert compile_('x = ~False') == 'p1z -1'
    assert compile_('x = 5; y = ~x') == 'p1z 5 p3z p1z*-1 p2z p3z-1'
    assert compile_('x = not 4') == 'p1z 0'
    assert compile_('x = not 0') == 'p1z 1'
    assert compile_('x = not True') == 'p1z 0'
    assert compile_('x = not False') == 'p1z 1'
    assert compile_('x = 4; y = not x') == 'p1z 4 p3z 0 # p1z = 0 ( p3z 1 ) p2z p3z'
    assert compile_('x = 3; y = not x < 5 < 6') == 'p1z 3 p3z 1 # p1z < 5 & 5 < 6 ( p3z 0 ) p2z p3z'
def test_undefined():
    # Using an unbound name raises the usual Python-style NameError.
    with pytest.raises(NameError) as exc_info:
        compile_('x = y')
    assert "name 'y' is not defined" in str(exc_info.value)
def test_lists():
    # Lists lay out elements in consecutive slots plus a pointer slot.
    with pytest.raises(TypeError) as exc_info:
        assert compile_('x = [1, "2"]')
    assert 'list items must be of the same type' in str(exc_info.value)
    assert compile_('x = [1, 2]') == 'p1z 1 p2z 2 p3z 1'
    assert compile_('x = 1; y = [2, 3]') == 'p1z 1 p2z 2 p3z 3 p4z 2'
    assert compile_('x = [1, 2, 3]; y = x') == 'p1z 1 p2z 2 p3z 3 p4z 1 p5z p4z'
    assert compile_('x = [[11, 22], [33, 44]]') == 'p1z 11 p2z 22 p3z 33 p4z 44 p5z 1 p6z 3 p7z 5'
    assert compile_('x = [1, 2]; y = [3, 4]; z = [x, y]') == 'p1z 1 p2z 2 p3z 1 p4z 3 p5z 4 p6z 4 p7z p3z p8z p6z p9z 7'
    # List with 99 elements in it causes a MemoryError
    with pytest.raises(MemoryError) as exc_info:
        compile_('x = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,'
                 '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,'
                 '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,'
                 '0,0,0,0,0,0,0,0,0,0,0,0]')
    assert 'ran out of variable slots' in str(exc_info.value)
    assert compile_('x = [1, 2]; y = x[0]') == 'p1z 1 p2z 2 p3z 1 p5z p3z+0 p6z p^5z p4z p6z'
    assert compile_('x = [1, 2]; y = 0; z = x[y]') == 'p1z 1 p2z 2 p3z 1 p4z 0 p6z p3z+p4z p7z p^6z p5z p7z'
    assert compile_('x = [1, 2]; x[0] = 5') == 'p1z 1 p2z 2 p3z 1 p4z p3z+0 p^4z 5'
    with pytest.raises(IndexError) as exc_info:
        compile_('x = [1, 2]; y = x[2]')
    assert 'list index out of range' in str(exc_info.value)
    # assert compile_('x = [0] * 3') == 'p1z 0 p2z 0 p3z 0 p4z 1'
    # assert compile_('x = [11, 22, 33]; x = [11, 22, 33]') == 'p1z 11 p2z 22 p3z 33 p4z 1 p1z 11 p2z 22 p3z 33'
    assert compile_('x = [1, 2]; x[0] = x[1] = 5') == 'p1z 1 p2z 2 p3z 1 p4z p3z+0 p^4z 5 p5z p3z+1 p^5z 5'
    assert compile_('x = [11, 22]; y = x[0] + x[1]') == 'p1z 11 p2z 22 p3z 1 p5z p3z+0 p6z p^5z p5z p3z+1 p7z p^5z p4z p6z+p7z'
def test_const_list():
    # Constant lists still materialize slots; items stay writable.
    assert compile_('X = [11, 22, 33]') == 'p1z 11 p2z 22 p3z 33'
    assert compile_('X = [11, 22, 33]; y = X[0]') == 'p1z 11 p2z 22 p3z 33 p4z p1z'
    # Constant list is not *immutable*, so it must be possible to set
    # items
    assert compile_('X = [11, 22, 33]; X[0] = 44') == 'p1z 11 p2z 22 p3z 33 p4z 1 p^4z 44'
    assert compile_('X = [11, 22, 33]; X[0] += 44') == 'p1z 11 p2z 22 p3z 33 p4z 1 p^4z p^4z+44'
def test_range():
assert compile_('X = range(5)') == ''
assert compile_('X = range(5, 10)') == ''
assert compile_('X = range(11, 44, 11)') == ''
assert compile_('X = range(11, 44, 11); y = X[0]; y = X[2]') == 'p1z 11 p1z 33'
with pytest.raises(TypeError) as exc_info:
a |
Widdershin/CodeEval | challenges/001-fizzbuzz.py | Python | mit | 379 | 0.047493 | import sys
def fizzbuzz_line(a, b, n):
    """Return the FizzBuzz line for divisors *a* and *b* over 1..n.

    Multiples of ``a`` render as ``F``, multiples of ``b`` as ``B`` (both
    as ``FB``); every other number renders as itself, space-separated.
    """
    parts = []
    for i in range(1, n + 1):
        out = ""
        if i % a == 0:
            out += "F"
        if i % b == 0:
            out += "B"
        parts.append(out or str(i))
    return " ".join(parts)


def main(file_name):
    """Print one FizzBuzz line per ``a b n`` triple in *file_name*."""
    with open(file_name) as open_file:
        for line in open_file.readlines():
            a, b, n = map(int, line.split())
            print(fizzbuzz_line(a, b, n))


if __name__ == "__main__":
    main(sys.argv[1])
|
pbmanis/acq4 | acq4/devices/MockClamp/devTemplate.py | Python | mit | 8,324 | 0.003123 | # -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file './acq4/devices/MockClamp/devTemplate.ui'
#
# Created: Thu May 29 10:20:36 2014
# by: PyQt4 UI code generator 4.9.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MockClampDevGui(object):
def setupUi(self, MockClampDevGui):
MockClampDevGui.setObjectName(_fromUtf8("MockClampDevGui"))
MockClampDevGui.resize(459, 243)
self.gridLayout = QtGui.QGridLayout(MockClampDevGui)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.groupBox_2 = QtGui.QGroupBox(MockClampDevGui)
self.groupBox_2. | setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.horizontalLayout = | QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.vcModeRadio = QtGui.QRadioButton(self.groupBox_2)
self.vcModeRadio.setObjectName(_fromUtf8("vcModeRadio"))
self.horizontalLayout.addWidget(self.vcModeRadio)
self.i0ModeRadio = QtGui.QRadioButton(self.groupBox_2)
self.i0ModeRadio.setObjectName(_fromUtf8("i0ModeRadio"))
self.horizontalLayout.addWidget(self.i0ModeRadio)
self.icModeRadio = QtGui.QRadioButton(self.groupBox_2)
self.icModeRadio.setObjectName(_fromUtf8("icModeRadio"))
self.horizontalLayout.addWidget(self.icModeRadio)
self.gridLayout_3.addLayout(self.horizontalLayout, 0, 0, 1, 2)
self.label_3 = QtGui.QLabel(self.groupBox_2)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_3.addWidget(self.label_3, 1, 0, 1, 1)
self.vcHoldingSpin = SpinBox(self.groupBox_2)
self.vcHoldingSpin.setObjectName(_fromUtf8("vcHoldingSpin"))
self.gridLayout_3.addWidget(self.vcHoldingSpin, 1, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.groupBox_2)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_3.addWidget(self.label_4, 2, 0, 1, 1)
self.icHoldingSpin = SpinBox(self.groupBox_2)
self.icHoldingSpin.setObjectName(_fromUtf8("icHoldingSpin"))
self.gridLayout_3.addWidget(self.icHoldingSpin, 2, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem, 4, 0, 1, 1)
self.label_6 = QtGui.QLabel(self.groupBox_2)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout_3.addWidget(self.label_6, 3, 0, 1, 1)
self.pipOffsetSpin = SpinBox(self.groupBox_2)
self.pipOffsetSpin.setObjectName(_fromUtf8("pipOffsetSpin"))
self.gridLayout_3.addWidget(self.pipOffsetSpin, 3, 1, 1, 1)
self.gridLayout.addWidget(self.groupBox_2, 0, 0, 1, 1)
self.groupBox = QtGui.QGroupBox(MockClampDevGui)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.pipCapSpin = SpinBox(self.groupBox)
self.pipCapSpin.setObjectName(_fromUtf8("pipCapSpin"))
self.gridLayout_2.addWidget(self.pipCapSpin, 0, 1, 1, 2)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
self.pipResSpin = SpinBox(self.groupBox)
self.pipResSpin.setObjectName(_fromUtf8("pipResSpin"))
self.gridLayout_2.addWidget(self.pipResSpin, 1, 1, 1, 2)
self.label_5 = QtGui.QLabel(self.groupBox)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_2.addWidget(self.label_5, 2, 0, 1, 1)
self.pipJunctPotSpin = SpinBox(self.groupBox)
self.pipJunctPotSpin.setObjectName(_fromUtf8("pipJunctPotSpin"))
self.gridLayout_2.addWidget(self.pipJunctPotSpin, 2, 1, 1, 2)
self.pipBathRadio = QtGui.QRadioButton(self.groupBox)
self.pipBathRadio.setObjectName(_fromUtf8("pipBathRadio"))
self.gridLayout_2.addWidget(self.pipBathRadio, 3, 0, 1, 3)
self.pipAttachRadio = QtGui.QRadioButton(self.groupBox)
self.pipAttachRadio.setObjectName(_fromUtf8("pipAttachRadio"))
self.gridLayout_2.addWidget(self.pipAttachRadio, 5, 0, 1, 2)
self.pipWholeRadio = QtGui.QRadioButton(self.groupBox)
self.pipWholeRadio.setObjectName(_fromUtf8("pipWholeRadio"))
self.gridLayout_2.addWidget(self.pipWholeRadio, 5, 2, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem1, 6, 0, 1, 1)
self.comboBox = QtGui.QComboBox(self.groupBox)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.gridLayout_2.addWidget(self.comboBox, 4, 1, 1, 2)
self.label_7 = QtGui.QLabel(self.groupBox)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_2.addWidget(self.label_7, 4, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox, 0, 1, 1, 1)
self.retranslateUi(MockClampDevGui)
QtCore.QMetaObject.connectSlotsByName(MockClampDevGui)
def retranslateUi(self, MockClampDevGui):
MockClampDevGui.setWindowTitle(QtGui.QApplication.translate("MockClampDevGui", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("MockClampDevGui", "Clamp", None, QtGui.QApplication.UnicodeUTF8))
self.vcModeRadio.setText(QtGui.QApplication.translate("MockClampDevGui", "VC", None, QtGui.QApplication.UnicodeUTF8))
self.i0ModeRadio.setText(QtGui.QApplication.translate("MockClampDevGui", "I=0", None, QtGui.QApplication.UnicodeUTF8))
self.icModeRadio.setText(QtGui.QApplication.translate("MockClampDevGui", "IC", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MockClampDevGui", "VC Holding", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MockClampDevGui", "IC Holding", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("MockClampDevGui", "Pipette Offset", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("MockClampDevGui", "Pipette", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MockClampDevGui", "Capacitance", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MockClampDevGui", "Resistance", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("MockClampDevGui", "Junct. Pot.", None, QtGui.QApplication.UnicodeUTF8))
self.pipBathRadio.setText(QtGui.QApplication.translate("MockClampDevGui", "Bath", None, QtGui.QApplication.UnicodeUTF8))
self.pipAttachRadio.setText(QtGui.QApplication.translate("MockClampDevGui", "On Cell", None, QtGui.QApplication.UnicodeUTF8))
self.pipWholeRadio.setText(QtGui.QApplication.translate("MockClampDevGui", "Whole Cell", None, QtGui.QApplication.UnicodeUTF8))
self.comboBox.setItemText(0, QtGui.QApplication.translate("MockClampDevGui", "HH", None, QtGui.QApplication.UnicodeUTF8))
self.comboBox.setItemText(1, QtGui.QApplication.translate("MockClampDevGui", "Type II", None, QtGui.QApplication.UnicodeUTF8))
self.comboBox.setItemText(2, QtGui.QApplication.translate("MockClampDevGui", "Type I", None, QtGui.QApplication.UnicodeUTF8))
self.lab |
google/it-cert-automation | Course2/areas.py | Python | apache-2.0 | 771 | 0.003891 | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under | the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRAN | TIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
def triangle(base, height):
    """Return the area of a triangle with the given base and height."""
    product = base * height
    return product / 2
def rectangle(base, height):
    """Return the area of a rectangle with the given base and height."""
    return height * base
def circle(radius):
    """Return the area of a circle with the given radius."""
    squared = radius ** 2
    return squared * math.pi
|
cwalk/CapacitiveTouchLamp | setup.py | Python | mit | 1,311 | 0.026697 | try:
# Try using ez_setup to install setuptools if not already installed.
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
# Ignore import error and assume Python 3 which already has setuptools.
pass
from setuptools import setup, find_packages
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: | Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'Adafruit_MPR121',
version = '1.1.2',
author = 'Tony DiCola',
author_email | = 'tdicola@adafruit.com',
description = 'Library for MPR121 capacitive touch sensor.',
license = 'MIT',
classifiers = classifiers,
url = 'https://github.com/adafruit/Adafruit_Python_MPR121/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.7'],
install_requires = ['Adafruit-GPIO>=0.7'],
packages = find_packages())
|
ShuffleBox/django-rcsfield | rcs/wiki/admin.py | Python | bsd-3-clause | 152 | 0.006579 | fro | m django.contrib import admin
from rcs.wiki.m | odels import WikiPage, WikiAttachment
admin.site.register(WikiPage)
admin.site.register(WikiAttachment) |
hrishioa/Aviato | kartograph/kartograph/geometry/view.py | Python | gpl-2.0 | 3,541 | 0.001977 |
from shapely.geometry import Polygon, MultiPolygon, LineString, MultiLineString, MultiPoint, Point
from kartograph.errors import KartographError
# # View
# Simple 2D coordinate transformation.
class View(object):
    """
    Maps points from a map's bounding box into pixel coordinates of a
    fixed-size view (width x height, with optional padding). The scale is
    uniform (aspect ratio preserved) and the bounding box is centered
    inside the view.

    If no bounding box is given, all projection methods are identity
    transforms.
    """
    def __init__(self, bbox=None, width=None, height=None, padding=0):
        self.bbox = bbox
        self.width = width
        self.padding = padding
        self.height = height
        if bbox:
            # Uniform scale: the bbox must fit inside the padded view in
            # both dimensions, so take the smaller of the two ratios.
            self.scale = min((width - padding * 2) / bbox.width, (height - padding * 2) / bbox.height)

    def project(self, pt):
        """Project a point (tuple or shapely Point) into view coordinates.

        Returns the same kind of object (tuple or Point) as the input.
        """
        bbox = self.bbox
        if not bbox:
            return pt
        s = self.scale
        h = self.height
        w = self.width
        px = pt[0]
        py = pt[1]
        # Scale, then shift so the bbox is centered inside the view.
        x = (px - bbox.left) * s + (w - bbox.width * s) * .5
        y = (py - bbox.top) * s + (h - bbox.height * s) * .5
        if isinstance(pt, Point):
            return Point(x, y)
        return (x, y)

    def project_inverse(self, pt):
        """Inverse of project(): map view coordinates back to map space."""
        bbox = self.bbox
        if not bbox:
            return pt
        s = self.scale
        h = self.height
        w = self.width
        x = pt[0]
        y = pt[1]
        px = (x - (w - bbox.width * s) * .5) / s + bbox.left
        py = (y - (h - bbox.height * s) * .5) / s + bbox.top
        if isinstance(pt, Point):
            return Point(px, py)
        return (px, py)

    def project_geometry(self, geometry):
        """Convert a shapely geometry into view coordinates.

        Multi-part inputs are flattened, projected part by part and
        re-assembled into the matching Multi* type (or a single geometry
        when only one part remains).
        """
        geometries = hasattr(geometry, 'geoms') and geometry.geoms or [geometry]
        res = []
        for geom in geometries:
            if isinstance(geom, Polygon):
                res += self.project_polygon(geom)
            elif isinstance(geom, LineString):
                rings = self.project_linear_ring(geom)
                res += map(LineString, rings)
            elif isinstance(geom, Point):
                res.append(self.project((geom.x, geom.y)))
            else:
                raise KartographError('unknown geometry type %s' % geometry)
        if len(res) > 0:
            if isinstance(res[0], Polygon):
                if len(res) > 1:
                    return MultiPolygon(res)
                else:
                    return res[0]
            elif isinstance(res[0], LineString):
                if len(res) > 1:
                    return MultiLineString(res)
                else:
                    return LineString(res[0])
            else:
                if len(res) > 1:
                    return MultiPoint(res)
                else:
                    return Point(res[0])

    def project_polygon(self, polygon):
        # Project exterior and interior rings; assumes the exterior stays
        # a single ring after projection (raises otherwise).
        ext = self.project_linear_ring(polygon.exterior)
        if len(ext) == 1:
            pts_int = []
            for interior in polygon.interiors:
                pts_int += self.project_linear_ring(interior)
            return [Polygon(ext[0], pts_int)]
        elif len(ext) == 0:
            return []
        else:
            raise KartographError('unhandled case: exterior is split into multiple rings')

    def project_linear_ring(self, ring):
        # Project each coordinate of the ring; returned as a one-element
        # list of point lists to match project_polygon's expectations.
        points = []
        for pt in ring.coords:
            x, y = self.project(pt)
            points.append((x, y))
        return [points]

    def __str__(self):
        return 'View(w=%f, h=%f, pad=%f, scale=%f, bbox=%s)' % (self.width, self.height, self.padding, self.scale, self.bbox)
|
ralphm/wokkel | doc/conf.py | Python | mit | 7,874 | 0.007112 | # -*- coding: utf-8 -*-
#
# Wokkel documentation build configuration file, created by
# sphinx-quickstart on Mon May 7 11:15:38 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['apilinks_sphinxext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Wokkel'
copyright = u'2003-2012, Ralph Meijer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '18.0.0'
# The full version, including alpha/beta/rc tags.
release = '18.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'listings']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# pydoctor API base URL
apilinks_base_url = 'api/'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['localtoc.html', 'indexsidebar.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Wokkeldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Wokkel.tex', u'Wokkel Documentation',
u'Ralph Meijer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wokkel', u'Wokkel Documentation',
[u'Ralph Meijer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Wokkel', u'Wokkel Documentation',
u'Ralph Meijer', 'Wokkel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
ojii/django-filer | filer/admin/fileadmin.py | Python | mit | 4,673 | 0.00856 | from django.core.urlresolvers import reverse
from django.contrib.admin.util import unquote
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django import forms
from filer.admin.permissions import PrimitivePermissionAwareModelAdmin
from filer.models import File
# forms
class FileAdminChangeFrom(forms.ModelForm):
    # Admin change form for File. No explicit ``fields``/``exclude`` is
    # declared, so all model fields are exposed.
    # NOTE(review): the class name has a typo ("From" instead of "Form"),
    # but it is referenced as a public name (FileAdmin.form), so it is
    # kept unchanged here.
    class Meta:
        model = File
#ModelAdmins
class FileAdmin(PrimitivePermissionAwareModelAdmin):
    """Admin for filer ``File`` objects.

    After a save or delete the user is redirected back to the filer
    directory listing the file belongs to (or the "unfiled" listing),
    instead of the default flat changelist.
    """
    list_display = ('label',)
    list_per_page = 10
    search_fields = ['name', 'original_filename', 'sha1']
    raw_id_fields = ('owner',)
    readonly_fields = ('sha1',)
    # save_as hack, because without save_as it is impossible to hide the
    # save_and_add_another if save_as is False.
    # To show only save_and_continue and save in the submit row we need
    # save_as=True and in render_change_form() override add and change to False.
    save_as = True
    form = FileAdminChangeFrom

    fieldsets = (
        (None, {
            'fields': ('name', 'owner', 'description')
        }),
        (None, {
            'fields': ('is_public',)
        }),
        (_('Advanced'), {
            'fields': ('file', 'sha1',),
            'classes': ('collapse',),
        }),
    )

    def response_change(self, request, obj):
        '''
        Overrides the default to be able to forward to the directory listing
        instead of the default change_list_view.
        '''
        r = super(FileAdmin, self).response_change(request, obj)
        if r['Location']:
            # it was a successful save
            if r['Location'] in ['../']:
                # plain "save": redirect to the directory the file lives in
                if obj.folder:
                    url = reverse('admin:filer-directory_listing',
                                  kwargs={'folder_id': obj.folder.id})
                else:
                    url = reverse('admin:filer-directory_listing-unfiled_images')
                return HttpResponseRedirect(url)
            else:
                # probably "save and continue editing": keep default target
                pass
        return r

    def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
        # Force add=False/change=False so the submit row only shows
        # "save" and "save and continue" (see the save_as hack above).
        extra_context = {'show_delete': True}
        context.update(extra_context)
        return super(FileAdmin, self).render_change_form(request=request, context=context, add=False, change=False, form_url=form_url, obj=obj)

    def delete_view(self, request, object_id, extra_context=None):
        '''
        Overrides the default to enable redirecting to the directory view after
        deletion of an image.

        We need to fetch the object and find out who the parent is
        before super, because super will delete the object and make it
        impossible to find out the parent folder to redirect to.
        '''
        parent_folder = None
        try:
            obj = self.queryset(request).get(pk=unquote(object_id))
            parent_folder = obj.folder
        except self.model.DoesNotExist:
            obj = None
        r = super(FileAdmin, self).delete_view(request=request, object_id=object_id, extra_context=extra_context)
        url = r.get("Location", None)
        if url in ["../../../../", "../../"]:
            if parent_folder:
                url = reverse('admin:filer-directory_listing',
                              kwargs={'folder_id': parent_folder.id})
            else:
                url = reverse('admin:filer-directory_listing-unfiled_images')
            return HttpResponseRedirect(url)
        return r

    def get_urls(self):
        from django.conf.urls.defaults import patterns, url
        urls = super(FileAdmin, self).get_urls()
        # place for file-specific admin views (e.g. an export view)
        url_patterns = patterns('',
        )
        url_patterns.extend(urls)
        return url_patterns

    def get_model_perms(self, request):
        '''
        It seems this is only used for the list view. NICE :-)
        '''
        return {
            'add': False,
            'change': False,
            'delete': False,
        }
#def has_change_permission(self, request, obj=None):
# return False
#def add_view(self, request):
# return HttpResponseRedirect(reverse('admin:filer-directory_listing-root'))
#def changelist_view(self, request, extra_context=None):
# return HttpResponseRedirect(reverse('admin:filer-directory_listing-root'))
|
ANR-COMPASS/shesha | data/par/par4tests/test_sh_base.py | Python | gpl-3.0 | 2,260 | 0 | import shesha.config as conf
simul_name = "bench_scao_sh_16x16_8pix"

# loop
p_loop = conf.Param_loop()
p_loop.set_niter(100)
p_loop.set_ittime(0.002)  # =1/500

# geom
p_geom = conf.Param_geom()
p_geom.set_zenithangle(0.)

# tel
p_tel = conf.Param_tel()
p_tel.set_diam(4.0)
p_tel.set_cobs(0.12)

# atmos
p_atmos = conf.Param_atmos()
p_atmos.set_r0(0.16)
p_atmos.set_nscreens(1)
p_atmos.set_frac([1.0])
p_atmos.set_alt([0.0])
p_atmos.set_windspeed([20.0])
p_atmos.set_winddir([45.])
p_atmos.set_L0([1.e5])

# target
p_target = conf.Param_target()
p_targets = [p_target]
# p_target.set_ntargets(1)
p_target.set_xpos(0.)
p_target.set_ypos(0.)
p_target.set_Lambda(1.65)
p_target.set_mag(10.)

# wfs
p_wfs0 = conf.Param_wfs()
p_wfss = [p_wfs0]
p_wfs0.set_type("sh")
p_wfs0.set_nxsub(8)
p_wfs0.set_npix(8)
p_wfs0.set_pixsize(0.3)
p_wfs0.set_fracsub(0.8)
p_wfs0.set_xpos(0.)
p_wfs0.set_ypos(0.)
p_wfs0.set_Lambda(0.5)
p_wfs0.set_gsmag(8.)
p_wfs0.set_optthroughput(0.5)
p_wfs0.set_zerop(1.e11)
p_wfs0.set_noise(3.)
p_wfs0.set_atmos_seen(1)

# lgs parameters
# p_wfs0.set_gsalt(90*1.e3)
# p_wfs0.set_lltx(0)
# p_wfs0.set_llty(0)
# p_wfs0.set_laserpower(10)
# p_wfs0.set_lgsreturnperwatt(1.e3)
# p_wfs0.set_proftype("Exp")
# p_wfs0.set_beamsize(0.8)

# dm
p_dm0 = conf.Param_dm()
p_dm1 = conf.Param_dm()
p_dms = [p_dm0, p_dm1]
p_dm0.set_type("pzt")
nact = p_wfs0.nxsub + 1
p_dm0.set_nact(nact)
p_dm0.set_alt(0.)
p_dm0.set_thresh(0.3)
p_dm0.set_coupling(0.2)
p_dm0.set_unitpervolt(0.01)
p_dm0.set_push4imat(100.)

p_dm1.set_type("tt")
p_dm1.set_alt(0.)
p_dm1.set_unitpervolt(0.0005)
p_dm1.set_push4imat(10.)

# centroiders
p_centroider0 = conf.Param_centroider()
p_centroiders = [p_centroider0]
p_centroider0.set_nwfs(0)
p_centroider0.set_type("cog")
# p_centroider0.set_type("corr")
# p_centroider0.set_type_fct("model")

# controllers
p_controller0 = conf.Param_controller()
p_controllers = [p_controller0]
p_controller0.set_type("ls")
p_controller0.set_nwfs([0])
p_controller0.set_ndm([0, 1])
p_controller0.set_maxcond(1500.)
p_controller0.set_delay(1.)
p_controller0.set_gain(0.4)
p_controller0.set_modopti(0)
p_controller0.set_nrec(2048)
p_controller0.set_nmodes(216)
p_controller0.set_gmin(0.001)
p_controller0.set_gmax(0.5)
p_controller0.set_ngain(500)
|
luotao1/Paddle | python/paddle/jit/__init__.py | Python | apache-2.0 | 1,478 | 0 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissio | ns and
# limitations under the License.
from __future__ import print_function
from ..fluid.dygraph.jit import save # noqa: F401
from ..fluid.dygraph.jit import load # noqa: F401
from ..fluid.dygraph.jit import TracedLayer # noqa: F401
from ..fluid.dygraph.jit import set_code_level # noqa: F401
from ..fluid.dygraph.jit import set_verbosity # noqa: F401
from ..fluid.dygraph.jit import declarative as to_static # noqa: F401
from ..fluid.dygraph.jit import not_to_static # noqa: F401
from ..fl | uid.dygraph import ProgramTranslator # noqa: F401
from ..fluid.dygraph.io import TranslatedLayer # noqa: F401
from . import dy2static # noqa: F401
__all__ = [ # noqa
'save',
'load',
'TracedLayer',
'to_static',
'ProgramTranslator',
'TranslatedLayer',
'set_code_level',
'set_verbosity',
'not_to_static'
]
|
tensorflow/tensorflow | tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py | Python | apache-2.0 | 9,770 | 0.002764 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cell wrapper v2 implementation."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras import layers
from tensorflow.python.keras.layers import rnn_cell_wrapper_v2
from tensorflow.python.keras.layers.legacy_rnn import rnn_cell_impl
from tensorflow.python.keras.legacy_tf_layers import base as legacy_base_layer
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
def testResidualWrapper(self):
wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
x = ops.convert_to_tensor_v2_with_dispatch(
np.array([[1., 1., 1.]]), dtype="float32")
m = ops.convert_to_tensor_v2_with_dispatch(
np.array([[0.1, 0.1, 0.1]]), dtype="float32")
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
g, m_new = base_cell(x, m)
wrapper_object = wrapper_type(base_cell)
children = wrapper_object._trackable_children()
wrapper_object.get_config() # Should not throw an error
self.assertIn("cell", children)
self.assertIs(children["cell"], base_cell)
g_res, m_new_res = wrapper_object(x, m)
self.evaluate([variables_lib.global_variables_initializer()])
res = self.evaluate([g, g_res, m_new, m_new_res])
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testResidualWrapperWithSlice(self):
wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
x = ops.convert_to_tensor_v2_with_dispatch(
np.array([[1., 1., 1., 1., 1.]]), dtype="float32")
m = ops.convert_to_tensor_v2_with_dispatch(
np.array([[0.1, 0.1, 0.1]]), dtype="float32")
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
g, m_new = base_cell(x, m)
def residual_with_slice_fn(inp, out):
inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
return inp_sliced + out
g_res, m_new_res = wrapper_type(
base_cell, residual_with_slice_fn)(x, m)
self.evaluate([variables_lib.global_variables_initializ | er()])
res_g, res_g_res, res_m_new, res_m_new_res = self.evaluate(
[g, g_res, m_new, m_new_res])
# Residual connections
self.assertAllClose(res_g_res, res_g + [1., 1., 1.])
| # States are left untouched
self.assertAllClose(res_m_new, res_m_new_res)
def testDeviceWrapper(self):
wrapper_type = rnn_cell_wrapper_v2.DeviceWrapper
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = rnn_cell_impl.GRUCell(3)
wrapped_cell = wrapper_type(cell, "/cpu:0")
children = wrapped_cell._trackable_children()
wrapped_cell.get_config() # Should not throw an error
self.assertIn("cell", children)
self.assertIs(children["cell"], cell)
outputs, _ = wrapped_cell(x, m)
self.assertIn("cpu:0", outputs.device.lower())
@parameterized.parameters(
[[rnn_cell_impl.DropoutWrapper, rnn_cell_wrapper_v2.DropoutWrapper],
[rnn_cell_impl.ResidualWrapper, rnn_cell_wrapper_v2.ResidualWrapper]])
def testWrapperKerasStyle(self, wrapper, wrapper_v2):
"""Tests if wrapper cell is instantiated in keras style scope."""
wrapped_cell_v2 = wrapper_v2(rnn_cell_impl.BasicRNNCell(1))
self.assertIsNone(getattr(wrapped_cell_v2, "_keras_style", None))
wrapped_cell = wrapper(rnn_cell_impl.BasicRNNCell(1))
self.assertFalse(wrapped_cell._keras_style)
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
def testWrapperWeights(self, wrapper):
"""Tests that wrapper weights contain wrapped cells weights."""
base_cell = layers.SimpleRNNCell(1, name="basic_rnn_cell")
rnn_cell = wrapper(base_cell)
rnn_layer = layers.RNN(rnn_cell)
inputs = ops.convert_to_tensor_v2_with_dispatch([[[1]]],
dtype=dtypes.float32)
rnn_layer(inputs)
wrapper_name = generic_utils.to_snake_case(wrapper.__name__)
expected_weights = ["rnn/" + wrapper_name + "/" + var for var in
("kernel:0", "recurrent_kernel:0", "bias:0")]
self.assertLen(rnn_cell.weights, 3)
self.assertCountEqual([v.name for v in rnn_cell.weights], expected_weights)
self.assertCountEqual([v.name for v in rnn_cell.trainable_variables],
expected_weights)
self.assertCountEqual([v.name for v in rnn_cell.non_trainable_variables],
[])
self.assertCountEqual([v.name for v in rnn_cell.cell.weights],
expected_weights)
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
def testWrapperV2Caller(self, wrapper):
"""Tests that wrapper V2 is using the LayerRNNCell's caller."""
with legacy_base_layer.keras_style_scope():
base_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicRNNCell(1) for _ in range(2)])
rnn_cell = wrapper(base_cell)
inputs = ops.convert_to_tensor_v2_with_dispatch([[1]], dtype=dtypes.float32)
state = ops.convert_to_tensor_v2_with_dispatch([[1]], dtype=dtypes.float32)
_ = rnn_cell(inputs, [state, state])
weights = base_cell._cells[0].weights
self.assertLen(weights, expected_len=2)
self.assertTrue(all("_wrapper" in v.name for v in weights))
@parameterized.parameters(
[rnn_cell_wrapper_v2.DropoutWrapper, rnn_cell_wrapper_v2.ResidualWrapper])
def testWrapperV2Build(self, wrapper):
cell = rnn_cell_impl.LSTMCell(10)
wrapper = wrapper(cell)
wrapper.build((1,))
self.assertTrue(cell.built)
def testDeviceWrapperSerialization(self):
wrapper_cls = rnn_cell_wrapper_v2.DeviceWrapper
cell = layers.LSTMCell(10)
wrapper = wrapper_cls(cell, "/cpu:0")
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
self.assertDictEqual(config, reconstructed_wrapper.get_config())
self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
def testResidualWrapperSerialization(self):
wrapper_cls = rnn_cell_wrapper_v2.ResidualWrapper
cell = layers.LSTMCell(10)
wrapper = wrapper_cls(cell)
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
self.assertDictEqual(config, reconstructed_wrapper.get_config())
self.assertIsInstance(reconstructed_wrapper, wrapper_cls)
wrapper = wrapper_cls(cell, residual_fn=lambda i, o: i + i + o)
config = wrapper.get_config()
reconstructed_wrapper = wrapper_cls.from_config(config)
# Assert the reconstructed function will perform the math correctly.
self.assertEqual(reconstructed_wrapper._residual_fn(1, 2), 4)
def residual_fn(inputs, outputs):
return inputs * 3 + outputs
wrapper = wrapper_cls(cell, residual_fn=residual_fn)
config |
qiyeboy/SpiderBook | ch01/1.4.1.py | Python | mit | 3,126 | 0.001764 | #coding:utf-8
'''
第一种方式:使用os模块中的fork方式实现多进程
import os
if __name__ == '__main__':
print 'current Process (%s) start ...'%(os.getpid())
pid = os.fork()
if pid < 0:
print 'error in fork'
elif pid == 0:
print 'I am child process(%s) and my parent process is (%s)',(os.getpid(),os.getppid())
else:
print 'I(%s) created a chlid process (%s).',(os.getpid(),pid)
'''
'''
第二种方法:使用multiprocessing模块创建多进程
import os
from multiprocessing import Process
# 子进程要执行的代码
def run_proc(name):
print 'Child process %s (%s) Running...' % (name, os.getpid())
if __name__ == '__main__':
print 'Parent process %s.' % os.getpid()
p_list=[]
for i in range(5):
p = Process(target=run_proc, args=(str(i),))
p_list.append(p)
print 'Process will start.'
p_list[i].start()
for p in p_list:
p.join()
print 'Process end.'
'''
'''
multiprocessing模块提供了一个Pool类来代表进程池对象
from multiprocessing import Pool
import os, time, random
def run_task(name):
print 'Task %s (pid = %s) is running...' % (name, os.getpid())
time.sleep(random.random() * 3)
print 'Task %s end.' % name
if __name__=='__main__':
print 'Current process %s.' % os.getpid()
p = Pool(processes=3)
for i in range(5):
p.apply_async(run_task, args=(i,))
print 'Waiting for all subprocesses done...'
p.close()
p.join()
print 'All subprocesses done.'
'''
'''
Queue进程间通信
from multiprocessing import Process, Queue
import os, time, random
# 写数据进程执行的代码:
def proc_write(q,urls):
print('Process(%s) is writing...' % os.getpid())
for url in urls:
q.put(url)
print('Put %s to queue...' % url)
time.sleep(random.random())
# 读数据进程执行的代码:
def proc_read(q):
print('Process(%s) is reading...' % os.getpid())
while True:
url = q.get(True)
print('Get %s from queue.' % url)
if __name__=='__main__':
# 父进程创建Queue,并传给各个子进程:
q = Queue()
    proc_writer1 = Process(target=proc_write, args=(q,['url_1', 'url_2', 'url_3']))
proc_writer2 = Process(target=proc_write, args=(q,['url_4','url_5','url_6']))
proc_reader = Process(target=proc_read, args=(q,))
# 启动子进程proc_writer,写入:
proc_writer1.start()
proc_writer2.start()
# 启动子进程proc_reader,读取:
proc_reader.start()
# 等待proc_writer结束:
proc_writer1.join()
proc_writer2.join()
# proc_reader进程里是死循环,无法等待其结束,只能强行终止:
proc_reader.terminate()
'''
'''
pipe进程间通信
import multiprocessing
import random
import time,os
def proc_send(pipe,urls):
for url in urls:
print "Process(%s) send: %s" %(os.getpid(),url)
pipe.send(url)
time.sleep(random.random())
def proc_recv(pipe):
while True:
print "Process(%s) rev:%s" %(os.getpid(),pipe.recv())
time.sleep(random.random())
'''
|
mushkevych/scheduler | tests/test_state_machine_recomputing.py | Python | bsd-3-clause | 8,122 | 0.005541 | __author__ = 'Bohdan Mushkevych'
import unittest
try:
import mock
except ImportError:
from unittest import mock
from settings import enable_test_mode
enable_test_mode()
from constants import PROCESS_SITE_HOURLY
from synergy.db.dao.job_dao import JobDao
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.model import job, unit_of_work
from synergy.db.manager.ds_manager import BaseManager
from synergy.system.system_logger import get_logger
from synergy.scheduler.timetable import Timetable
from synergy.scheduler.state_machine_recomputing import StateMachineRecomputing
from tests.state_machine_testing_utils import *
from tests.base_fixtures import create_unit_of_work
from tests.ut_context import PROCESS_UNIT_TEST
class RecomputingSMUnitTest(unittest.TestCase):
    def setUp(self):
        """Build a StateMachineRecomputing wired to mocked collaborators."""
        self.logger = get_logger(PROCESS_UNIT_TEST)
        # autospec'd collaborators: calls are recorded and signatures enforced
        self.time_table_mocked = mock.create_autospec(Timetable)
        self.job_dao_mocked = mock.create_autospec(JobDao)
        self.uow_dao_mocked = mock.create_autospec(UnitOfWorkDao)
        self.ds_mocked = mock.create_autospec(BaseManager)
        # real state machine under test, with its DAO/datasource swapped for mocks
        self.sm_real = StateMachineRecomputing(self.logger, self.time_table_mocked)
        self.sm_real.uow_dao = self.uow_dao_mocked
        self.sm_real.job_dao = self.job_dao_mocked
        self.sm_real.ds = self.ds_mocked
        # wrap selected methods in pass-through Mocks (side_effect delegates to
        # the real implementation) so tests can assert on call counts/arguments
        self.sm_real.update_job = mock.Mock(side_effect=self.sm_real.update_job)
        self.sm_real._process_state_final_run = mock.Mock(side_effect=self.sm_real._process_state_final_run)
        self.sm_real._process_state_in_progress = mock.Mock(side_effect=self.sm_real._process_state_in_progress)
        self.sm_real._compute_and_transfer_to_final_run = \
            mock.Mock(side_effect=self.sm_real._compute_and_transfer_to_final_run)
def tearDown(self):
pass
def test_state_embryo(self):
""" method tests job records in STATE_EMBRYO state"""
self.sm_real.insert_and_publish_uow = then_return_uow
self.ds_mocked.highest_primary_key = mock.MagicMock(return_value=1)
self.ds_mocked.lowest_primary_key = mock.MagicMock(return_value=0)
job_record = get_job_record(job.STATE_EMBRYO, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
self.sm_real.manage_job(job_record)
self.sm_real.update_job.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
def test_duplicatekeyerror_state_embryo(self):
""" method tests job records in STATE_EMBRYO state"""
self.sm_real._insert_uow = then_raise_uw
job_record = get_job_record(job.STATE_EMBRYO, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
try:
self.sm_real.manage_job(job_record)
self.assertTrue(False, 'UserWarning exception should have been thrown')
except UserWarning:
self.assertTrue(True)
def test_future_timeperiod_state_in_progress(self):
""" method tests job records in STATE_IN_PROGRESS state"""
job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_FUTURE_TIMEPERIOD, PROCESS_SITE_HOURLY)
manual_uow = create_unit_of_work(PROCESS_SITE_HOURLY, 0, 1, TEST_ACTUAL_TIMEPERIOD)
self.uow_dao_mocked.get_one = mock.MagicMock(return_value=manual_uow)
self.time_table_mocked.is_job_record_finalizable = mock.MagicMock(return_value=True)
self.sm_real.insert_and_publish_uow = then_return_duplicate_uow
self.sm_real.manage_job(job_record)
self.assertEqual(len(self.sm_real.update_job.call_args_list), 0)
def test_preset_timeperiod_state_in_progress(self):
""" method tests job records in STATE_IN_PROGRESS state"""
self.time_table_mocked.is_job_record_finalizable = mock.MagicMock(return_value=True)
returns = [
create_unit_of_work(PROCESS_SITE_HOURLY, 1, 1, TEST_ACTUAL_TIMEPERIOD, unit_of_work.STATE_REQUESTED),
create_unit_of_work(PROCESS_SITE_HOURLY, 1, 1, TEST_ACTUAL_TIMEPERIOD, unit_of_work.STATE_PROCESSED)
]
def side_effects(*args):
return returns.pop(0)
self.uow_dao_mocked.get_one = mock.MagicMock(side_effect=side_effects)
self.ds_mocked.highest_primary_key = mock.MagicMock(return_value=1)
self.ds_mocked.lowest_primary_key = mock.MagicMock(return_value=0)
self.sm_real.insert_and_publish_uow = then_return_uow
job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
self.sm_real.manage_job(job_record)
self.assertEqual(len(self.sm_real.update_job.call_args_list), 1)
self.assertEqual(len(self.sm_real._compute_and_transfer_to_final_run.call_args_list), 1)
self.assertEqual(len(self.sm_real._process_state_final_run.call_args_list), 0)
def test_transfer_to_final_state_from_in_progress(self):
""" method tests job records in STATE_IN_PROGR | ESS state"""
self.time_table_mocked.is_job_record_finalizable = mock.MagicMock(return_value=True)
returns = [
create_unit_of_work(PROCESS_SITE_HOURLY, 1, 1, TEST_ACTUAL_TIMEPERIOD, unit_of_work.STATE_REQUESTED),
create_unit_of_work(PROCESS_SITE_HOURLY, 1, 1, TEST_ACTUAL_TIMEPERIOD, unit_of_work.STATE_PROCESSED)
]
def side_effects(*args):
return returns.pop(0)
self.uow_dao_mocked.get_one = mock.MagicMock(side_e | ffect=side_effects)
self.ds_mocked.highest_primary_key = mock.MagicMock(return_value=1)
self.ds_mocked.lowest_primary_key = mock.MagicMock(return_value=0)
self.sm_real.insert_and_publish_uow = then_return_duplicate_uow
job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
self.sm_real.manage_job(job_record)
self.assertEqual(len(self.sm_real.update_job.call_args_list), 2)
self.assertEqual(len(self.sm_real._compute_and_transfer_to_final_run.call_args_list), 1)
self.assertEqual(len(self.sm_real._process_state_final_run.call_args_list), 1)
def test_processed_state_final_run(self):
"""method tests job records in STATE_FINAL_RUN state"""
self.uow_dao_mocked.get_one = mock.MagicMock(
side_effect=lambda *_: create_unit_of_work(
PROCESS_SITE_HOURLY, 1, 1, TEST_ACTUAL_TIMEPERIOD, unit_of_work.STATE_PROCESSED))
job_record = get_job_record(job.STATE_FINAL_RUN, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
self.sm_real.manage_job(job_record)
self.assertEqual(len(self.sm_real.update_job.call_args_list), 1)
self.assertEqual(len(self.time_table_mocked.get_tree.call_args_list), 1)
def test_cancelled_state_final_run(self):
"""method tests job records in STATE_FINAL_RUN state"""
self.uow_dao_mocked.get_one = mock.MagicMock(
side_effect=lambda *_: create_unit_of_work(
PROCESS_SITE_HOURLY, 1, 1, TEST_ACTUAL_TIMEPERIOD, unit_of_work.STATE_CANCELED))
job_record = get_job_record(job.STATE_FINAL_RUN, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
self.sm_real.manage_job(job_record)
self.assertEqual(len(self.sm_real.update_job.call_args_list), 1)
self.assertEqual(len(self.time_table_mocked.get_tree.call_args_list), 1)
def test_state_skipped(self):
"""method tests job records in STATE_SKIPPED state"""
job_record = get_job_record(job.STATE_SKIPPED, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
self.sm_real.manage_job(job_record)
self.assertEqual(len(self.sm_real.update_job.call_args_list), 0)
self.assertEqual(len(self.time_table_mocked.get_tree.call_args_list), 0)
def test_state_processed(self):
"""method tests job records in STATE_PROCESSED state"""
job_record = get_job_record(job.STATE_PROCESSED, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
self.sm_real.manage_job(job_record)
self.assertEqual(len(self.sm_real.update_job.call_args_list), 0)
self.assertEqual(len(self.time_table_mocked.get_tree.call_args_list), 0)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
toly/pyrecense | recense.py | Python | mit | 3,217 | 0.001554 | #!/usr/bin/env python
# coding: utf-8
__author__ = 'toly'
import re
import os
import sys
import argparse
from collections import Counter
# Definitions to ignore when counting repeated names: dunder/test fixtures
# that are legitimately redefined in many classes.
BAD_FUNCTIONS = ['__init__', '__unicode__', 'setUp', 'tearDown']
# 'def name(' — captures the defined function/method name.
FUNCTION_DEFINE_REGEXP = re.compile(r'def (\w+)\(')
# 'name(' — captures any call-like token (also matches class instantiations).
FUNCTION_CALL_REGEXP = re.compile(r'(\w+)\(')
def main():
    """
    entry point: scan a project's .py files and report (a) function names
    defined more than once and (b) defined functions called fewer than 5
    times.  NOTE: Python 2 only (print statements, tuple-unpacking lambdas).
    """
    arg_parser = create_argparser()
    args = arg_parser.parse_args()
    functions_definitions = []
    functions_calls = []
    project_files = get_python_files(args.project_directory)
    for file_index, filename in enumerate(project_files):
        for line in get_file_lines(filename):
            # getting functions, classes and their calls
            definitions = [function_name for function_name in FUNCTION_DEFINE_REGEXP.findall(line)]
            if definitions:
                # Skip the whole line if it defines a blacklisted name;
                # 'continue' also prevents a def line from counting as a call.
                has_bad_functions = False
                for bad_function in BAD_FUNCTIONS:
                    if bad_function in definitions:
                        has_bad_functions = True
                if has_bad_functions:
                    continue
                functions_definitions += definitions
                continue
            calls = [function_name for function_name in FUNCTION_CALL_REGEXP.findall(line)]
            if calls:
                functions_calls += calls
    # prepare and output statistics
    definitions_counter = Counter(functions_definitions)
    definitions_set = set(functions_definitions)
    definitions_tuples = definitions_counter.items()
    # Keep only names defined more than once, most-repeated first.
    definitions_tuples = [(definition, count) for
                          (definition, count) in definitions_tuples
                          if count != 1]
    definitions_tuples.sort(key=lambda (function_definition, count_definition): -count_definition)
    print '\nrepited definitons:'
    for definition, count in definitions_tuples:
        print '  %s: %d' % (definition, count)
    calls_tuples = Counter(functions_calls).items()
    # Defined-in-project functions that are called rarely (< 5 times).
    calls_tuples = [(call, count) for
                    (call, count) in calls_tuples
                    if call in definitions_set and count < 5]
    calls_tuples.sort(key=lambda (function_call, count_call): count_call)
    print '\nsmall used functions:'
    for call, count in calls_tuples:
        print '  %s: %d' % (call, count)
def get_python_files(folder):
    """
    Yield the absolute paths of the Python source files under *folder*.

    Skips anything inside a 'migrations' directory and any file whose name
    ends in 'admin.py' (Django boilerplate adds noise to the statistics).
    """
    fullpath = os.path.abspath(folder)
    for dirpath, dirnames, filenames in os.walk(fullpath):
        # Django migration modules are generated code: skip the whole dir.
        if 'migrations' in dirpath:
            continue
        for filename in filenames:
            # endswith is clearer and safer than slicing filename[-3:]
            if not filename.endswith('.py'):
                continue
            if filename.endswith('admin.py'):
                continue
            yield os.path.join(dirpath, filename)
def get_file_lines(filename):
    """
    Yield the lines of *filename* one at a time (newlines preserved).
    """
    with open(filename) as handle:
        for row in handle:
            yield row
def create_argparser():
    """Build the CLI: one required -d/--project-directory option."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-d', '--project-directory',
        type=str,
        required=True,
        help="Project directory for recense")
    return parser
# Script entry point; exit status comes from main()'s return value.
if __name__ == '__main__':
    sys.exit(main())
kevincobain2000/jProcessing | src/jNlp/jCabocha.py | Python | bsd-2-clause | 1,326 | 0.017391 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys, subprocess, os
from subprocess import call
from tempfile import NamedTemporaryFile
def formdamage(sent):
    """Best-effort UTF-8 re-encode of *sent*, silently dropping characters
    that cannot be encoded.  Python 2 semantics: the joined result is a
    byte string.
    """
    rectify = []
    for ch in sent:
        # NOTE(review): bare except is overly broad — presumably meant to
        # catch UnicodeEncodeError/UnicodeDecodeError only; confirm.
        try: rectify.append(ch.encode('utf-8'))
        except: pass
    return ''.join(rectify)
def cabocha(sent):
    """Run the CaboCha dependency parser on *sent* and return its output
    as unicode.

    The sentence is written UTF-8-encoded to a temporary file which is fed
    to `cabocha -f 3` on stdin; the temp file is removed afterwards.
    Python 2 only (uses the `unicode` builtin).
    """
    # Prefer a lab-specific tmp dir when present (site-local quota setup).
    if os.path.exists('/home_lab_local/s1010205/tmp/'):
        temp = NamedTemporaryFile(delete=False, dir='/home_lab_local/s1010205/tmp/')
    else:
        temp = NamedTemporaryFile(delete=False)
    # Fall back to character-by-character encoding when a straight encode fails.
    try: sent = sent.encode('utf-8')
    except: sent = formdamage(sent)
    temp.write(sent)
    temp.close()
    command = ['cabocha', '-f', '3']
    process = subprocess.Popen(command, stdin=open(temp.name,'r'), stdout=subprocess.PIPE)
    output = process.communicate()[0]
    os.unlink(temp.name)  # delete=False above, so clean up explicitly
    return unicode(output, 'utf-8')
def main():
    """Placeholder entry point (module is used via cabocha() directly)."""
    pass
# Demo: parse a sample Japanese sentence and print the CaboCha output.
# Python 2 print statement; the sentence literal must stay byte-identical.
if __name__ == '__main__':
    input_sentence = u'私が五年前にこの団体を仲間たちと結成したのはマルコス疑惑などで日本のODA(政府開発援助)が問題になり、国まかせでなく、民間による国際協力が必要だと痛感したのが大きな理由です。'
    print cabocha(input_sentence).encode('utf-8')
|
ratnania/pigasus | python/fem/color.py | Python | mit | 6,761 | 0.004141 | # -*- coding: UTF-8 -*-
#! /usr/bin/python
__author__="ARA"
__all__ = ['color_operator', 'color_field', 'color', 'manager',
'manager_operators', 'manager_fields']
__date__ ="$Mai 08, 2014 10:50:00 PM$"
from numpy import asarray
class myList:
    """Small list wrapper that also acts as its own restarting iterator.

    The cursor (_currentElt) rewinds to -1 once a full pass completes, so
    the same instance can be looped over repeatedly.
    """

    def __init__(self):
        self._list = []
        self._currentElt = -1

    @property
    def list(self):
        """The underlying Python list."""
        return self._list

    def index(self, obj):
        """Position of *obj* in the list (ValueError if absent)."""
        return self._list.index(obj)

    @property
    def n(self):
        """Number of stored elements."""
        return len(self._list)

    def __next__(self):
        # Nothing stored: iteration ends immediately.
        if not self._list:
            raise StopIteration
        self._currentElt += 1
        # Walked past the end: rewind so a later loop starts over.
        if self._currentElt >= len(self._list):
            self._currentElt = -1
            raise StopIteration
        return self._list[self._currentElt]

    def __iter__(self):
        # The container is its own iterator (stateful cursor).
        return self

    def __getitem__(self, key):
        return self._list[key]

    def append(self, obj):
        self._list.append(obj)

    def reset(self):
        """Drop all stored elements."""
        self._list = []
from .pigasusObject import *
class color(myList, pigasusObject):
    """Base 'color' container: a typed group of pigasus objects that can be
    pushed to the Fortran backend as one unit.  Subclasses constrain what
    may be grouped via is_compatible()."""
    def __new__(typ, *args, **kwargs):
        obj = object.__new__(typ)
        # id is assigned by subclasses from the global color counter.
        obj.id = None
        return obj
    def __init__(self, objects=[]):
        pigasusObject.__init__(self)
        myList.__init__(self)
        # Route through append() so compatibility is checked per object.
        for obj in objects:
            self.append(obj)
        # print "a new color has been created"
    def append(self, obj):
        # Only keep objects the subclass deems compatible; warn otherwise.
        if self.is_compatible(obj):
            myList.append(self,obj)
        else:
            print("Warning: Incompatible object with the current color: will not append the current object.")
    def is_compatible(self, obj):
        """
        must be redefined for each color-object; the base class accepts all.
        """
        return True
    def update(self, my_type, my_subtype=-1):
        """Push this color and its member object ids to the backend."""
        self.com.pyfem.set_color(self.id, my_type, self.n, my_subtype)
        list_id = asarray([Obj.id for Obj in self._list])
        self.com.pyfem.set_color_objects (self.id, list_id, len(list_id))
class color_operator(color):
    """Color grouping operators: all members must share the same operator
    type and the same (trial, test) space pair, fixed by the first member."""
    def __new__(typ, *args, **kwargs):
        obj = object.__new__(typ)
        obj.id = None
        return obj
    def __init__(self, operators=[]):
        # Type/spaces are latched from the first appended operator.
        self._type = None
        self._spaces = None
        color.__init__(self, objects=operators)
        # Register with the global color registry and take the next id.
        self.id = self.com.ncolors
        self.com.ncolors += 1
        self.com.colors.append(self)
        print("### new color_operator \n")
    def is_compatible(self, oper):
        if oper is None:
            return False
        # First operator fixes the signature for the whole color.
        if self._type is None:
            self._type = oper.type
            self._spaces = oper.spaces
        if self._type == oper.type \
           and self._spaces[0] == oper.spaces[0] \
           and self._spaces[1] == oper.spaces[1] :
            return True
        else:
            return False
    def update(self):
        # Local import avoids a circular import at module load time.
        from .constants import COLOR_OPERATOR
        color.update(self, COLOR_OPERATOR)
class color_field(color):
    """Color grouping fields: all members must share the same field type
    and the same function space, fixed by the first appended field."""
    def __new__(typ, *args, **kwargs):
        obj = object.__new__(typ)
        obj.id = None
        return obj
    def __init__(self, fields=[]):
        # Type/space are latched from the first appended field.
        self._type = None
        self._space = None
        color.__init__(self, objects=fields)
        # Register with the global color registry and take the next id.
        self.id = self.com.ncolors
        self.com.ncolors += 1
        self.com.colors.append(self)
        print("### new color_field \n")
    def is_compatible(self, F):
        if F is None:
            return False
        # First field fixes the signature for the whole color.
        if self._type is None:
            self._type = F.type
            self._space = F.space
        if self._type == F.type \
           and self._space == F.space:
            return True
        else:
            return False
    def update(self):
        # Local import avoids a circular import at module load time.
        from .constants import COLOR_FIELD
        if self._type is not None:
            color.update(self, COLOR_FIELD, self._type)
        else:
            # BUGFIX: was `raise("...")`, which raises a *string* — a
            # TypeError in Python 3. Raise a proper exception instead.
            raise ValueError("Found a color_field with subtype equal to None.")
from . import common_obj as com
class manager(color):
    """A color of colors: groups homogeneous color objects and pushes the
    whole registry to the backend in one update() pass."""
    def __init__(self, colors=[]):
        self.com = com.common_obj()
        # The class name of the first member's first element fixes the
        # accepted kind (e.g. all color_operator or all color_field).
        self._type = None
        color.__init__(self, objects=colors)
    def is_compatible(self, col):
        if col is None:
            return False
        # NOTE(review): col[0] raises IndexError when *col* is empty —
        # presumably managers are only fed non-empty colors; confirm.
        if self._type is None:
            self._type = col[0].__class__.__name__
        if self._type == col[0].__class__.__name__:
            return True
        else:
            return False
    def update(self):
        """Publish the global color count, then update each member color."""
        self.com.pyfem.set_ncolors(self.com.ncolors)
        for Obj in self._list:
            Obj.update()
class manager_operators(manager):
    """Manager restricted to operator colors (tag 'oper')."""
    def __init__(self, colors=[]):
        self._type = "oper"
        manager.__init__(self, colors=colors)
    def update(self):
        manager.update(self)
class manager_fields(manager):
    """Manager restricted to field colors (tag 'field')."""
    def __init__(self, colors=[]):
        self._type = "field"
        manager.__init__(self, colors=colors)
    def update(self):
        manager.update(self)
if __name__ == '__main__':
    # --------------------------------------
    # basic class test: myList/color mechanics with plain ints
    # --------------------------------------
    print(">>> basic class test")
    blue = color([2,22,222])
    blue.append(1)
    blue.append(11)
    blue.append(111)
    print("blue.n = ", blue.n)
    for obj in blue:
        print(obj)
    # --------------------------------------
    # --------------------------------------
    # operator class test: group stiffness-like operators sharing spaces
    # --------------------------------------
    print(">>> operator class test")
    from pigasus.gallery.poisson import poisson
    from pigasus.gallery.bilaplacian import bilaplacian
    from caid.cad_geometry import square
    geo = square(n=[3,3], p=[2,2])
    PDE_1 = bilaplacian(geometry=geo)
    # PDE_2/PDE_3 share PDE_1's space V; PDE_4 builds its own.
    PDE_2 = poisson(geometry=geo, V=PDE_1.V)
    PDE_3 = poisson(geometry=geo, V=PDE_1.V)
    PDE_4 = poisson(geometry=geo)
    # print id(PDE_1.V), PDE_1.V.id
    # print id(PDE_2.V), PDE_2.V.id
    # print "---"
    # print PDE_1.operators
    # print PDE_2.operators
    S_1 = PDE_1.D2
    S_2 = PDE_2.stiffness
    S_3 = PDE_3.stiffness
    S_4 = PDE_4.stiffness
    green = color_operator()
    red = color_operator()
    red.append(S_1)
    green.append(S_2)
    green.append(S_3)
    green.append(S_4)
    print("green.n = ", green.n)
    for obj in green:
        print(id(obj))
    # --------------------------------------
    # --------------------------------------
    # field class test: group rhs/unknown fields of the PDEs above
    # --------------------------------------
    print(">>> field class test")
    F_1 = PDE_1.rhs
    F_2 = PDE_2.rhs
    U_3 = PDE_3.unknown
    U_4 = PDE_4.unknown
    white = color_field()
    white.append(F_1)
    white.append(F_2)
    white.append(U_3)
    white.append(U_4)
    print("white.n = ", white.n)
    for obj in white:
        print(id(obj))
    # --------------------------------------
|
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/bricksculpt_v1-2-0/functions/common/images/pixel_effects_numba.py | Python | gpl-3.0 | 4,588 | 0.000654 | # Author: Christopher Gearhart
# System imports
from numba import jit, prange
import numpy as np
# Blender imports
# NONE!
# Module imports
# NONE!
@jit(nopython=True, parallel=True)
def resize_pixels(size, channels, old_pixels, old_size):
    """Nearest-neighbour resample of a flat, row-major pixel buffer.

    size/old_size are (width, height); old_pixels is a flat array of
    length old_size[0]*old_size[1]*channels — TODO confirm layout against
    callers.  Returns a new flat buffer of size[0]*size[1]*channels.
    """
    new_pixels = np.empty(size[0] * size[1] * channels)
    for col in prange(size[0]):
        # Source column by proportional (nearest, truncating) mapping.
        col1 = int((col / size[0]) * old_size[0])
        for row in range(size[1]):
            row1 = int((row / size[1]) * old_size[1])
            pixel_number = (size[0] * row + col) * channels
            pixel_number_ref = (old_size[0] * row1 + col1) * channels
            for ch in range(channels):
                new_pixels[pixel_number + ch] = old_pixels[pixel_number_ref + ch]
    return new_pixels
@jit(nopython=True, parallel=True)
def resize_pixels_preserve_borders(size, channels, old_pixels, old_size):
    """Resample into a buffer that keeps the ORIGINAL dimensions: the scaled
    image is centered and any uncovered border pixels are zero-filled.
    """
    new_pixels = np.empty(len(old_pixels))
    # Center the target 'size' region inside the old canvas.
    offset_col = int((old_size[0] - size[0]) / 2)
    offset_row = int((old_size[1] - size[1]) / 2)
    for col in prange(old_size[0]):
        col1 = int(((col - offset_col) / size[0]) * old_size[0])
        for row in range(old_size[1]):
            row1 = int(((row - offset_row) / size[1]) * old_size[1])
            pixel_number = (old_size[0] * row + col) * channels
            if 0 <= col1 < old_size[0] and 0 <= row1 < old_size[1]:
                # Inside the mapped region: copy from the source pixel.
                pixel_number_ref = (old_size[0] * row1 + col1) * channels
                for ch in range(channels):
                    new_pixels[pixel_number + ch] = old_pixels[pixel_number_ref + ch]
            else:
                # Outside: blank border.
                for ch in range(channels):
                    new_pixels[pixel_number + ch] = 0
    return new_pixels
@jit(nopython=True, parallel=True)
def dilate_pixels_dist(old_pixels, pixel_dist, width, height):
    """Grayscale dilate (or erode, when pixel_dist is negative) over a
    roughly elliptical neighbourhood of half-axes pixel_dist=(dx, dy).

    Single-channel flat buffer assumed (one value per pixel) — TODO confirm.
    """
    # Negative distance flips the max into a min (erosion).
    mult = 1 if pixel_dist[0] > 0 else -1
    new_pixels = np.empty(len(old_pixels))
    # for i in prange(width * height):
    #     x = i / height
    #     row = round((x % 1) * height)
    #     col = round(x - (x % 1))
    for col in prange(width):
        for row in prange(height):
            pixel_number = width * row + col
            max_val = old_pixels[pixel_number]
            for c in range(-pixel_dist[0], pixel_dist[0] + 1):
                for r in range(-pixel_dist[1], pixel_dist[1] + 1):
                    # NOTE(review): `0 < col + c` excludes column/row 0 —
                    # looks like it should be `0 <=`; confirm intent.
                    if not (0 < col + c < width and 0 < row + r < height):
                        continue
                    # Interpolate an effective radius between dx and dy for
                    # this direction, approximating an ellipse.
                    width_amt = abs(c) / pixel_dist[0]
                    height_amt = abs(r) / pixel_dist[1]
                    ratio = (width_amt - height_amt) / 2 + 0.5
                    weighted_dist = pixel_dist[0] * ratio + ((1 - ratio) * pixel_dist[1])
                    dist = ((abs(c)**2 + abs(r)**2) ** 0.5)
                    if dist > weighted_dist + 0.5:
                        continue
                    pixel_number1 = width * (row + r) + (col + c)
                    cur_val = old_pixels[pixel_number1]
                    if cur_val * mult > max_val * mult:
                        max_val = cur_val
            new_pixels[pixel_number] = max_val
    return new_pixels
@jit(nopython=True, parallel=True)
def dilate_pixels_step(old_pixels, pixel_dist, width, height):
    """Separable grayscale dilate (erode when pixel_dist is negative):
    a horizontal max pass of radius pixel_dist[0] followed by a vertical
    max pass of radius pixel_dist[1], i.e. a rectangular structuring
    element.  Single-channel flat buffer assumed — TODO confirm.
    """
    mult = 1 if pixel_dist[0] > 0 else -1
    new_pixels = np.empty(len(old_pixels))
    # for i in prange(width * height):
    #     x = i / height
    #     row = round((x % 1) * height)
    #     col = round(x - (x % 1))
    # Pass 1: horizontal max within +/- pixel_dist[0] columns.
    for col in prange(width):
        for row in range(height):
            pixel_number = width * row + col
            max_val = old_pixels[pixel_number]
            for c in range(-pixel_dist[0], pixel_dist[0] + 1):
                # NOTE(review): `0 <` excludes column 0 — confirm intent.
                if not 0 < col + c < width:
                    continue
                pixel_number1 = width * row + (col + c)
                cur_val = old_pixels[pixel_number1]
                if cur_val * mult > max_val * mult:
                    max_val = cur_val
            new_pixels[pixel_number] = max_val
    # Pass 2: vertical max over the result of pass 1.
    old_pixels = new_pixels
    new_pixels = np.empty(len(old_pixels))
    for col in prange(width):
        for row in range(height):
            pixel_number = width * row + col
            max_val = old_pixels[pixel_number]
            for r in range(-pixel_dist[1], pixel_dist[1] + 1):
                if not 0 < row + r < height:
                    continue
                pixel_number1 = width * (row + r) + col
                cur_val = old_pixels[pixel_number1]
                if cur_val * mult > max_val * mult:
                    max_val = cur_val
            new_pixels[pixel_number] = max_val
    return new_pixels
|
wwood/graftM | graftm/run.py | Python | gpl-3.0 | 42,090 | 0.00575 | #!/usr/bin/env python3
import os
import logging
import tempfile
import shutil
from graftm.sequence_search_results import SequenceSearchResult
from graftm.graftm_output_paths import GraftMFiles
from graftm.search_table import SearchTableWriter
from graftm.sequence_searcher import SequenceSearcher
from graftm.hmmsearcher import NoInputSequencesException
from graftm.housekeeping import HouseKeeping
from graftm.summarise import Stats_And_Summary
from graftm.pplacer import Pplacer
from graftm.create import Create
from graftm.update import Update
from graftm.unpack_sequences import UnpackRawReads
from graftm.graftm_package import GraftMPackage
from graftm.expand_searcher import ExpandSearcher
from graftm.diamond import Diamond
from graftm.getaxnseq import Getaxnseq
from graftm.sequence_io import SequenceIO
from graftm.timeit import Timer
from graftm.clusterer import Clusterer
from graftm.dec | orator import Decorator
from graftm.external_program_suite import ExternalProgramSuite
from graftm.archive import Archive
from graftm.decoy_filter import DecoyFilter
from biom.util import biom_open
# Module-wide timer used to record per-stage runtimes.
T=Timer()
class UnrecognisedSuffixError(Exception):
    """Raised when an input sequence file has an unknown extension."""
    pass
class Run:
PIPELINE_ | AA = "P"
PIPELINE_NT = "D"
_MIN_VERBOSITY_FOR_ART = 3 # with 2 then, only errors are printed
PPLACER_TAXONOMIC_ASSIGNMENT = 'pplacer'
DIAMOND_TAXONOMIC_ASSIGNMENT = 'diamond'
MIN_ALIGNED_FILTER_FOR_NUCLEOTIDE_PACKAGES = 95
MIN_ALIGNED_FILTER_FOR_AMINO_ACID_PACKAGES = 30
DEFAULT_MAX_SAMPLES_FOR_KRONA = 100
NO_ORFS_EXITSTATUS = 128
    def __init__(self, args):
        """Store the parsed CLI namespace and wire up subcommand helpers."""
        self.args = args
        self.setattributes(self.args)
    def setattributes(self, args):
        """Initialize the helpers needed by the selected subcommand.

        'graft' checks external tools, configures the HMM search/alignment
        machinery, and (when a reference package is given) a pplacer wrapper;
        'create' checks tree-building tools and prepares a Create helper.
        """
        self.hk = HouseKeeping()
        self.s = Stats_And_Summary()
        if args.subparser_name == 'graft':
            # NOTE(review): `commands` is unused in this branch — the
            # ExternalProgramSuite call presumably only validates tool
            # availability as a side effect; confirm.
            commands = ExternalProgramSuite(['orfm', 'nhmmer', 'hmmsearch',
                                             'mfqe', 'pplacer',
                                             'ktImportText', 'diamond'])
            self.hk.set_attributes(self.args)
            self.hk.set_euk_hmm(self.args)
            # Optional eukaryote-contamination screen adds the euk HMM.
            if args.euk_check:self.args.search_hmm_files.append(self.args.euk_hmm_file)
            self.ss = SequenceSearcher(self.args.search_hmm_files,
                                      (None if self.args.search_only else self.args.aln_hmm_file))
            self.sequence_pair_list = self.hk.parameter_checks(args)
            if hasattr(args, 'reference_package'):
                self.p = Pplacer(self.args.reference_package)
        elif self.args.subparser_name == "create":
            commands = ExternalProgramSuite(['taxit', 'FastTreeMP',
                                             'hmmalign', 'mafft'])
            self.create = Create(commands)
def summarise(self, base_list, trusted_placements, reverse_pipe, times,
hit_read_count_list, max_samples_for_krona):
'''
summarise - write summary information to file, including otu table, biom
file, krona plot, and timing information
Parameters
----------
base_list : array
list of each of the files processed by graftm, with the path and
and suffixed removed
trusted_placements : dict
dictionary of placements with entry as the key, a taxonomy string
as the value
reverse_pipe : bool
True = run reverse pipe, False = run normal pipeline
times : array
list of the recorded times for each step in the pipeline in the
format: [search_step_time, alignment_step_time, placement_step_time]
hit_read_count_list : array
list containing sublists, one for each file run through the GraftM
pipeline, each two entries, the first being the number of putative
eukaryotic reads (when searching 16S), the second being the number
of hits aligned and placed in the tree.
max_samples_for_krona: int
If the number of files processed is greater than this number, then
do not generate a krona diagram.
Returns
-------
'''
# Summary steps.
placements_list = []
for base in base_list:
# First assign the hash that contains all of the trusted placements
# to a variable to it can be passed to otu_builder, to be written
# to a file. :)
placements = trusted_placements[base]
self.s.readTax(placements, GraftMFiles(base, self.args.output_directory, False).read_tax_output_path(base))
placements_list.append(placements)
#Generate coverage table
#logging.info('Building coverage table for %s' % base)
#self.s.coverage_of_hmm(self.args.aln_hmm_file,
# self.gmf.summary_table_output_path(base),
# self.gmf.coverage_table_path(base),
# summary_dict[base]['read_length'])
logging.info('Writing summary table')
with open(self.gmf.combined_summary_table_output_path(), 'w') as f:
self.s.write_tabular_otu_table(base_list, placements_list, f)
logging.info('Writing biom file')
with biom_open(self.gmf.combined_biom_output_path(), 'w') as f:
biom_successful = self.s.write_biom(base_list, placements_list, f)
if not biom_successful:
os.remove(self.gmf.combined_biom_output_path())
logging.info('Building summary krona plot')
if len(base_list) > max_samples_for_krona:
logging.warn("Skipping creation of Krona diagram since there are too many input files. The maximum can be overridden using --max_samples_for_krona")
else:
self.s.write_krona_plot(base_list, placements_list, self.gmf.krona_output_path())
# Basic statistics
placed_reads=[len(trusted_placements[base]) for base in base_list]
self.s.build_basic_statistics(times, hit_read_count_list, placed_reads, \
base_list, self.gmf.basic_stats_path())
# Delete unnecessary files
logging.info('Cleaning up')
for base in base_list:
directions = ['forward', 'reverse']
if reverse_pipe:
for i in range(0,2):
self.gmf = GraftMFiles(base, self.args.output_directory, directions[i])
self.hk.delete([self.gmf.for_aln_path(base),
self.gmf.rev_aln_path(base),
self.gmf.conv_output_rev_path(base),
self.gmf.conv_output_for_path(base),
self.gmf.euk_free_path(base),
self.gmf.euk_contam_path(base),
self.gmf.readnames_output_path(base),
self.gmf.sto_output_path(base),
self.gmf.orf_titles_output_path(base),
self.gmf.orf_output_path(base),
self.gmf.output_for_path(base),
self.gmf.output_rev_path(base)])
else:
self.gmf = GraftMFiles(base, self.args.output_directory, False)
self.hk.delete([self.gmf.for_aln_path(base),
self.gmf.rev_aln_path(base),
self.gmf.conv_output_rev_path(base),
self.gmf.conv_output_for_path(base),
self.gmf.euk_free_path(base),
self.gmf.euk_contam_path(base),
self.gmf.readnames_output_path(base),
self.gmf.sto_output_path(base),
self.gmf.orf_titles_output_path(base),
self.gmf.orf_output_path(base),
self.gmf.output_for_path(base),
self.gmf.output_rev_pa |
idmillington/layout | tests/test_managers_box.py | Python | mit | 1,665 | 0.016817 | import unittest
from layout.datatypes import *
from layout.managers.box import *
class DummyElement(object):
    """Stand-in layout element: reports a fixed minimum size and records
    the rectangle it was last asked to render into."""

    def __init__(self, size):
        self.size = size

    def get_minimum_size(self, data):
        # The dummy ignores the rendering context entirely.
        return self.size

    def render(self, rect, data):
        # Remember where we were told to draw, for later assertions.
        self.rect = rect
class BoxLMTest(unittest.TestCase):
    """Minimum-size arithmetic of BoxLM with different side slots filled."""

    def test_center_minimum_size(self):
        """The center slot alone dictates the minimum size."""
        box = BoxLM()
        box.center = DummyElement(Point(3, 4))
        self.assertEqual(box.get_minimum_size(None), Point(3, 4))

    def test_vertical_minimum_size(self):
        """Heights of top/center/bottom stack; widths take the max."""
        box = BoxLM()
        box.top = DummyElement(Point(4, 2))
        box.center = DummyElement(Point(3, 4))
        box.bottom = DummyElement(Point(5, 1))
        self.assertEqual(box.get_minimum_size(None), Point(5, 7))

    def test_horizontal_minimum_size(self):
        """Widths of left/center/right add; heights take the max."""
        box = BoxLM()
        box.left = DummyElement(Point(2, 4))
        box.center = DummyElement(Point(3, 4))
        box.right = DummyElement(Point(1, 5))
        self.assertEqual(box.get_minimum_size(None), Point(6, 5))

    def test_margin_minimum_size(self):
        """A margin adds one gap between each pair of occupied slots."""
        box = BoxLM()
        box.top = DummyElement(Point(4, 2))
        box.center = DummyElement(Point(3, 4))
        box.bottom = DummyElement(Point(5, 1))
        box.margin = 1
        self.assertEqual(box.get_minimum_size(None), Point(5, 9))

    def test_all_minimum_size(self):
        """All five slots plus margin combine in both directions."""
        box = BoxLM()
        box.left = DummyElement(Point(2, 5))
        box.top = DummyElement(Point(4, 2))
        box.center = DummyElement(Point(3, 4))
        box.bottom = DummyElement(Point(5, 1))
        box.right = DummyElement(Point(2, 4))
        box.margin = 1
        self.assertEqual(box.get_minimum_size(None), Point(9, 10))
|
tlienart/script2gle | s2gf.py | Python | mit | 5,785 | 0.051167 | from re import search, sub, match
from os.path import join
#
import s2gc
import s2gd
#
###########################
# LAMBDA FUNCTIONS ########
###########################
# True-ish match object when `expr` starts the line and is followed by a
# non-identifier character (so 'plot' does not match 'plotter').
match_start = lambda expr,line: match(r'\s*(%s)[^a-zA-Z0-9]'%expr,line)
# ----------
# RECONSIDER
# ----------
# remove every occurrence of delimiter `d` from string `s`
strip_d = lambda s,d: sub(d,'',s)
# first argument of a call line, with surrounding quotes stripped
getarg1 = lambda l: strip_d(get_fargs(l)[0],'\'')
# pop the next argument: lowercased (getnextarg) or case-preserving
# (getnextargNL), quotes stripped in both cases
getnextarg = lambda lst: lst.pop(0).lower().strip('\'')
getnextargNL = lambda lst: lst.pop(0).strip('\'')
#
###########################
# FUNCTIONS ###############
###########################
#
# # +++++++++++++++++++++++++++++++++++++++++
# # GET FARGS :
# # get arguments of function str
# #
# # <in>: string like plot(x,y,'linewidth',2.0)
# # <out>: list of arguments ['x','y','linewidth','2.0']
def get_fargs(l):
    """Split a call line like ``plot(x,y,'linewidth',2.0)`` into its
    top-level arguments ``['x','y',"'linewidth'",'2.0']``.

    Commas inside brackets/parens/strings do not split; quoted strings are
    kept (with quotes), and a quote directly after a closing quote merges
    (``'son''s'`` -> ``'son's'``).  Raises S2GSyntaxError on unbalanced
    delimiters or a trailing comma.
    """
    # Everything after the opening '(' of the call.
    cur_stack = search(r'^\s*(?:\w+)\(\s*(.*)',l).group(1)
    arg_list,cur_arg = [],''
    prev_char = ''
    while cur_stack:
        cur_char = cur_stack[0]
        is_open = cur_char in s2gd.keyopen
        #
        if is_open:
            # Opening bracket: swallow everything up to its matching closer
            # so inner commas do not split the argument.
            cur_arg += cur_char
            closed_s,rest,f = find_delim(cur_stack[1:],cur_char,s2gd.keyclose[cur_char])
            if f: raise s2gc.S2GSyntaxError(l,'<::found %s but could not close it::>'%cur_char)
            cur_arg += closed_s+s2gd.keyclose[cur_char]
            cur_stack = rest
            continue
        # transpose/string ambiguity: it's a string opener if after a comma or a space otherwise transpose
        if cur_char == '\'' and (match(r'[\s,\']',prev_char) or prev_char==''):
            cur_arg += '' if match(r'\'',prev_char) else '\'' # merging 'son''s' to 'son's'
            closed_s,rest,f = find_delim(cur_stack[1:],'\'','\'')
            if f: raise s2gc.S2GSyntaxError(l,'<::found %s but could not close it::>'%cur_char)
            cur_arg += closed_s+'\''
            cur_stack = rest
            prev_char = ''
            continue
        # end of patterns: either split, break or get next char
        elif cur_char == ',': # splitting comma
            arg_list.append(cur_arg)
            cur_arg,cur_stack,prev_char = '', cur_stack[1:].strip(),''
            if not cur_stack:
                raise s2gc.S2GSyntaxError(l,'<::misplaced comma::>')
        elif cur_char == ')':
            # Closing paren of the call: last argument is complete.
            arg_list.append(cur_arg)
            break
        else:
            cur_arg += cur_char
            # NOTE(review): if the line never closes with ')', the loop ends
            # silently and the final argument is dropped — confirm intent.
            cur_stack = cur_stack[1:] # can throw syntax error (no end parens)
            prev_char = cur_char
    #
    return arg_list
#
# Side function
def find_delim(s, d_open, d_close):
    """Scan *s* for the closer of an already-opened delimiter pair.

    Returns (consumed, remainder, depth): the text up to (excluding) the
    matching *d_close*, the text after it ('' when exhausted), and the
    nesting depth left over (non-zero means the delimiter never closed).
    """
    depth = 1
    consumed = ''
    pos = 0
    while pos < len(s):
        ch = s[pos]
        pos += 1
        # Track nesting so inner pairs do not terminate the scan.
        if ch == d_close:
            depth -= 1
        elif ch == d_open:
            depth += 1
        if depth == 0:
            break
        consumed += ch
    remainder = '' if pos == len(s) else s[pos:]
    return consumed, remainder, depth
#
# +++++++++++++++++++++++++++++++++++++++++++
# SAFE_POP
# tries to pop list, if error, return clarifying
# message
def safe_pop(lst, lbl=''):
    """Pop and return the first element of *lst*.

    Raises S2GSyntaxError naming *lbl* when the list is empty (e.g. an
    option keyword was given without its value).
    """
    try:
        return lst.pop(0)
    except IndexError:
        # BUGFIX: previously referenced an undefined global `line` (NameError
        # whenever the error path fired) and used Python-2-only
        # `except IndexError,e` syntax.
        raise s2gc.S2GSyntaxError(lbl, '<::found %s but no value(s)?::>' % lbl)
#
# +++++++++++++++++++++++++++++++++++++++++++
# ARRAY X
# extract numbers in array string '[a b c]'
#
# <in>: string like '[2.0 -2,3]'
# <out>: numbers ['2.0','-2','3']
# <rmk>: does not interpret expressions
def array_x(s):
    """Extract the numbers of a matlab-style array string.

    '[2.0 -2,3]' -> ['2.0','-2','3']; '1:3' and '0:2:6' expand as
    first:last / first:step:last sequences (values stringified as floats).
    Expressions are not evaluated.
    """
    # Peel optional surrounding brackets: '[ stuff ]' -> 'stuff'.
    inner = match(r'(\s*\[?)([^\[^\]]+)(\]?\s*)', s).group(2)
    # Commas and spaces are interchangeable separators.
    body = sub(',', ' ', inner)
    ncolons = body.count(':')
    if ncolons == 1:
        # 'first:last' sequence with implicit step 1.
        parts = match(r'(^[^:]+):([^:]+$)', body)
        lo, hi = float(parts.group(1)), float(parts.group(2))
        values, pos = [str(lo)], lo
        while pos <= hi - 1:
            pos += 1
            values.append(str(pos))
    elif ncolons == 2:
        # 'first:step:last' sequence.
        parts = match(r'(^[^:]+):([^:]+):([^:]+$)', body)
        lo, inc, hi = float(parts.group(1)), float(parts.group(2)), float(parts.group(3))
        values, pos = [str(lo)], lo
        while pos <= hi - inc:
            pos += inc
            values.append(str(pos))
    else:
        # Plain whitespace/comma separated list.
        values = body.split(' ')
    return [sub(' ', '', v) for v in values]
#
# +++++++++++++++++++++++++++++++++++++++++++
# GET COLOR:
# (internal) read a 'color' option and
# return something in GLE format
def get_color(optstack):
    """Consume a color option from *optstack* and return it in GLE form.

    Accepts '[r,g,b,a?]' triplets/quadruplets, an x11/SVG name optionally
    followed by 'alpha' <value>, a matlab one-letter code, or a bare color
    name.  Returns (color_string, has_transparency, remaining_optstack).
    """
    #!<DEV:EXPR>
    opt = getnextarg(optstack)
    color = ''
    a = 0
    # option given in form [r,g,b,a?]
    rgbsearch = search(r'\[\s*([0-9]+\.?[0-9]*|\.[0-9]*)\s*[,\s]\s*([0-9]+\.?[0-9]*|\.[0-9]*)\s*[,\s]\s*([0-9]+\.?[0-9]*|\.[0-9]*)(.*)',opt)
    if rgbsearch:
        r,g,b = rgbsearch.group(1,2,3)
        # Optional 4th component is alpha; default to opaque.
        alphasearch = search(r'([0-9]+\.?[0-9]*|\.[0-9]*)',rgbsearch.group(4))
        a = '1' if not alphasearch else alphasearch.group(1)
        color = 'rgba(%s,%s,%s,%s)'%(r,g,b,a)
    # option is x11 name + 'alpha' <value>
    elif optstack and optstack[0].lower().strip('\'')=='alpha':
        optstack.pop(0)
        opta = getnextarg(optstack)
        # color name -> rgb triple via svg2rgb dictionary (s2gd.srd);
        # unknown names fall back to mid gray.
        r,g,b = s2gd.srd.get(opt,(128,128,128))
        a = round(float(opta)*100)
        color = 'rgba255(%i,%i,%i,%2.1f)'%(r,g,b,a)
    else: # just a color name
        color = opt
    # matlab one-letter code -> full name (otherwise assume x11 name)
    if color in ['r','g','b','c','m','y','k','w']:
        color = s2gd.md[color]
    # Transparent iff an alpha other than fully-opaque was parsed.
    trsp = False if a==0 or a=='1' else True
    return color,trsp,optstack
#
# +++++++++++++++++++++++++++++++++++++++++++
def close_ellipsis(l,script_stack):
    """Merge matlab '...' line continuations.

    If *l* ends with '...' (optionally followed by a comment), successive
    lines are popped from *script_stack* and appended until a line without
    a continuation is found.  Returns (merged_line, remaining_stack).
    """
    # Capture text before '...' while ignoring a trailing comment.
    regex = r'(.*?)(?:\.\.\.\s*(?:%s.*)?$)'%s2gd.csd['comment']
    srch_cl = search(regex,l)
    if srch_cl:
        line_open = True
        # nloops caps runaway continuations at 100 lines.
        nloops = 0
        l = srch_cl.group(1)
        while line_open and nloops<100:
            nloops += 1
            lt = script_stack.pop(0)
            srch_cl = search(regex,lt)
            if srch_cl:
                l += srch_cl.group(1)
            else:
                line_open = False
                l+=lt
        if line_open:
            raise s2gc.S2GSyntaxError(l,'<::line not closed::>')
    return l, script_stack
|
jacobandreas/nmn2 | extra/vqa/parse.py | Python | apache-2.0 | 4,979 | 0.002008 | #!/usr/bin/env python2
from collections import namedtuple
import itertools
import re
import sys
# One token of the dependency parse: surface word, POS tag, index of its
# head, relation label, and the chain of nmod prepositions toward the root.
Node = namedtuple("Node", ["word", "tag", "parent", "rel", "path"])

# Copula / auxiliary verb forms, used in the wh-patterns and to filter
# predicates.
BE_FORMS = "is|are|was|were|has|have|had|does|do|did|be"

# Question prefixes, tried in order; the first captured group becomes the
# wh-operator of the logical form.
WH_RES = [
    r"^what (\w+) (is|are)",
    r"^what (is|are) the (\w+) of",
    r"^what (\w+) of",
    r"^(what|which|where)",
    r"(%s)" % BE_FORMS,
    r"^(how many)",
    r"^(can|could)"
]

# One Stanford typed-dependency edge: rel(head-i, dependent/TAG-j).
EDGE_RE = re.compile(r"([^()]+)\((.+)-(\d+), (.+)-(\d+)\)")
# Content-word POS tags (nouns, verbs, adjectives).
CONTENT_RE = re.compile(r"NN*|VB*|JJ*")
#CONTENT_RE = re.compile(r"NN|VB|JJ")
# Preferred relation order when several edges target the same token.
REL_PRECEDENCE = ["root", "nsubj", "dobj", "nsubjpass", "dep", "xcomp"]

def precedence(rel):
    """Rank of a dependency relation; lower ranks win on collisions."""
    base = len(REL_PRECEDENCE)
    if "nmod:" in rel:
        return base
    if "conj:" in rel or "acl:" in rel:
        return base + 1
    # Anything else must be one of the core relations above.
    return REL_PRECEDENCE.index(rel)
class LfParser(object):
    """Turns Stanford dependency parses of questions into candidate
    logical forms of the shape ``(wh pred)`` / ``(wh (and p1 p2 ...))``."""
    def __init__(self, use_relations, max_leaves, max_conjuncts):
        # use_relations: wrap predicates with their nmod preposition.
        # max_leaves: cap on distinct predicates considered (None = all).
        # max_conjuncts: cap on conjunction size (but see NOTE in make_lfs).
        self.use_relations = use_relations
        self.max_leaves = max_leaves
        self.max_conjuncts = max_conjuncts
    def extract_nodes(self, content):
        """Parse dependency edge lines into {dependent_index: Node},
        keeping only the highest-precedence edge per dependent."""
        nodes = {}
        for edge in content:
            rel, w1, i1, w2, i2 = EDGE_RE.match(edge.replace("'", "")).groups()
            i1 = int(i1)
            i2 = int(i2)
            # The dependent token arrives as word/TAG.
            w2, t2 = w2.rsplit("/", 1)
            node = Node(w2.lower(), t2, i1, rel, [])
            if i2 in nodes:
                # Keep whichever relation ranks higher (lower precedence()).
                if precedence(node.rel) < precedence(nodes[i2].rel):
                    nodes[i2] = node
            else:
                nodes[i2] = node
        return nodes
    def annotate_paths(self, nodes):
        """Walk each node toward the root, recording the prepositions of any
        nmod: relations along the way into node.path (mutated in place).
        The `hit` set guards against cycles in malformed parses."""
        for i, node in nodes.items():
            path = node.path
            at = node
            hit = {i}
            while at.parent in nodes:
                if "nmod:" in at.rel:
                    path.append(at.rel.split(":")[1])
                if at.parent in hit:
                    break
                hit.add(at.parent)
                at = nodes[at.parent]
    def extract_predicates(self, nodes):
        """Collect content-word predicates (nouns/verbs/adjectives that are
        not be-forms), optionally wrapped with their first nmod preposition.
        NOTE(review): list(set(...)) makes the order non-deterministic."""
        preds = []
        for i, node in sorted(nodes.items()):
            if not CONTENT_RE.match(node.tag):
                continue
            if re.match(BE_FORMS, node.word):
                continue
            pred = node.word
            if len(node.path) > 0 and self.use_relations:
                pred = "(%s %s)" % (node.path[0], pred)
            preds.append(pred)
        return list(set(preds))
    def make_lfs(self, wh, content):
        """Build candidate logical forms for operator *wh* over the
        predicates extracted from the dependency edges in *content*."""
        nodes = self.extract_nodes(content)
        self.annotate_paths(nodes)
        predicates = self.extract_predicates(nodes)
        if self.max_leaves is not None:
            predicates = predicates[:self.max_leaves]
        out = []
        # NOTE(review): `max(...)` looks like it should be
        # `min(self.max_conjuncts, len(predicates)) + 1` — as written, long
        # predicate lists produce conjunctions larger than max_conjuncts.
        # Confirm before changing.
        for i in range(1, max(self.max_conjuncts + 1, len(predicates))):
            comb = itertools.combinations(predicates, i)
            for pred_comb in comb:
                if len(pred_comb) == 1:
                    out.append("(%s %s)" % (wh, pred_comb[0]))
                else:
                    out.append("(%s (and %s))" % (wh, " ".join(pred_comb)))
        return out
    def parse_all(self, stream):
        """Yield one list of candidate logical forms per question.

        Input format (Stanford parser output): the question words, a blank
        line, the dependency edges, then another blank line ending the
        record.  Questions matching no wh-pattern yield ['(_what _thing)'].
        """
        queries = []
        query_lines = []
        got_question = False
        question = None
        for line in stream:
            sline = line.strip()
            if sline == "" and not got_question:
                # First blank line: the buffered lines were the question.
                got_question = True
                question = query_lines[0].lower()
                query_lines = []
            elif sline == "":
                # Second blank line: buffered lines were dependency edges.
                got_question = False
                queries = None
                for expr in WH_RES:
                    m = re.match(expr, question)
                    if m is None:
                        continue
                    wh = m.group(1).replace(" ", "_")
                    if re.match(BE_FORMS, wh):
                        wh = "is"
                    # Skip the tokens consumed by the wh-pattern.
                    n_expr_words = len(expr.split(" "))
                    content = query_lines[n_expr_words:]
                    queries = self.make_lfs(wh, content)
                    success = True  # NOTE(review): unused variable
                    break
                if not queries:
                    queries = ["(_what _thing)"]
                yield queries
                query_lines = []
            else:
                query_lines.append(sline)
"""
This script consumes output from the Stanford parser on stdin. I run the parser as
java -mx150m -cp "$scriptdir/*:" edu.stanford.nlp.parser.lexparser.LexicalizedParser \
-outputFormat "words,typedDependencies" -outputFormatOptions "stem,collapsedDependencies,includeTags" \
-sentences newline \
edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz \
$*
"""
# Entry point: read Stanford-parser output from stdin and print one
# semicolon-separated list of candidate logical forms per question.
if __name__ == "__main__":
    #parser = LfParser(use_relations=True, max_conjuncts=2, max_leaves=None)
    parser = LfParser(use_relations=False, max_conjuncts=2, max_leaves=2)
    for parses in parser.parse_all(sys.stdin):
        print ";".join(parses)
|
BryceBesler/ENEL453-Assembler | testCases/TestCasesRan/14-03-14--21-39/assembly.py | Python | mit | 11,254 | 0.019904 | '''
Opcode d(11:8) Operand d(7:0) Operation
0 8 bits representing a constant Load constant to Reg0
1 8 bits representing a constant Load constant to Reg1
2 d7 selects register Reg0 or Reg1 Load value of selected register to the ALU accumulator
3 d7 selects register Reg0 or Reg1 Add value of selected register to ALU accumulator and store result in accumulator
4 d7 selects register Reg0 or Reg1 Subtract value of selected register to ALU accumulator and store result in accumulator
5 Not used Accumulator shift right
6 Not used Accumulator shift left
7 d7 selects register Reg0 or Reg1 AND accumulator with selected register and store result in accumulator
8 d7 selects register Reg0 or Reg1 OR accumulator with selected register and store result in accumulator
9 Not used Invert Accumulator bits
A 8 bits represent address in
instruction memory Jump to address
B 8 bits represent address in
instruction memory Jump to address if Accumulator is all zeros
C 8 bits represent address in
instruction memory Jump subroutine (program counter value is stored for the subroutine return)
D Not used Return from subroutine (restore value of program counter)
E D(3:0) selects either Reg0 '000' or
Reg1 '001' or output port P1 '010' or
output port P2 '011' or UART transmit
register '100' Write value in accumulator to selected destination
F d7 selects register Reg0 or Reg1 Store UART received byte into selected register
'''
# Imports
import argparse;
import os;
# Constants and Globals
# Assembly source files use the ".ass" extension.
FILE_EXTENSION = str('ass');
DOT_FILE_EXTENSION = str('.{}'.format(FILE_EXTENSION));
# Operands are unsigned 8-bit values.
CONSTANT_MAX = 255; # Largest number in 8 unsigned bits
CONSTANT_MIN = 0; # Only using unsigned bits
ADDRESS_MAX = 255; # Largest address
ADDRESS_MIN = 0; # Can't go lower than zero
# Expected number of whitespace-separated tokens (mnemonic included) for each
# instruction; used for argument-count validation in getInstructionCode().
INSTRUCTION_LENGTH_DICT = {
    'load' : 3,
    'move' : 2,
    'add' : 2,
    'sub' : 2,
    'sr' : 1,
    'sl' : 1,
    'and' : 2,
    'or' : 2,
    'inv' : 1,
    'j' : 2,
    'jaz' : 2,
    'jal' : 2,
    'jr' : 1,
    'wri' : 2,
    'str' : 2
};
# Helper functions
def doExit(error):
print "ERROR: {}".format(error);
print "Exiting..."
os.sys.exit(1);
def printInstructions():
    """Print the assembler's instruction-set and register reference."""
    print """Instruction Set:
load [Constant] [Reg0, Reg1] {load constant to register}
move [Reg0, Reg1] {To Accum}
add [Reg0, Reg1] {To Accum}
sub [Reg0, Reg1] {From Accum}
sl {Shift accum left}
sr {Shift accum right}
and [Reg0, Reg1] {With Accum}
or [Reg0, Reg1] {With Accum}
inv {Invert Accum}
j [Address] {Jump to address}
jaz [Address] {Jump to address if accum zero}
jal [Address] {Jump and link (sub routine)}
jr {Jump return (From sub routine)}
wri [Reg0, Reg1, P1, P2, Tx] {Write accum to register}
str [Reg0, Reg1] {Store Rx from UART into register}
Examples:
load 25 Reg0 #Comment, Comment, Comment
load 1 Reg1
move Reg0
add Reg1
sub Reg0
wri P1 # p+++++++
List of Register:
Reg0: General Purpose Register
Reg1: General Purpose Register
P1: Register reading into first digit of seven segment display
P2: Register reading into second digit of seven segment display
UART: UART send register
Rx: UART receive register"""
    return;
def getInstructionCode(lineList, LineCount):
'''Return the instruction as a binary value'''
instruction = '{0:012b}'.format(0);
if(len(lineList) != INSTRUCTION_LENGTH_DICT[lineList[0]]):
doExit("Invalid number of arguments to {0} instruciton on line {1}".format(lineList[0], LineCount));
code = lineList[0];
try:
if(code == 'load'):
# Test bounds of constant
if(int(lineList[1]) > CONSTANT_MAX or int(lineList[1]) < CONSTANT_MIN):
doExit("Invalid constant range for instruction on line {}".format(LineCount));
else:
#Determine which register we are storing it in
if(lineList[2] == 'reg0'):
instruction = '0000' + '{:08b}'.format(int(lineList[1]));
elif(lineList[2] == 'reg1'):
instruction = '0001' + '{:08b}'.format(int(lineList[1]));
else:
doExit("Unkown register {0} for load instruciton on line {1}".format(lineList[2], LineCount));
elif(code == 'move'):
#Determine which register we are storing it in
if(lineList[1] == 'reg0'):
instruction = '0010' + '00000000';
elif(lineList[1] == 'reg1'):
instruction = '0010' + '10000000';
else:
doExit("Unkown register {0} for move instruciton on line {1}".format(lineList[1], LineCount));
elif(code == 'add'):
if(lineList[1] == 'reg0'):
instruction = '0011' + '00000000';
elif(lineList[1] == 'reg1'):
instruction = '0011' + '10000000';
else:
doExit("Unkown register {0} for add instruciton on line {1}".format(lineList[1], LineCount));
elif(code == 'sub'):
if(lineList[1] == 'reg0'):
instruction = '0100' + '00000000';
elif(lineList[1] == 'reg1'):
instruction = '0100' + '10000000';
else:
doExit("Unkown register {0} for sub instruciton on line {1}".format(lineList[1], LineCount));
elif(code == 'sr'):
instruction = '0101' + '00000000';
elif(code == 'sl'):
instruction = '0110' + '00000000';
elif(code == 'and'):
if(lineList[1] == 'reg0'):
instruction = '0111' + '00000000';
elif(lineList[1] == 'reg1'):
instruction = '0111' + '10000000';
else:
doExit("Unkown register {0} for and instruciton on line {1}".format(lineList[1], LineCount));
elif(code == 'or'):
if(lineList[1] == 'reg0'):
instruction = '1000' + '00000000';
elif(lineList[1] == 'reg1'):
instruction = '1000' + '10000000';
else:
doExit("Unkown register {0} for or instruciton on line {1}".format(lineList[1], LineCount));
elif(code == 'inv'):
instruction = '1001' + '00000000';
elif(code == 'j'):
if(int(lineList[1]) > ADDRESS_MAX or int(lineList[1]) < ADDRESS_MIN):
doExit("Invalid address range for instruction on line {}".format(LineCount));
else:
instruction = '1010' + '{:08b}'.format(int(lineList[1]));
elif(code == 'jaz'):
if(int(lineList[1]) > ADDRESS_MAX or int(lineList[1]) < ADDRESS_MIN):
doExit("Invalid address range for instruction on line {}".format(LineCount));
else:
instruction = '1011' + '{:08b}'.format(int(lineList[1]));
elif(code == 'jal'):
if(int(lineList[1]) > ADDRESS_MAX or int(lineList[1]) < ADDRESS_MIN):
doExit("Invalid address range for instruction on line {}".format(LineCount));
else:
instruction = '1100' + '{:08b}'.format(int(lineList[1]));
elif(code == 'jr'):
instruction = '1101' + '00000000';
elif(code == 'wri'):
if(lineList[1] == 'reg0'):
instruction = '1110' + '00000' + '000';
elif(lineList[1] == 'reg1'):
instruction = '1110' + '00000' + '001';
elif(lineList[1] == 'p1'):
instruction = '1110' + '00000' + '010';
elif(lineList[1] == 'p2'):
instruction = '1110' + '00000' + '011';
elif(lineList[1] == 'tx'):
instruction = '1110' + '00000' + |
pbfy0/visvis | core/baseWibjects.py | Python | bsd-3-clause | 5,953 | 0.014782 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" Module baseWibjects
Defines the Box class and the DraggableBox class.
"""
import OpenGL.GL as gl
from visvis.core import Wibject
from visvis.core import misc
from visvis.utils.pypoints import Pointset
class Box(Wibject):
    """ Box(parent)
    A simple, multi-purpose, rectangle object.
    It implements functionality to draw itself. Most wibjects will
    actually inherit from Box, rather than from Wibject.
    """
    def __init__(self, parent):
        Wibject.__init__(self, parent)
        # Default look: a 1-pixel wide black edge.
        self._edgeColor = (0,0,0)
        self._edgeWidth = 1.0
    @misc.PropWithDraw
    def edgeColor():
        """ Get/Set the edge color of the wibject.
        """
        def fget(self):
            return self._edgeColor
        def fset(self, value):
            # getColor() normalizes the usual color specs, raising on bad input.
            self._edgeColor = misc.getColor(value, 'setting edgeColor')
        return locals()
    @misc.PropWithDraw
    def edgeWidth():
        """ Get/Set the edge width of the wibject.
        """
        def fget(self):
            return self._edgeWidth
        def fset(self, value):
            self._edgeWidth = float(value)
        return locals()
    def _GetBgcolorToDraw(self):
        """ Can be overloaded to indicate mouse over in buttons.
        """
        return self._bgcolor
    def OnDraw(self, fast=False):
        """ Draw the filled background quad and the optional edge outline.
        The *fast* flag is part of the draw API but unused here.
        """
        # get dimensions
        w, h = self.position.size
        # draw plane
        if self._bgcolor:
            # Get positions
            x1, x2 = 0, w
            y1, y2 = 0, h
            # Set color
            clr = self._GetBgcolorToDraw()
            gl.glColor(clr[0], clr[1], clr[2], 1.0)
            #
            gl.glBegin(gl.GL_POLYGON)
            gl.glVertex2f(x1,y1)
            gl.glVertex2f(x1,y2)
            gl.glVertex2f(x2,y2)
            gl.glVertex2f(x2,y1)
            gl.glEnd()
        # prepare
        gl.glDisable(gl.GL_LINE_SMOOTH)
        # draw edges
        if self.edgeWidth and self.edgeColor:
            # Get positions
            # Draw edges on top of the first and last pixel
            x1, x2 = 0.5, w-0.5
            y1, y2 = 0.5, h-0.5
            # Set color and line width
            clr = self.edgeColor
            gl.glColor(clr[0], clr[1], clr[2], 1.0)
            gl.glLineWidth(self.edgeWidth)
            #
            gl.glBegin(gl.GL_LINE_LOOP)
            gl.glVertex2f(x1,y1)
            gl.glVertex2f(x1,y2)
            gl.glVertex2f(x2,y2)
            gl.glVertex2f(x2,y1)
            gl.glEnd()
        # clean up
        gl.glEnable(gl.GL_LINE_SMOOTH)
class DraggableBox(Box):
    """ DraggableBox(parent)
    A Box wibject, but draggable and resizable.
    Intended as a base class.
    Dragging starts from the 3x3 dot grid in the top-left corner; resizing
    starts from the dot triangle in the bottom-right corner (see
    _DragCalcDots / _DragOnDown).
    """
    def __init__(self, parent):
        Box.__init__(self, parent)
        # Make me draggable
        self._dragStartPos = None    # last mouse position while dragging
        self._dragResizing = False   # True while resizing (not moving)
        self._dragMouseOver = False  # True while the mouse hovers the box
        # Prepare points to draw
        self._DragCalcDots()
        # Bind to own events
        self.eventMouseDown.Bind(self._DragOnDown)
        self.eventMouseUp.Bind(self._DragOnUp)
        self.eventEnter.Bind(self._DragOnEnter)
        self.eventLeave.Bind(self._DragOnLeave)
        self.eventPosition.Bind(self._DragCalcDots)
        # Bind to motion events (fire while the mouse moves over the wibject)
        self.eventMotion.Bind(self._DragOnMove)
    def _DragCalcDots(self, event=None):
        """ Recompute the grip-dot positions for the current size. """
        w,h = self.position.size
        dots = Pointset(2)
        # 3x3 grid in the top-left corner (move grip)
        dots.append(3,3); dots.append(3,6); dots.append(3,9)
        dots.append(6,3); dots.append(6,6); dots.append(6,9)
        dots.append(9,3); dots.append(9,6); dots.append(9,9)
        # triangle in the bottom-right corner (resize grip)
        dots.append(w-3, h-3); dots.append(w-3, h-6); dots.append(w-3, h-9)
        dots.append(w-6, h-3); dots.append(w-6, h-6);
        dots.append(w-9, h-3);
        self._dots = dots
    def _DragOnEnter(self, event):
        # Show the grip dots while hovering.
        self._dragMouseOver = True
        self.Draw()
    def _DragOnLeave(self, event):
        self._dragMouseOver = False
        self.Draw()
    def _DragOnDown(self, event):
        """ Start a move (top-left grip) or a resize (bottom-right grip). """
        pos = self.position
        # Store position if clicked on draggable arreas
        if event.x < 10 and event.y < 10:
            self._dragStartPos = event.absx, event.absy
        elif event.x > pos.width-10 and event.y > pos.height-10:
            self._dragStartPos = event.absx, event.absy
            self._dragResizing = True
    def _DragOnMove(self, event):
        """ Apply the mouse delta to the position (move) or size (resize). """
        if not self._dragStartPos:
            return
        elif self._dragResizing:
            self.position.w += event.absx - self._dragStartPos[0]
            self.position.h += event.absy - self._dragStartPos[1]
            event.owner.Draw()
        else: # dragging
            self.position.x += event.absx - self._dragStartPos[0]
            self.position.y += event.absy - self._dragStartPos[1]
            event.owner.Draw()
        self._dragStartPos = event.absx, event.absy
    def _DragOnUp(self, event):
        # End any move/resize in progress.
        self._dragStartPos = None
        self._dragResizing = False
    def OnDraw(self):
        # Draw the plain box first, then overlay the grip dots when hovered.
        Box.OnDraw(self)
        if self._dragMouseOver:
            # Prepare
            gl.glColor(0,0,0,1)
            gl.glPointSize(1)
            gl.glDisable(gl.GL_POINT_SMOOTH)
            # Draw dots
            if len(self._dots):
                gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
                gl.glVertexPointerf(self._dots.data)
                gl.glDrawArrays(gl.GL_POINTS, 0, len(self._dots))
                gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
|
MarouenMechtri/CNG-Manager | pyocni/backends/linkcng_backend.py | Python | apache-2.0 | 16,778 | 0.009298 | # Copyright 2013 Institut Mines-Telecom - Telecom SudParis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jul 15, 2013
@author: Marouen Mechtri
@contact: marouen.mechtri@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache Lic | ense, Version | 2.0
"""
""" Note: entity represent the linkcng category.
-Attributes of this category are:
- name
- cngSRC //Path of the cng source category
- cngDST //Path of the cng destination category
- publicaddrCNGsrc
- publicaddrCNGdst
- privateNetToCNGsrc
- privateNetToCNGdst
- linkType //3 types of link: openvpn,ipsec and openflow
- tunneladdrSrc //will be initialised by the create fonction based on linkType
- tunneladdrDst //will be initialised by the create fonction based on linkType
- tunnelportSrc //will be initialised by the create fonction based on linkType
- tunnelportDst //will be initialised by the create fonction based on linkType
- tunneladdrprefix //will be initialised by the create fonction based on linkType
- tunnelinterface //will be initialised by the create fonction based on linkType
- tunnelauthenticationkey //will be initialised by the create fonction based on linkType
- intercng //Path of the intercng category
- account
- state
"""
#import pyocni.backend.backend as backend
from pyocni.backends.backend import backend_interface
import pyocni.pyocni_tools.config as config
from pyocni.backends.clientPyocni import OCCIinterfaceClient
# import drivers (openvpn, ipsec, openflow)
from pyocni.backends.drivers.DriverOVPN import *
from pyocni.backends.drivers.DriverIPSEC import *
# getting the Logger
logger = config.logger
import pyocni.pyocni_tools.config as config
class backend(backend_interface):
    def create(self, entity):
        '''
        Create an entity (Resource or Link)
        Fills in the tunnel attributes of the linkcng entity according to its
        linkType, then PUTs the enriched entity back through the OCCI
        interface.
        '''
        if entity['attributes']['occi']['linkcng']['linkType'] == "openvpn":
            # Derive unique tunnel addresses/ports from the document count of
            # the PyOCNI store (the "- 10" presumably skips bootstrap
            # documents -- TODO confirm the offset).
            database = config.get_PyOCNI_db()
            nb_tunnelAdd = database.info()['doc_count'] - 10
            firstdecimal = nb_tunnelAdd%255
            seconddecimal = nb_tunnelAdd/255
            if firstdecimal == 0:
                firstdecimal=1
            elif firstdecimal >= 254:
                # Keep the Dst octet (firstdecimal+1) below 255: wrap to .1
                # and move to the next /24.
                firstdecimal=1
                seconddecimal+=1
            entity['attributes']['occi']['linkcng']['tunneladdrSrc']="192.168." + str(seconddecimal) + "." + str(firstdecimal)
            entity['attributes']['occi']['linkcng']['tunneladdrDst']="192.168." + str(seconddecimal) + "." + str(firstdecimal+1)
            # One UDP port per link pair, starting at 9612.
            entity['attributes']['occi']['linkcng']['tunnelportSrc']=str(9612+(nb_tunnelAdd/2))
            entity['attributes']['occi']['linkcng']['tunnelportDst']=str(9612+(nb_tunnelAdd/2))
            entity['attributes']['occi']['linkcng']['tunnelinterface']="vtun" + str(nb_tunnelAdd)
            logger.debug('\n[linkCNG:start]-----Setting OpenVPN tunnel parameters')
        elif entity['attributes']['occi']['linkcng']['linkType'] == "ipsec":
            # IPSEC links use fixed tunnel parameters.
            entity['attributes']['occi']['linkcng']['tunneladdrSrc']="10.10.1.1"
            entity['attributes']['occi']['linkcng']['tunneladdrDst']="10.10.1.2"
            entity['attributes']['occi']['linkcng']['tunneladdrprefix']="30"
            entity['attributes']['occi']['linkcng']['tunnelinterface']="tun0"
            entity['attributes']['occi']['linkcng']['tunnelauthenticationkey']="testkey"  # NOTE(review): hard-coded key
            logger.debug('\n[linkCNG:start]-----Setting IPSEC tunnel parameters')
        elif entity['attributes']['occi']['linkcng']['linkType'] == "openflow":
            # No tunnel attributes are set for OpenFlow links here.
            logger.debug('\n[linkCNG:start]-----Setting OpenFlow tunnel parameters')
        # Persist the enriched entity via the OCCI REST interface.
        clientupdate = OCCIinterfaceClient(config.OCNI_IP, config.OCNI_PORT, 'linkcng', entity)
        clientupdate.PUT()
        logger.debug('\n[linkCNG]-----Receiving POST linkcng')
        logger.debug('***The create operation of the linkcng_backend***')
    def read(self, entity):
        '''
        Get the Entity's information
        This backend hook does no work beyond logging; presumably the stored
        entity is served by the OCCI layer itself (TODO confirm).
        '''
        logger.debug('\n[linkCNG]-----Receiving GET linkcng')
        logger.debug('***The read operation of the linkcng_backend***')
    def update(self, old_entity, new_entity):
        '''
        Update an Entity's information
        No backend-side reconfiguration happens here; the method only logs
        the call (old_entity/new_entity are not used).
        '''
        logger.debug('\n[linkCNG]-----Receiving PUT linkcng')
        logger.debug('***The update operation of the linkcng_backend***')
def delete(self, entity):
'''
Delete an Entity
'''
if(entity['attributes']['occi']['linkcng']['state'] == "0"):
logger.debug('\n[linkCNG:stop]-----link is not configured')
else:
clientaction = OCCIinterfaceClient(config.OCNI_IP, config.OCNI_PORT, 'cng', {})
clientaction.action("stop", clientaction.GetElement_pathuuid(entity['attributes']['occi']['linkcng']['cngSRC'])['uuid'])
clientaction = OCCIinterfaceClient(config.OCNI_IP, config.OCNI_PORT, 'cng', {})
clientaction.action("stop", clientaction.GetElement_pathuuid(entity['attributes']['occi']['linkcng']['cngDST'])['uuid'])
if entity['attributes']['occi']['linkcng']['linkType'] == "openvpn":
cngsrcDriver = ovpnDriver()
cngdstDriver = ovpnDriver()
publicaddrCNGsrc = entity['attributes']['occi']['linkcng']['publicaddrCNGsrc']
publicaddrCNGdst = entity['attributes']['occi']['linkcng']['publicaddrCNGdst']
privateNetToCNGsrc = entity['attributes']['occi']['linkcng']['privateNetToCNGsrc']
privateNetToCNGdst = entity['attributes']['occi']['linkcng']['privateNetToCNGdst']
tunnelinterface = entity['attributes']['occi']['linkcng']['tunnelinterface']
cngsrcDriver.stop_site_to_site_OVPN(cngsrcDriver, publicaddrCNGsrc, tunnelinterface, privateNetToCNGdst)
cngdstDriver.stop_site_to_site_OVPN(cngdstDriver, publicaddrCNGdst, tunnelinterface, privateNetToCNGsrc)
logger.debug('\n[linkCNG:stop]-----End Release of OpenVPN link')
elif entity['attributes']['occi']['linkcng']['linkType'] == "ipsec":
cngsrcDriver = ipsecDriver()
cngdstDriver = ipsecDriver()
publicaddrCNGsrc = entity['attributes']['occi']['linkcng']['publicaddrCNGsrc']
publicaddrCNGdst = entity['attributes']['occi']['linkcng']['publicaddrCNGdst']
privateNetToCNGsrc = entity['attributes']['occi']['linkcng']['privateNetToCNGsrc']
privateNetToCNGdst = entity['attributes']['occi']['linkcng']['privateNetToCNGdst']
tunnelinterface = entity['attributes']['occi']['linkcng']['tunnelinterface']
cngsrcDriver.stop_gw(cngsrcDriver, publicaddrCNGsrc, publicaddrCNGdst, privateNetToCNGdst, tunnelinterface)
cngdstDriver.stop_gw(cngdstDriver, publicaddrCNGdst, publicaddrCNGsrc, privateNetToCNGsrc, tunnelinterface)
logger.debug('\n[linkCNG:start]-----End Release of IPSEC link')
elif entity['attributes']['occi']['linkcng']['linkType'] == "openflow":
logger.debug('\n[linkCNG:stop]-----End Release of OpenFlow link')
logger.debug('\n[li |
hzlf/openbroadcast | website/apps/spf/management/commands/spf_lookup.py | Python | gpl-3.0 | 5,465 | 0.003843 | #-*- coding: utf-8 -*-
import os
import sys
import time
import re
from django.core.files import File as DjangoFile
from django.core.management.base import BaseCommand, NoArgsCommand
from optparse import make_option
from obp_legacy.models import *
from spf.models import Request, Match
from spf.util.lookup import MediaLookup, LegacyLookup
from spf.util.match import MediaMatch
from datetime import datetime
DEFAULT_LIMIT = 500
DEFAULT_OFFSET = 0
class SpfWorker(object):
    """Batch worker behind this management command.
    Runs one of four actions over spf Request rows: 'lookup' (MediaLookup),
    'match' (MediaMatch on previous results), 'reset' (clear lookup state
    and delete all Match rows) or 'legacy' (LegacyLookup, results are only
    printed, not persisted).
    """
    def __init__(self, *args, **kwargs):
        self.action = kwargs.get('action')
        # Batch window used when no explicit swp_id is given.
        self.limit = DEFAULT_LIMIT
        self.offset = DEFAULT_OFFSET
        try:
            self.swp_id = int(kwargs.get('swp_id'))
        except:
            # swp_id absent or non-numeric -> operate on a batch instead.
            self.swp_id = None
        self.verbosity = int(kwargs.get('verbosity', 1))
    def run(self):
        """Dispatch on self.action and print a summary to stdout."""
        print 'walker'
        print 'action: %s' % self.action
        print 'swp_id: %s' % self.swp_id
        if self.action == 'lookup':
            print 'lookup mode'
            total_matches = 0
            items = []
            # One request if swp_id was given, else the next batch of
            # unprocessed (status=0) requests.
            if self.swp_id:
                items = Request.objects.filter(swp_id=self.swp_id)
            else:
                items = Request.objects.filter(status=0)[self.offset:(self.limit + self.offset)]
            for item in items:
                ml = MediaLookup()
                try:
                    num_recordings, recordings, level = ml.run(item)
                    item.num_results = num_recordings
                    item.results_mb = recordings
                    if level > 1:
                        item.level = level
                    else:
                        level = None  # NOTE(review): local only, no effect
                    item.status = 1  # processed
                except Exception, e:
                    print
                    print
                    print '********* ERROR ********************'
                    print e
                    print '************************************'
                    item.status = 99  # error marker
                    # NOTE(review): when ml.run() raised, num_recordings used
                    # below is stale (or unbound on the first item -> NameError).
                item.save()
                if num_recordings > 0:
                    total_matches += 1
                    print '********'
                    print recordings
                    print '********'
            print
            print '############# SUMMARY ############'
            print 'num queried: %s' % items.count()
            print 'num matches: %s' % total_matches
        if self.action == 'match':
            print 'match mode'
            total_matches = 0  # NOTE(review): never incremented below
            items = []
            mm = MediaMatch()
            if self.swp_id:
                items = Request.objects.filter(swp_id=self.swp_id)
            else:
                # Only requests that completed lookup with at least one hit.
                items = Request.objects.filter(status=1, num_results__gte=1)[self.offset:(self.limit + self.offset)]
            for item in items:
                mm.match(item)
                print '---------------------------------------------'
                print 'swp_id: %s' % item.swp_id
                print 'title: %s' % item.title
                print 'num_results: %s' % item.num_results
                print 'level: %s' % item.level
                print
            print
            print '############# SUMMARY ############'
            print 'num queried: %s' % items.count()
            print 'num matches: %s' % total_matches
        if self.action == 'reset':
            print 'legacy mode'  # NOTE(review): label says legacy, action is reset
            items = Request.objects.all()[self.offset:(self.limit + self.offset)]
            for item in items:
                # Put the request back into the unprocessed state.
                item.num_results = None
                item.level = None
                item.status = 0
                item.results_mb = None
                item.save()
            print
            print '############# SUMMARY ############'
            print 'num queried: %s' % items.count()
            Match.objects.all().delete()  # drop all stored matches as well
        if self.action == 'legacy':
            print 'legacy mode'
            total_matches = 0
            items = []
            if self.swp_id:
                items = Request.objects.filter(swp_id=self.swp_id)
            else:
                items = Request.objects.all()[self.offset:(self.limit + self.offset)]
            for item in items:
                ll = LegacyLookup()
                num_recordings, recordings = ll.run(item)
                # Results are reported only, not persisted:
                #item.num_results = num_recordings
                #item.save()
                if num_recordings > 0:
                    total_matches += 1
                    print '********'
                    print recordings
                    print '********'
            print
            print '############# SUMMARY ############'
            print 'num queried: %s' % items.count()
            print 'num matches: %s' % total_matches
class Command(NoArgsCommand):
    """
    Run an SpfWorker batch over the spf Request table:
        manage.py spf_lookup --action=lookup [--swp_id=<id>]
    Supported actions: lookup, match, reset, legacy (see SpfWorker.run).
    """
    option_list = BaseCommand.option_list + (
        # Fixed: the previous help text was copy-pasted from an unrelated
        # import_folder command and described a different feature entirely.
        make_option('--action',
            action='store',
            dest='action',
            default=None,
            help='Worker action to run: lookup, match, reset or legacy'),
        make_option('--swp_id',
            action='store',
            dest='swp_id',
            default=None,
            help='Restrict the run to the request with this swp_id'),
    )
    def handle_noargs(self, **options):
        # Delegate all work (including option interpretation) to the worker.
        worker = SpfWorker(**options)
        worker.run()
|
wandb/client | wandb/vendor/pygments/lexers/_cl_builtins.py | Python | mit | 14,053 | 0.000142 | # -*- coding: utf-8 -*-
"""
pygments.lexers._cl_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ANSI Common Lisp builtins.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTIN_FUNCTIONS = set(( # 638 functions
'<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+',
'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin',
'adjustable-array-p', 'adjust-array', 'allocate-instance',
'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos',
'apropos-list', 'aref', 'arithmetic-error-operands',
'arithmetic-error-operation', 'array-dimension', 'array-dimensions',
'array-displacement', 'array-element-type', 'array-has-fill-pointer-p',
'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index',
'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if',
'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1',
'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not',
'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole',
'both-case-p', 'boundp', 'break', 'broadcast-stream-streams',
'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<',
'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character',
'characterp', 'char-code', 'char-downcase', 'char-equal',
'char-greaterp', 'char-int', 'char-lessp', 'char-name',
'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase',
'cis', 'class-name', 'class-of', 'clear-input', 'clear-output',
'close', 'clrhash', | 'code-char', 'coerce', 'compile',
'compiled-function-p', 'compile-file', 'compile-file-pathname',
'compiler-macro-function', 'complement', 'complex', 'complexp',
'compute-applicable-methods', 'compute-restarts', 'concatenate',
'concatenated-stream-streams', 'conjugate', 'cons', 'consp',
'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list',
'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure',
'copy-symbol', 'copy-tree', 'cos', | 'cosh', 'count', 'count-if',
'count-if-not', 'decode-float', 'decode-universal-time', 'delete',
'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not',
'delete-package', 'denominator', 'deposit-field', 'describe',
'describe-object', 'digit-char', 'digit-char-p', 'directory',
'directory-namestring', 'disassemble', 'documentation', 'dpb',
'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream',
'ed', 'eighth', 'elt', 'encode-universal-time', 'endp',
'enough-namestring', 'ensure-directories-exist',
'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error',
'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp',
'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author',
'file-error-pathname', 'file-length', 'file-namestring',
'file-position', 'file-string-length', 'file-write-date',
'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class',
'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart',
'find-symbol', 'finish-output', 'first', 'float', 'float-digits',
'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor',
'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line',
'fround', 'ftruncate', 'funcall', 'function-keywords',
'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp',
'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf',
'gethash', 'get-internal-real-time', 'get-internal-run-time',
'get-macro-character', 'get-output-stream-string', 'get-properties',
'get-setf-expansion', 'get-universal-time', 'graphic-char-p',
'hash-table-count', 'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'host-namestring', 'identity', 'imagpart', 'import',
'initialize-instance', 'input-stream-p', 'inspect',
'integer-decode-float', 'integer-length', 'integerp',
'interactive-stream-p', 'intern', 'intersection',
'invalid-method-error', 'invoke-debugger', 'invoke-restart',
'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm',
'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type',
'lisp-implementation-version', 'list', 'list*', 'list-all-packages',
'listen', 'list-length', 'listp', 'load',
'load-logical-pathname-translations', 'log', 'logand', 'logandc1',
'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname',
'logical-pathname-translations', 'logior', 'lognand', 'lognor',
'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name',
'lower-case-p', 'machine-instance', 'machine-type', 'machine-version',
'macroexpand', 'macroexpand-1', 'macro-function', 'make-array',
'make-broadcast-stream', 'make-concatenated-stream', 'make-condition',
'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table',
'make-instance', 'make-instances-obsolete', 'make-list',
'make-load-form', 'make-load-form-saving-slots', 'make-package',
'make-pathname', 'make-random-state', 'make-sequence', 'make-string',
'make-string-input-stream', 'make-string-output-stream', 'make-symbol',
'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map',
'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl',
'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not',
'merge', 'merge-pathnames', 'method-combination-error',
'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod',
'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc',
'next-method-p', 'nintersection', 'ninth', 'no-applicable-method',
'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse',
'nset-difference', 'nset-exclusive-or', 'nstring-capitalize',
'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if',
'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not',
'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp',
'open', 'open-stream-p', 'output-stream-p', 'package-error-package',
'package-name', 'package-nicknames', 'packagep',
'package-shadowing-symbols', 'package-used-by-list', 'package-use-list',
'pairlis', 'parse-integer', 'parse-namestring', 'pathname',
'pathname-device', 'pathname-directory', 'pathname-host',
'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type',
'pathname-version', 'peek-char', 'phase', 'plusp', 'position',
'position-if', 'position-if-not', 'pprint', 'pprint-dispatch',
'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline',
'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ',
'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim',
'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if',
'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read',
'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list',
'read-from-string', 'read-line', 'read-preserving-whitespace',
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart',
'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove',
'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method',
'remprop', 'rename-file', 'rename-package', 'replace', 'require',
'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round',
'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar',
'search', 'second', 'set', 'set-difference',
'set-dispatch-macro-character', 'set-exclusive-or',
'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char',
'seventh', 'shadow', 'shadowi |
jonatanSh/challenge-framework | challenge_framework/challenge_admin/views.py | Python | apache-2.0 | 2,353 | 0.001275 | from django.views.generic import TemplateView
from challenge_framework.mixins import ChallengeAdmin, CustomAdminSideBar
from challenge_framework.configs.urls import admin_configs, challenge_admin, group_under
from .models import ChallengeScore
class BaseViewMixin(CustomAdminSideBar):
    """Supply the side-bar links shown on challenge admin pages."""

    @property
    def side_bar_links(self):
        # Plain challenge admins get the flat challenge menu only.
        if not self.request.user.is_superuser:
            return challenge_admin
        # Superusers additionally get the full admin menu, with the
        # challenge entries grouped under "challenges".
        return group_under(challenge_admin, admin_configs, "challenges")
# Create your views here.
class Main(ChallengeAdmin, BaseViewMixin, TemplateView):
    """Challenge admin dashboard view aggregating score statistics."""
    template_name = "challenge_admin/main.html"

    def get_context_data(self, **kwargs):
        """Extend the template context with per-challenge score aggregates.

        Reads every ChallengeScore for the current challenge and exposes
        user/score totals plus the five best-scoring teams.
        """
        scores = ChallengeScore.objects.filter(challenge__pk=self.challenge.pk)
        # NOTE(review): users_finished / currently_playing / number_of_teams /
        # total_play_time are placeholders that are never computed - confirm
        # whether the template still expects them.
        response_object = {
            "total_users": 0,
            "users_finished": 0,
            "currently_playing": 0,
            "number_of_teams": 0,
            "top_profiles": [],
            "total_play_time": 0,
            "total_scores": 0,
        }
        for score in scores:
            response_object["total_users"] += len(score.team.users.all())
            response_object["total_scores"] += score.score
        # Five best-scoring teams, highest score first.
        top_five = sorted(scores, key=lambda entry: entry.score, reverse=True)[:5]
        for profile in top_five:
            response_object["top_profiles"].append({
                "title": profile.team.team_name,
                # sub_information lists the team's member usernames.
                "sub_information": ",".join(
                    [user.username for user in profile.team.users.all()]
                )
            })
        ctx = super(Main, self).get_context_data(**kwargs)
        ctx.update(response_object)
        return ctx
|
emyarod/OSS | 1_intro/6.00.1x/Week 3/Problem Set 3/radiationExposure.py | Python | mit | 935 | 0.006417 | def f(x):
    # Exponential decay: 10 * e**(ln(0.5)/5.27 * x) == 10 * 0.5**(x / 5.27),
    # i.e. an initial level of 10 halving every 5.27 time units (the
    # half-life of cobalt-60 in years).  NOTE(review): units (years,
    # roentgens) are presumed from the problem set - confirm.
    import math
    return 10*math.e**(math.log(0.5)/5.27 * x)
def radiationExposure(start, stop, step):
    '''
    Computes and returns the amount of radiation exposed
    to between the start and stop times. Calls the
    function f (defined for you in the grading script)
    to obtain the value of the function at any point.

    start: integer, the time at which exposure begins
    stop: integer, the time at which exposure ends
    step: float, the width of each rectangle. You can assume that
          the step size will always partition the space evenly.

    returns: float, the amount of radiation exposed to
             between start and stop times.
    '''
    def frange(lo, hi, width):
        # Yield lo, lo + width, ... while strictly below hi: the left
        # endpoints of the Riemann rectangles.
        while lo < hi:
            yield lo
            lo += width
    # Left Riemann sum: each rectangle contributes f(t) * step.
    value = 0
    for time in frange(start, stop, step):
        value += f(time) * step
    return value
print radiationExposure(40, 100, 1.5) |
psnj/petl | petl/util/materialise.py | Python | mit | 3,854 | 0.000259 | from __future__ import absolute_import, print_function, division
import operator
from collections import OrderedDict
from itertools import islice
from petl.compat import izip_longest, text_type, next
from petl.util.base import asindices, Table
def listoflists(tbl):
    """Materialise `tbl` as a list of row lists."""
    return list(map(list, tbl))
# Install as Table methods; `lol` is the conventional short alias.
Table.listoflists = listoflists
Table.lol = listoflists
def tupleoftuples(tbl):
    """Materialise `tbl` as a tuple of row tuples."""
    return tuple(map(tuple, tbl))
# Install as Table methods; `tot` is the conventional short alias.
Table.tupleoftuples = tupleoftuples
Table.tot = tupleoftuples
def listoftuples(tbl):
    """Materialise `tbl` as a list of row tuples."""
    return list(map(tuple, tbl))
# Install as Table methods; `lot` is the conventional short alias.
Table.listoftuples = listoftuples
Table.lot = listoftuples
def tupleoflists(tbl):
    """Materialise `tbl` as a tuple of row lists."""
    return tuple(map(list, tbl))
# Install as Table methods; `tol` is the conventional short alias.
Table.tupleoflists = tupleoflists
Table.tol = tupleoflists
def columns(table, missing=None):
    """
    Construct a :class:`dict` mapping field names to lists of values. E.g.::

        >>> import petl as etl
        >>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]]
        >>> cols = etl.columns(table)
        >>> cols['foo']
        ['a', 'b', 'b']
        >>> cols['bar']
        [1, 2, 3]

    Short rows are padded with `missing`. See also
    :func:`petl.util.materialise.facetcolumns`.

    """
    rows = iter(table)
    hdr = next(rows)
    fields = [text_type(h) for h in hdr]
    cols = OrderedDict((fld, []) for fld in fields)
    for row in rows:
        for fld, val in izip_longest(fields, row, fillvalue=missing):
            # Values beyond the header are dropped (fld falls back to the
            # fill value, which is not a known column name).
            if fld in cols:
                cols[fld].append(val)
    return cols
Table.columns = columns
def facetcolumns(table, key, missing=None):
    """
    Like :func:`petl.util.materialise.columns` but stratified by values of the
    given key field. E.g.::

        >>> import petl as etl
        >>> table = [['foo', 'bar', 'baz'],
        ...          ['a', 1, True],
        ...          ['b', 2, True],
        ...          ['b', 3]]
        >>> fc = etl.facetcolumns(table, 'foo')
        >>> fc['a']
        {'foo': ['a'], 'bar': [1], 'baz': [True]}
        >>> fc['b']
        {'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}

    `missing` is used to pad short rows.
    """
    fct = dict()
    it = iter(table)
    hdr = next(it)
    flds = list(map(text_type, hdr))
    indices = asindices(hdr, key)
    assert len(indices) > 0, 'no key field selected'
    getkey = operator.itemgetter(*indices)
    for row in it:
        kv = getkey(row)
        if kv not in fct:
            # First occurrence of this key value: start fresh column lists.
            cols = dict()
            for f in flds:
                cols[f] = list()
            fct[kv] = cols
        else:
            cols = fct[kv]
        for f, v in izip_longest(flds, row, fillvalue=missing):
            if f in cols:
                cols[f].append(v)
    return fct
Table.facetcolumns = facetcolumns
def cache(table, n=None):
    """
    Wrap the table with a cache that caches up to `n` rows as they are
    initially requested via iteration (by default all rows are cached).

    """
    view = CacheView(table, n=n)
    return view
Table.cache = cache
class CacheView(Table):
    """Table view that memoises up to `n` rows of the wrapped table as they
    are first iterated, so repeated iteration avoids re-evaluating it."""
    def __init__(self, inner, n=None):
        # inner: the wrapped table; n: max rows to cache (falsy = unbounded).
        self.inner = inner
        self.n = n
        self.cache = list()
        self.cachecomplete = False
    def clearcache(self):
        """Discard all cached rows and start caching afresh."""
        self.cache = list()
        self.cachecomplete = False
    def __iter__(self):
        """Yield rows, serving cached rows first, then the inner table."""
        # serve whatever is in the cache first
        for row in self.cache:
            yield row
        if not self.cachecomplete:
            # serve the remainder from the inner iterator
            it = iter(self.inner)
            for row in islice(it, len(self.cache), None):
                # maybe there's more room in the cache?
                if not self.n or len(self.cache) < self.n:
                    self.cache.append(row)
                yield row
            # does the cache contain a complete copy of the inner table?
            # NOTE(review): completeness is only recorded when the cache is
            # not full; when len(cache) == n exactly, the inner table is
            # re-read on the next pass - confirm this is intended.
            if not self.n or len(self.cache) < self.n:
                self.cachecomplete = True
|
Fafa87/EP | ep/evalplatform/plot_comparison.py | Python | mit | 27,171 | 0.005153 | from __future__ import division # so that a / b == float(a) / b
import fire
from ep.evalplatform import draw_details
from ep.evalplatform.compatibility import plot_comparison_legacy_parse
from ep.evalplatform.parsers import *
from ep.evalplatform.parsers_image import *
from ep.evalplatform.plotting import Plotter
from ep.evalplatform.utils import *
from ep.evalplatform.yeast_datatypes import CellOccurence
SEGMENTATION_GNUPLOT_FILE = "plot_segmentation.plt"
TRACKING_GNUPLOT_FILE = "plot_tracking.plt"
# Registry of all supported result-file parsers; input_type maps each
# parser's command-line symbol to its instance.
parsers = [DefaultPlatformParser(), OldGroundTruthParser(), CellProfilerParser(), CellProfilerParserTracking(),
           CellTracerParser(), CellIDParser(), TrackerParser(), CellSerpentParser(), CellStarParser(),
           CellProfilerParserTrackingOLDTS2(), LabelImageParser(), MaskImageParser([2, 3])
           ]
input_type = dict([(p.symbol, p) for p in parsers])
ground_truth_parser = OldGroundTruthParser()
# Max match distance. Read from: evaluation.ini at program folder then use this default.
loaded_ini = False
cutoff = 30  # pixels
cutoff_iou = 0.3  # intersection / union
# Module-level evaluation switches; presumably overridden when the ini file
# is loaded (see loaded_ini) - confirm against load_general_ini.
output_evaluation_details = 0
draw_evaluation_details = 0
fill_markers = False
markersize = 7
ignored_frame_size = 0
all_data_evaluated = 0
wide_plots = 0
def filter_border(celllist, image_size=(10000, 10000)):
    """
    Return the subset of `celllist` exempt from strict matching: entries that
    are not obligatory, or (for single cells) whose centre lies within
    `ignored_frame_size` pixels of the image border.

    celllist -- list of CellOccurence, or list of (CellOccurence, CellOccurence)
                pairs (for pairs only the obligatory flags are considered)
    image_size -- (width, height) used to locate the far borders
    """
    if celllist == []:
        return []
    if isinstance(celllist[0], CellOccurence):
        def close_to_border(cell, limits):
            # True when the cell centre falls outside the interior rectangle
            # shrunk by ignored_frame_size on every side.
            return not (ignored_frame_size <= cell.position[0] <= (limits[0] - ignored_frame_size) and
                        ignored_frame_size <= cell.position[1] <= (limits[1] - ignored_frame_size))

        return [cell for cell in celllist if not cell.obligatory() or close_to_border(cell, image_size)]
    elif len(celllist[0]) == 2:
        return [(cell_A, cell_B) for (cell_A, cell_B) in celllist if not cell_A.obligatory() or not cell_B.obligatory()]
    else:
        # Unsupported element type: dump it for debugging (implicitly
        # returns None, preserving the original behaviour).
        print (celllist)
def read_ground_truth(path, parser=None):
    """
    Read ground truth cells from `path` using `parser` (defaults to the
    module-level ground_truth_parser).

    Returns::

        [Cell]
    """
    parser = parser or ground_truth_parser
    debug_center.show_in_console(None, "Progress", "Reading ground truth data...")
    # Log message made consistent with read_results (a stray " | " artifact
    # was previously embedded in the string).
    debug_center.show_in_console(None, "Tech", "".join(["Uses ", parser.__class__.__name__, " parser..."]))
    cells = parser.load_from_file(path)
    debug_center.show_in_console(None, "Progress", "Done reading ground truth data...")
    return cells
def make_all_cells_important(frame_cells):
    """Set the colour of the cell in every (frame, cell) entry to 0."""
    for entry in frame_cells:
        entry[1].colour = 0
def read_results(path, parser, name):
    """
    Load algorithm results from `path` with `parser`.

    Returns::

        (algorithm_name,[Cell])
    """
    debug_center.show_in_console(None, "Progress", "Reading " + name + " results data...")
    debug_center.show_in_console(None, "Tech", "Uses " + parser.__class__.__name__ + " parser...")
    cells = parser.load_from_file(path)
    # Cells cannot carry a meaningful colour yet, so mark them all important.
    make_all_cells_important(cells)
    debug_center.show_in_console(None, "Progress", "Done reading " + name + " result data...")
    return name, cells
def write_to_file_tracking(stats, path):
    """Unzip per-frame (precision, recall, F) tracking stats into three
    (frame_label, value) series and write them to `path`."""
    precision_series = []
    recall_series = []
    f_series = []
    for frame, (p, r, ff) in stats:
        label = str(frame)[:20]
        precision_series.append((label, p))
        recall_series.append((label, r))
        f_series.append((label, ff))
    write_to_file((precision_series, recall_series, f_series), path)
def write_to_file_segmentation(stats, path):
    """Unzip per-frame four-valued segmentation stats into four
    (frame_label, value) series and write them to `path`."""
    first, second, third, fourth = [], [], [], []
    for frame, (a, b, c, d) in stats:
        label = str(frame)[:20]
        first.append((label, a))
        second.append((label, b))
        third.append((label, c))
        fourth.append((label, d))
    write_to_file((first, second, third, fourth), path)
def write_to_file_printable(details, path):
    """Write evaluation detail records to CSV; emits a placeholder header
    when there are no details."""
    if not details:
        write_to_csv(["No details!"], [], path)
        return
    headers = details[0].csv_headers()
    records = [detail.csv_record() for detail in details]
    write_to_csv(headers, records, path)
def format_prF(title, params):
    """Render a (precision, recall, F) triple as display lines under `title`."""
    precision, recall, f_value = params
    lines = [title]
    lines.append("Precision: " + str(precision))
    lines.append("Recall: " + str(recall))
    lines.append("F: " + str(f_value))
    return lines
def format_summary(algorithm, segmentation, tracking, long_tracking):
    """Assemble the printable evaluation summary for one algorithm.

    `segmentation[0]` (the cell-count ratio) is skipped; only its P/R/F part
    is printed.  Tracking sections are included only when non-empty.
    """
    lines = ["Algorithm: " + algorithm]
    lines.extend(format_prF("Segmentation:", segmentation[1:]))
    if len(tracking) != 0:
        lines.extend(format_prF("Tracking:", tracking))
    if len(long_tracking) != 0:
        lines.extend(format_prF("Long-time tracking:", long_tracking))
    return "\n".join(lines)
def write_summary(algorithm, segmentation, tracking, long_tracking, path):
    """Write the formatted evaluation summary to `path`.

    The summary is formatted first, so a formatting error no longer
    truncates an existing file; the handle is closed even if the write
    fails (the original leaked it on error).
    """
    summary = format_summary(algorithm, segmentation, tracking, long_tracking)
    with open(path, "w") as output_file:
        output_file.write(summary)
def distance(cell_a, cell_b):
    """Euclidean distance between the centre positions of two cells."""
    dx = cell_a.position[0] - cell_b.position[0]
    dy = cell_a.position[1] - cell_b.position[1]
    return (dx ** 2 + dy ** 2) ** 0.5
def find_correspondence(ground_truth, results):
    """
    Greedily match ground-truth cells to result cells, best similarity first.

    Input: [Cell] x2
    Returns: [(ground_truth_cell, results_cell)] one-to-one matching, from
    which false positives/negatives, cell counts and tracking follow.
    """
    # Candidate edges: every (ground truth, result) pair similar enough
    # under the module-level cutoff / cutoff_iou thresholds.
    edges = []
    for gt_cell in ground_truth:
        for res_cell in results:
            if gt_cell.is_similar(res_cell, cutoff, cutoff_iou):
                edges.append((gt_cell.similarity(res_cell), (gt_cell, res_cell)))
    correspondences = []
    matched_gt = set()
    matched_res = set()
    # Greedy assignment: take edges in decreasing similarity, using each
    # cell at most once.
    for _, (gt_cell, res_cell) in sorted(edges, key=lambda edge: edge[0], reverse=True):
        if res_cell not in matched_res and gt_cell not in matched_gt:
            correspondences.append((gt_cell, res_cell))
            matched_gt.add(gt_cell)
            matched_res.add(res_cell)
    return correspondences
def calculate_stats_segmentation(ground_truth_frame, results_frame, image_size=(100000, 100000)):
    """
    Match one frame of results against ground truth and collect counts.

    Input: [Cell] x2
    Result: (cell_count_results, cell_count_ground_truth, correspondences, false_positives, false_negatives)
    """
    # Refresh the cutoff/ignored-border settings from the config file.
    load_general_ini(CONFIG_FILE)
    # Cells exempt from scoring (optional or near the border); colour 1
    # marks them as ignored.
    border_results = filter_border(results_frame, image_size)
    for c in border_results:
        c.colour = 1
    border_groundtruth = filter_border(ground_truth_frame, image_size)
    for c in border_groundtruth:
        c.colour = 1
    correspondence = find_correspondence(ground_truth_frame, results_frame)
    border_correspondence = filter_border(correspondence, image_size)
    matched_GT = [gt for gt, _ in correspondence]
    matched_res = [res for _, res in correspondence]
    matched_border_GT = [gt for gt, _ in border_correspondence]
    matched_border_res = [res for _, res in border_correspondence]
    # Correct matches, excluding pairs that are exempt from scoring.
    correct_results = [SegmentationResult(gt, res) for (gt, res) in correspondence if
                       (gt, res) not in border_correspondence]
    # Cells that actually count towards the totals.
    obligatory_results = [res for res in results_frame if res not in border_results and res not in matched_border_res]
    obligatory_gt = [gt for gt in ground_truth_frame if gt not in border_groundtruth and gt not in matched_border_GT]
    # Countable ground truth with no match / countable results with no match.
    false_negatives = [SegmentationResult(gt, None) for gt in ground_truth_frame if
                       gt not in border_groundtruth and gt not in matched_GT]
    false_positives = [SegmentationResult(None, res) for res in results_frame if
                       res not in border_results and res not in matched_res]
    return (len(obligatory_results), len(obligatory_gt),
            correct_results,
            false_positives,
            false_negatives)
def calculate_precision_recall_F_metrics(algorithm_number, real_number, correct_number):
    """
    Compute detection quality metrics.

    algorithm_number -- number of objects reported by the algorithm
    real_number -- number of objects in the ground truth
    correct_number -- number of correctly matched objects

    Result: (precision, recall, F)
    """
    if algorithm_number == 0 and real_number == 0:
        # Nothing to find and nothing reported: the F computation below
        # previously raised ZeroDivisionError here; score it as perfect.
        return (1.0, 1.0, 1.0)
    if algorithm_number == 0:
        precision = 0
    else:
        precision = float(correct_number) / algorithm_number
    if real_number == correct_number:  # avoids 0 / 0 when both are zero
        recall = 1
    else:
        recall = float(correct_number) / real_number
    # F1 == 2*TP / (2*TP + FP + FN) == 2*correct / (real + algorithm),
    # algebraically equal to 2*precision*recall / (precision + recall).
    f_value = 2 * float(correct_number) / (real_number + algorithm_number)
    return (precision, recall, f_value)
def calculate_metrics_segmentation(params):
"""
Input: (cell_count_results, cell_count_ground_truth, correspondences, false_positives, false_negatives)
Result: (cell_count_results/cell_count_ground_truth, precision, recall, F)
"""
(cell_count_results, cell_co |
arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_broker.py | Python | apache-2.0 | 4,270 | 0.010304 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import Direction
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management import Script
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, File
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.check_process_status import check_process_status
from kafka import ensure_base_directories
import sys
import upgrade
from kafka import kafka
class KafkaBroker(Script):
  """Ambari lifecycle handler for the Kafka broker component."""

  def install(self, env):
    self.install_packages(env)

  def configure(self, env, upgrade_type=None):
    """Generate the Kafka configuration files from the cluster params."""
    import params
    env.set_params(params)
    kafka(upgrade_type=upgrade_type)

  def pre_upgrade_restart(self, env, upgrade_type=None):
    """Select the target stack packages and, when the upgrade crosses the
    IOP 4.2 boundary, run the Kafka ACL migration."""
    import params
    env.set_params(params)
    if params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
      stack_select.select_packages(params.version)

    # The ACL migration must only run when crossing the IOP 4.2 boundary.
    if params.version and params.upgrade_direction:
      src_version = dst_version = None
      if params.upgrade_direction == Direction.UPGRADE:
        src_version = upgrade_summary.get_source_version("KAFKA", default_version = params.version)
        dst_version = upgrade_summary.get_target_version("KAFKA", default_version = params.version)
      else:
        # These represent the original values during the UPGRADE direction
        src_version = upgrade_summary.get_target_version("KAFKA", default_version = params.version)
        dst_version = upgrade_summary.get_source_version("KAFKA", default_version = params.version)

      if compare_versions(src_version, '4.2.0.0') < 0 and compare_versions(dst_version, '4.2.0.0') >= 0:
        # Upgrade from IOP 4.1 to 4.2, Calling the acl migration script requires the configs to be present.
        self.configure(env, upgrade_type=upgrade_type)
        upgrade.run_migration(env, upgrade_type)

  def start(self, env, upgrade_type=None):
    """Start the broker daemon unless its pid file points at a live process."""
    import params
    env.set_params(params)
    self.configure(env, upgrade_type=upgrade_type)
    daemon_cmd = format('source {params.conf_dir}/kafka-env.sh ; {params.kafka_bin} start')
    no_op_test = format('ls {params.kafka_pid_file} >/dev/null 2>&1 && ps -p `cat {params.kafka_pid_file}` >/dev/null 2>&1')
    Execute(daemon_cmd,
            user=params.kafka_user,
            not_if=no_op_test
    )

  def stop(self, env, upgrade_type=None):
    """Stop the broker daemon and remove its pid file."""
    import params
    env.set_params(params)
    ensure_base_directories()
    daemon_cmd = format('source {params.conf_dir}/kafka-env.sh; {params.kafka_bin} stop')
    Execute(daemon_cmd,
            user=params.kafka_user,
    )
    File(params.kafka_pid_file,
         action = "delete"
    )

  def status(self, env):
    """Raise ComponentIsNotRunning via check_process_status when stopped."""
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.kafka_pid_file)
# Entry point invoked by the Ambari agent command runner.
if __name__ == "__main__":
  KafkaBroker().execute()
|
3drobotics/MAVProxy | MAVProxy/modules/mavproxy_log.py | Python | gpl-3.0 | 7,465 | 0.003751 | #!/usr/bin/env python
'''log command handling'''
import time, os
from MAVProxy.modules.lib import mp_module
class LogModule(mp_module.MPModule):
    """MAVProxy module implementing dataflash log listing and download over
    the MAVLink LOG_* message protocol (90-byte data chunks)."""

    def __init__(self, mpstate):
        super(LogModule, self).__init__(mpstate, "log", "log transfer")
        self.add_command('log', self.cmd_log, "log file handling", ['<download|status|erase|resume|cancel|list>'])
        self.reset()

    def reset(self):
        '''clear all download state'''
        self.download_set = set()
        self.download_file = None
        self.download_lognum = None
        self.download_filename = None
        self.download_start = None
        self.download_last_timestamp = None
        self.download_ofs = 0
        self.retries = 0
        self.entries = {}

    def mavlink_packet(self, m):
        '''handle an incoming mavlink packet'''
        if m.get_type() == 'LOG_ENTRY':
            self.handle_log_entry(m)
        elif m.get_type() == 'LOG_DATA':
            self.handle_log_data(m)

    def handle_log_entry(self, m):
        '''handling incoming log entry'''
        if m.time_utc == 0:
            tstring = ''
        else:
            tstring = time.ctime(m.time_utc)
        self.entries[m.id] = m
        print("Log %u numLogs %u lastLog %u size %u %s" % (m.id, m.num_logs, m.last_log_num, m.size, tstring))

    def handle_log_data(self, m):
        '''handling incoming log data'''
        if self.download_file is None:
            return
        # lose some data (debug aid for testing the retry logic)
        # import random
        # if random.uniform(0,1) < 0.05:
        #     print('dropping ', str(m))
        #     return
        if m.ofs != self.download_ofs:
            self.download_file.seek(m.ofs)
            self.download_ofs = m.ofs
        if m.count != 0:
            # m.data holds a sequence of ints (the original applied chr() to
            # each element); write them as raw bytes.  bytearray works on
            # both Python 2 and 3, where joining chr() results into a str
            # broke the binary-mode file on Python 3.
            self.download_file.write(bytearray(m.data[:m.count]))
            self.download_set.add(m.ofs // 90)
            self.download_ofs += m.count
        self.download_last_timestamp = time.time()
        # A short or empty chunk at the highest seen offset marks the end of
        # the log.
        if m.count == 0 or (m.count < 90 and len(self.download_set) == 1 + (m.ofs // 90)):
            dt = time.time() - self.download_start
            self.download_file.close()
            size = os.path.getsize(self.download_filename)
            speed = size / (1000.0 * dt)
            print("Finished downloading %s (%u bytes %u seconds, %.1f kbyte/sec %u retries)" % (
                self.download_filename,
                size,
                dt, speed,
                self.retries))
            self.download_file = None
            self.download_filename = None
            self.download_set = set()

    def handle_log_data_missing(self):
        '''handling missing incoming log data'''
        if len(self.download_set) == 0:
            return
        highest = max(self.download_set)
        # Chunk indices below the highest received one that never arrived.
        diff = set(range(highest)).difference(self.download_set)
        if len(diff) == 0:
            # Nothing missing so far: ask for everything past the last chunk.
            self.master.mav.log_request_data_send(self.target_system,
                                                  self.target_component,
                                                  self.download_lognum, (1 + highest) * 90, 0xffffffff)
            self.retries += 1
        else:
            # Re-request up to 20 contiguous runs of missing chunks.
            num_requests = 0
            while num_requests < 20:
                start = min(diff)
                diff.remove(start)
                end = start
                while end + 1 in diff:
                    end += 1
                    diff.remove(end)
                self.master.mav.log_request_data_send(self.target_system,
                                                      self.target_component,
                                                      self.download_lognum, start * 90, (end + 1 - start) * 90)
                num_requests += 1
                self.retries += 1
                if len(diff) == 0:
                    break

    def log_status(self):
        '''show download status'''
        if self.download_filename is None:
            print("No download")
            return
        dt = time.time() - self.download_start
        speed = os.path.getsize(self.download_filename) / (1000.0 * dt)
        m = self.entries.get(self.download_lognum, None)
        if m is None:
            size = 0
        else:
            size = m.size
        print("Downloading %s - %u/%u bytes %.1f kbyte/s (%u retries)" % (self.download_filename,
                                                                          os.path.getsize(self.download_filename),
                                                                          size,
                                                                          speed,
                                                                          self.retries))

    def log_download(self, log_num, filename):
        '''download a log file'''
        print("Downloading log %u as %s" % (log_num, filename))
        self.download_lognum = log_num
        self.download_file = open(filename, "wb")
        self.master.mav.log_request_data_send(self.target_system,
                                              self.target_component,
                                              log_num, 0, 0xFFFFFFFF)
        self.download_filename = filename
        self.download_set = set()
        self.download_start = time.time()
        self.download_last_timestamp = time.time()
        self.download_ofs = 0
        self.retries = 0

    def cmd_log(self, args):
        '''log commands'''
        if len(args) < 1:
            print("usage: log <list|download|erase|resume|status|cancel>")
            return
        if args[0] == "status":
            self.log_status()
        if args[0] == "list":
            print("Requesting log list")
            self.download_set = set()
            self.master.mav.log_request_list_send(self.target_system,
                                                  self.target_component,
                                                  0, 0xffff)
        elif args[0] == "erase":
            self.master.mav.log_erase_send(self.target_system,
                                           self.target_component)
        elif args[0] == "resume":
            self.master.mav.log_request_end_send(self.target_system,
                                                 self.target_component)
        elif args[0] == "cancel":
            if self.download_file is not None:
                self.download_file.close()
            self.reset()
        elif args[0] == "download":
            if len(args) < 2:
                print("usage: log download <lognumber> <filename>")
                return
            if args[1] == 'latest':
                if len(self.entries.keys()) == 0:
                    print("Please use log list first")
                    return
                # Most recently started log wins.
                log_num = sorted(self.entries, key=lambda id: self.entries[id].time_utc)[-1]
            else:
                log_num = int(args[1])
            if len(args) > 2:
                filename = args[2]
            else:
                filename = "log%u.bin" % log_num
            self.log_download(log_num, filename)

    def idle_task(self):
        '''handle missing log data'''
        # If nothing arrived for 0.7s, re-request the missing chunks.
        if self.download_last_timestamp is not None and time.time() - self.download_last_timestamp > 0.7:
            self.download_last_timestamp = time.time()
            self.handle_log_data_missing()
self.handle_log_data_missing()
def init(mpstate):
    """Module entry point: build and return the log transfer module."""
    module = LogModule(mpstate)
    return module
|
borntyping/python-riemann-client | riemann_client/riemann_pb2.py | Python | mit | 314 | 0 | """Wraps the riemann_pb2_py2 and riemann_pb2_py3 modules"""
import sys

# The public names re-exported from whichever generated protobuf module
# matches the running interpreter ('Attribute' was previously corrupted by
# a stray " | " artifact, and the py3 import path was broken the same way).
__all__ = ['Event', 'Msg', 'Query', 'Attribute']

if sys.version_info >= (3,):
    from riemann_client.riemann_py3_pb2 import (Event, Msg, Query, Attribute)
else:
    from riemann_client.riemann_py2_pb2 import (Event, Msg, Query, Attribute)
|
Laodicean/pyGravSim | gravSim.py | Python | gpl-3.0 | 4,070 | 0.014742 | #gravSim.py
import pygame
import math
# Global simulation state and pygame setup.
frame_count = -1
WIDTH = 640
HEIGHT = 480
Quit = False

pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
# Window title (a stray " | " artifact had corrupted the caption string).
pygame.display.set_caption('Gravity Sim')
objects_group = pygame.sprite.Group()
clock = pygame.time.Clock()
class Object():  # where the magic happens
    """A simulated body: point mass with velocity, accumulated force and a
    pygame sprite.  A single class serves every object in the game."""

    def __init__(self, location=None):
        # BUGFIX: the default used to be a shared mutable list ([0, 0]);
        # build a fresh list per instance instead.
        self.mass = 10
        self.location = [0, 0] if location is None else location
        self.velocityX = 0.0
        self.velocityY = 0.0
        self.forceX = 0.0
        self.forceY = 0.0
        self.size = (10, 10)
        self.downward_gravity = 1
        self.makeSprite()

    def checkLocation(self):
        # Bounce off the window edges by reversing the velocity component.
        if self.sprite.rect.left < 0 or self.sprite.rect.right > WIDTH:
            self.velocityX *= -1
        if self.sprite.rect.top < 0 or self.sprite.rect.bottom > HEIGHT:
            self.velocityY *= -1

    def makeSprite(self):
        self.sprite = pygame.sprite.Sprite()
        self.sprite.image = pygame.image.load('purple_circle.gif')
        self.sprite.rect = self.sprite.image.get_rect()
        self.sprite.rect.left = self.location[0]
        self.sprite.rect.top = self.location[1]
        #pygame.transform.scale(self.sprite.image,self.size)
        objects_group.add(self.sprite)

    def tick(self):
        self.checkLocation()  # Check location, adjust velocity if needed.
        self.forceY += self.downward_gravity  # Add downward gravity to force.
        # Calculate velocity from force: v = u + at ==> v = u + F/m
        self.velocityX = self.velocityX + (self.forceX / self.mass)
        self.velocityY = self.velocityY + (self.forceY / self.mass)
        # Move object (delta_time is the frame time in milliseconds).
        self.sprite.rect.left += self.velocityX * delta_time/10
        self.sprite.rect.top += self.velocityY * delta_time/10
        # Reset accumulated force for the next frame.
        self.forceX = 0.0
        self.forceY = 0.0

    def poke(self, force, angle):
        """Apply `force` to the object along `angle` in degrees, clockwise:
        0 = up, 90 = right, 180 = down, 270 = left.  Out-of-range angles
        are ignored (as before)."""
        print("Poke!")
        if angle < 0 or angle >= 360:
            return
        # BUGFIX: the original per-quadrant ladder dropped the force
        # magnitude (and the sign convention) for non-axis angles.  The
        # trigonometric form below agrees exactly with the four axis cases
        # and generalises them to arbitrary angles.
        self.forceX += force * math.sin(math.radians(angle))
        self.forceY += force * -math.cos(math.radians(angle))
ball = Object([10,400])
# Main loop: poll input, advance physics, draw, until quit is requested.
while Quit != True:
    # Cap at 100 FPS; delta_time is the elapsed milliseconds since last frame.
    delta_time = clock.tick_busy_loop(100)
    frame_count += 1
    #QUIT SEQUENCE
    for event in pygame.event.get():
        if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
            Quit = True
        if event.type == pygame.KEYDOWN:
            # Arrow keys poke the ball with a force of 10 in that direction.
            if event.key == pygame.K_UP:
                ball.poke(10,0)
            if event.key == pygame.K_DOWN:
                ball.poke(10,180)
            if event.key == pygame.K_RIGHT:
                ball.poke(10,90)
            if event.key == pygame.K_LEFT:
                ball.poke(10,270)
    #ACTION SEQUENCE
    ball.tick()
    #DRAW SEQUENCE
    # FPS counter refreshed every 20 frames in the top-right corner.
    if frame_count % 20 == 0:
        text_image = pygame.font.Font(None,20).render(str(int(clock.get_fps())), True, (0,0,0))
        text_rect = text_image.get_rect(right = WIDTH - 5,centery=10)
        screen.blit(text_image,text_rect)
    objects_group.draw(screen)
    pygame.display.update()
    # NOTE(review): the background fill happens after display.update(), so
    # it takes effect on the next frame - confirm this ordering is intended.
    screen.fill((200,200,200))
|
timcera/mettoolbox | src/mettoolbox/evaplib.py | Python | bsd-3-clause | 23,762 | 0.000926 | # -*- coding: utf-8 -*-
"""
Functions for calculation of potential and actual evaporation
from meteorological data.
Potential and actual evaporation functions
==========================================
- E0: Calculate Penman (1948, 1956) open water evaporation.
- Em: Calculate evaporation according to Makkink (1965).
- Ept: Calculate evaporation according to Priestley and Taylor (1972).
- ET0pm: Calculate Penman Monteith reference evaporation short grass.
- Epm: Calculate Penman-Monteith evaporation (actual evapotranspiration).
- ra: Calculate aerodynamic resistance from windspeed and
roughnes parameters.
- tvardry: calculate sensible heat flux from temperature variations.
- gash79: Gash (1979) analytical rainfall interception model.
Requires and imports scipy and meteolib modules.
Compatible with Python 2.7.3.
Function descriptions
=====================
"""
import scipy
from . import meteolib
# Library metadata, reported by the evaplib() help function below.
__author__ = "Dr. Maarten J. Waterloo <maarten.waterloo@acaciawater.com>"
__version__ = "1.0"
__release__ = "1.0.1"
__date__ = "June 2016"
# 14 June 2016: Fixed error in Epm function: changed multiplication by ra
# to division by ra. Thanks Spencer Whitman for pointing this out.
# Make a help entry for this library
def evaplib():
    """
    Evaplib: Python libray for calculation of evaporation from meteorological data.

    Parameters
    ----------
    E0:
        Calculate Penman (1948, 1956) open water evaporation.
    Em:
        Calculate evaporation according to Makkink (1965).
    Ept:
        Calculate evaporation according to Priestley and Taylor (1972).
    ET0pm:
        Calculate Penman Monteith reference evaporation short grass (FAO).
    Epm:
        Calculate Penman Monteith reference evaporation (Monteith, 1965).
    ra:
        Calculate from windspeed and roughnes parameters.
    tvardry:
        Calculate sensible heat flux from temperature variations (Vugts et al., 1993).
    gash79:
        Calculate rainfall interception (Gash, 1979).

    Author: Dr. Maarten J. Waterloo <maarten.waterloo@acaciawater.com>.
    Version 1.0.
    Date: Sep 2012, last modified November June 2016.
    """
    print("A libray with Python functions for calculation of")
    print("evaporation from meteorological and vegetation data.\n")
    print("Functions:\n")
    print("- E0: Calculate Penman (1948, 1956) (open water) evaporation")
    print("- Em: Calculate evaporation according to Makkink (1965)")
    print("- Ept: Calculate evaporation according to Priestley and Taylor (1972).")
    print("- ET0pm: Calculate Penman Monteith reference evaporation short grass.")
    print("- Epm: Calculate Penman Monteith evaporation (Monteith, 1965).")
    print("- ra: Calculate aerodynamic resistance.")
    print(
        "- tvardry: calculate sensible heat flux from temperature variations \
    (Vugts et al., 1993)."
    )
    print("- gash79: calculate rainfall interception (Gash, 1979).\n")
    # BUGFIX: these previously printed tuples - print(("Author: ", x)) - a
    # Python 2-to-3 conversion artifact that emitted e.g.
    # ('Author: ', '...') instead of readable text.
    print("Author: ", __author__)
    print("Version: ", __version__)
    print("Date: ", __date__)
def ra(z=float, z0=float, d=float, u=scipy.array([])):
    """
    Calculate the aerodynamic resistance from windspeed:

    .. math::
        r_a = \\frac{\\left[\\ln\\frac{z-d}{z_0}\\right]^2}{k^2 \\cdot u_z}

    where k is the von Karman constant set at 0.4.

    Parameters:
        - z: measurement height [m].
        - z0: roughness length [m].
        - d: displacement length [m].
        - u: (array of) wind speed measured at height z [m s-1].

    Returns:
        - ra: (array of) aerodynamic resistance [s m-1].

    References
    ----------
    A.S. Thom (1975), Momentum, mass and heat exchange of plant communities,
    In: Monteith, J.L. Vegetation and the Atmosphere, Academic Press, London.
    p. 57-109.

    Examples
    --------
    >>> ra(3,0.12,2.4,5.0)
    3.2378629924752942
    >>> u=([2,4,6])
    >>> ra(3,0.12,2.4,u)
    array([ 8.09465748,  4.04732874,  2.69821916])

    """
    # Coerce scalars/lists to arrays so the formula vectorises.
    u = meteolib._arraytest(u)
    # 0.16 == k**2 with von Karman constant k = 0.4; result is in s/m.
    return (scipy.log((z - d) / z0)) ** 2 / (0.16 * u)
def E0(
    airtemp=scipy.array([]),
    rh=scipy.array([]),
    airpress=scipy.array([]),
    Rs=scipy.array([]),
    Rext=scipy.array([]),
    u=scipy.array([]),
    alpha=0.08,
    Z=0.0,
):
    """
    Calculate daily Penman (open) water evaporation estimates:

    .. math::
        E_0 = \\frac{R_n \\cdot \\Delta}{\\lambda \\cdot (\\Delta + \\gamma)} + \\frac{6430000 \\cdot E_a \\cdot \\gamma}{\\lambda \\cdot (\\Delta+\\gamma)}

    Parameters:
        - airtemp: (array of) daily average air temperatures [Celsius].
        - rh: (array of) daily average relative humidity [%].
        - airpress: (array of) daily average air pressure data [Pa].
        - Rs: (array of) daily incoming solar radiation [J m-2 day-1].
        - Rext: (array of) daily extraterrestrial radiation [J m-2 day-1].
        - u: (array of) daily average wind speed at 2 m [m s-1].
        - alpha: albedo [-], set at 0.08 for open water by default.
        - Z: (array of) site elevation, default is 0 m a.s.l.

    Returns:
        - E0: (array of) Penman open water evaporation values [mm day-1].

    Notes
    -----
    Meteorological parameters measured at 2 m above the surface. Albedo
    alpha set by default at 0.08 for open water (Valiantzas, 2006).

    References
    ----------
    - H.L. Penman (1948). Natural evaporation from open water, bare soil\
    and grass. Proceedings of the Royal Society of London. Series A.\
    Mathematical and Physical Sciences 193: 120-145.
    - H.L. Penman (1956). Evaporation: An introductory survey. Netherlands\
    Journal of Agricultural Science 4: 9-29.
    - J.D. Valiantzas (2006). Simplified versions for the Penman\
    evaporation equation using routine weather data. J. Hydrology 331:\
    690-702.

    Examples
    --------
    >>> # With single values and default albedo/elevation
    >>> E0(20.67,67.0,101300.0,22600000.,42000000.,3.2)
    6.6029208786994467
    >>> # With albedo is 0.18 instead of default and default elevation
    >>> E0(20.67,67.0,101300.0,22600000.,42000000.,3.2,alpha=0.18)
    5.9664248091431968
    >>> # With standard albedo and Z= 250.0 m
    >>> E0(20.67,67.0,101300.0,22600000.,42000000.,3.2,Z=250.0)
    6.6135588207586284
    >>> # With albedo alpha = 0.18 and elevation Z = 1000 m a.s.l.
    >>> E0(20.67,67.0,101300.0,22600000.,42000000.,3.2,0.18,1000.)
    6.00814764682986

    """
    # Coerce all inputs to arrays so scalar and array calls both work.
    airtemp, rh, airpress, Rs, Rext, u = meteolib._arraytest(
        airtemp, rh, airpress, Rs, Rext, u
    )
    sigma = 4.903e-3  # Stefan Boltzmann constant [J m-2 K-4 d-1]
    # Psychrometric properties of the air.
    DELTA = meteolib.Delta_calc(airtemp)  # slope of vapour pressure curve [Pa/K]
    gamma = meteolib.gamma_calc(airtemp, rh, airpress)  # psychrometric constant [Pa/K]
    Lambda = meteolib.L_calc(airtemp)  # latent heat of vapourisation [J/kg]
    # Saturated and actual water vapour pressures [Pa].
    es = meteolib.es_calc(airtemp)
    ea = meteolib.ea_calc(airtemp, rh)
    # Radiation balance components [J m-2 d-1].
    shortwave = (1.0 - alpha) * Rs
    clear_sky = (0.75 + 2e-5 * Z) * Rext  # clear sky radiation Rs0
    cloud_factor = 1.35 * Rs / clear_sky - 0.35
    net_emissivity = 0.34 - 0.14 * scipy.sqrt(ea / 1000)
    longwave = cloud_factor * net_emissivity * sigma * (airtemp + 273.15) ** 4
    net_radiation = shortwave - longwave
    # Isothermal evaporation term (Penman wind function), pressures in kPa.
    Ea = (1 + 0.536 * u) * (es / 1000 - ea / 1000)
    return (
        DELTA / (DELTA + gamma) * net_radiation / Lambda
        + gamma / (DELTA + gamma) * 6430000 * Ea / Lambda
    )
def ET0pm(
airtemp=scipy.array([]),
rh=scipy.array([]),
airpress=scipy.array([]),
Rs=scipy.array([]),
Rext=scipy.array([]),
u=scipy.array([]),
Z=0.0,
):
"""
Function to calculate daily Penman Monteith reference evaporation estimates.
Parameters:
- airtemp: (array of) daily average air |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.