| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M) |
|---|---|---|---|---|---|
from __future__ import absolute_import
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from sentry import constants
from sentry.models import OrganizationMemberType
from sentry.plugins import plugins, NotificationPlugin
from sentry.web.forms.projects import NotificationSettingsForm
from sentry.web.frontend.base import ProjectView
from sentry.web.helpers import plugin_config
OK_SETTINGS_SAVED = _('Your settings were saved successfully.')
class ProjectNotificationsView(ProjectView):
required_access = OrganizationMemberType.ADMIN
def _iter_plugins(self):
for plugin in plugins.all(version=1):
if not isinstance(plugin, NotificationPlugin):
continue
yield plugin
def _handle_enable_plugin(self, request, project):
plugin = plugins.get(request.POST['plugin'])
plugin.set_option('enabled', True, project)
messages.add_message(
request, messages.SUCCESS,
constants.OK_PLUGIN_ENABLED.format(name=plugin.get_title()),
)
def _handle_disable_plugin(self, request, project):
plugin = plugins.get(request.POST['plugin'])
plugin.set_option('enabled', False, project)
messages.add_message(
request, messages.SUCCESS,
constants.OK_PLUGIN_DISABLED.format(name=plugin.get_title()),
)
def handle(self, request, organization, team, project):
op = request.POST.get('op')
if op == 'enable':
self._handle_enable_plugin(request, project)
return HttpResponseRedirect(request.path)
elif op == 'disable':
self._handle_disable_plugin(request, project)
return HttpResponseRedirect(request.path)
if op == 'save-settings':
general_form = NotificationSettingsForm(
data=request.POST,
prefix='general',
initial={
'subject_prefix': project.get_option(
'mail:subject_prefix', settings.EMAIL_SUBJECT_PREFIX),
},
)
if general_form.is_valid():
project.update_option(
'mail:subject_prefix', general_form.cleaned_data['subject_prefix'])
messages.add_message(
request, messages.SUCCESS,
OK_SETTINGS_SAVED)
return HttpResponseRedirect(request.path)
else:
general_form = NotificationSettingsForm(
prefix='general',
initial={
'subject_prefix': project.get_option(
'mail:subject_prefix', settings.EMAIL_SUBJECT_PREFIX),
},
)
enabled_plugins = []
other_plugins = []
for plugin in self._iter_plugins():
if plugin.is_enabled(project):
content = plugin.get_notification_doc_html()
form = plugin.project_conf_form
if form is not None:
action, view = plugin_config(plugin, project, request)
if action == 'redirect':
messages.add_message(
request, messages.SUCCESS,
constants.OK_PLUGIN_SAVED.format(name=plugin.get_title()),
)
return HttpResponseRedirect(request.path)
enabled_plugins.append((plugin, mark_safe(content + view)))
elif content:
enabled_plugins.append((plugin, mark_safe(content)))
else:
other_plugins.append(plugin)
context = {
'page': 'notifications',
'enabled_plugins': enabled_plugins,
'other_plugins': other_plugins,
'general_form': general_form,
}
return self.respond('sentry/project-notifications.html', context)
| llonchj/sentry | src/sentry/web/frontend/project_notifications.py | Python | bsd-3-clause | 4,114 |
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the file loading mechanism.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import iris
@tests.skip_data
class TestFileLoad(tests.IrisTest):
def _test_file(self, src_path, reference_filename):
"""
Checks the result of loading the given file spec, or creates the
reference file if it doesn't exist.
"""
cubes = iris.load_raw(tests.get_data_path(src_path))
self.assertCML(cubes, ['file_load', reference_filename])
def test_no_file(self):
# Test an IOError is received when a filename is given which doesn't match any files
real_file = ['PP', 'globClim1', 'theta.pp']
non_existant_file = ['PP', 'globClim1', 'no_such_file*']
with self.assertRaises(IOError):
iris.load(tests.get_data_path(non_existant_file))
with self.assertRaises(IOError):
iris.load([tests.get_data_path(non_existant_file), tests.get_data_path(real_file)])
with self.assertRaises(IOError):
iris.load([tests.get_data_path(real_file), tests.get_data_path(non_existant_file)])
def test_single_file(self):
src_path = ['PP', 'globClim1', 'theta.pp']
self._test_file(src_path, 'theta_levels.cml')
def test_star_wildcard(self):
src_path = ['PP', 'globClim1', '*_wind.pp']
self._test_file(src_path, 'wind_levels.cml')
def test_query_wildcard(self):
src_path = ['PP', 'globClim1', '?_wind.pp']
self._test_file(src_path, 'wind_levels.cml')
def test_charset_wildcard(self):
src_path = ['PP', 'globClim1', '[rstu]_wind.pp']
self._test_file(src_path, 'u_wind_levels.cml')
def test_negative_charset_wildcard(self):
src_path = ['PP', 'globClim1', '[!rstu]_wind.pp']
self._test_file(src_path, 'v_wind_levels.cml')
def test_empty_file(self):
with self.temp_filename(suffix='.pp') as temp_filename:
with open(temp_filename, "a") as file:
with self.assertRaises(iris.exceptions.TranslationError):
iris.load(temp_filename)
if __name__ == "__main__":
tests.main()
| QuLogic/iris | lib/iris/tests/test_file_load.py | Python | gpl-3.0 | 3,064 |
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import DoubanProvider
class DoubanTests(OAuth2TestsMixin, TestCase):
provider_id = DoubanProvider.id
def get_mocked_response(self):
return MockedResponse(200, """
{"name": "guoqiao",
"created": "2009-02-18 01:07:52",
"is_suicide": false,
"alt": "http://www.douban.com/people/qguo/",
"avatar": "http://img3.douban.com/icon/u3659811-3.jpg",
"signature": "",
"uid": "qguo",
"is_banned": false,
"desc": "\u4e0d\u662f\u5f88\u7231\u8bfb\u4e66",
"type": "user",
"id": "3659811",
"large_avatar": "http://img3.douban.com/icon/up3659811-3.jpg"}
""")
| Alexander-M-Waldman/local_currency_site | lib/python2.7/site-packages/allauth/socialaccount/providers/douban/tests.py | Python | gpl-3.0 | 838 |
#!/usr/bin/env python
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import os
import stat
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.module_utils._text import to_bytes
from ansible.playbook.block import Block
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
from ansible.utils.display import Display
display = Display()
class PlaybookCLI(CLI):
''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
See the project home page (https://docs.ansible.com) for more information. '''
name = 'ansible-playbook'
def init_parser(self):
# create parser for CLI options
super(PlaybookCLI, self).init_parser(
usage="%prog [options] playbook.yml [playbook2 ...]",
desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.")
opt_help.add_connect_options(self.parser)
opt_help.add_meta_options(self.parser)
opt_help.add_runas_options(self.parser)
opt_help.add_subset_options(self.parser)
opt_help.add_check_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
# ansible playbook specific opts
self.parser.add_argument('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
self.parser.add_argument('--list-tags', dest='listtags', action='store_true',
help="list all available tags")
self.parser.add_argument('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
self.parser.add_argument('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
self.parser.add_argument('args', help='Playbook(s)', metavar='playbook', nargs='+')
def post_process_args(self, options):
options = super(PlaybookCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options, runas_opts=True, fork_opts=True)
return options
def run(self):
super(PlaybookCLI, self).run()
# Note: slightly wrong, this is written so that implicit localhost
# manages passwords
sshpass = None
becomepass = None
passwords = {}
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
# also prep plugin paths
b_playbook_dirs = []
for playbook in context.CLIARGS['args']:
# resolve if it is collection playbook with FQCN notation, if not, leaves unchanged
resource = _get_collection_playbook_path(playbook)
if resource is not None:
playbook_collection = resource[2]
else:
# not an FQCN so must be a file
if not os.path.exists(playbook):
raise AnsibleError("the playbook: %s could not be found" % playbook)
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
# check if playbook is from collection (path can be passed directly)
playbook_collection = _get_collection_name_from_path(playbook)
# don't add collection playbooks to adjacency search path
if not playbook_collection:
# setup dirs to enable loading plugins from all playbooks in case they add callbacks/inventory/etc
b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
add_all_plugin_dirs(b_playbook_dir)
b_playbook_dirs.append(b_playbook_dir)
if b_playbook_dirs:
# allow collections adjacent to these playbooks
# we use list copy to avoid opening up 'adjacency' in the previous loop
AnsibleCollectionConfig.playbook_paths = b_playbook_dirs
# don't deal with privilege escalation or passwords when we don't need to
if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
(sshpass, becomepass) = self.ask_passwords()
passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
# create base objects
loader, inventory, variable_manager = self._play_prereqs()
# (which is not returned in list_hosts()) is taken into account for
# warning if inventory is empty. But it can't be taken into account for
# checking if limit doesn't match any hosts. Instead we don't worry about
# limit if only implicit localhost was in inventory to start with.
#
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
CLI.get_host_list(inventory, context.CLIARGS['subset'])
# flush fact cache if requested
if context.CLIARGS['flush_cache']:
self._flush_cache(inventory, variable_manager)
# create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
variable_manager=variable_manager, loader=loader,
passwords=passwords)
results = pbex.run()
if isinstance(results, list):
for p in results:
display.display('\nplaybook: %s' % p['playbook'])
for idx, play in enumerate(p['plays']):
if play._included_path is not None:
loader.set_basedir(play._included_path)
else:
pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
loader.set_basedir(pb_dir)
# show host list if we were able to template into a list
try:
host_list = ','.join(play.hosts)
except TypeError:
host_list = ''
msg = "\n play #%d (%s): %s" % (idx + 1, host_list, play.name)
mytags = set(play.tags)
msg += '\tTAGS: [%s]' % (','.join(mytags))
if context.CLIARGS['listhosts']:
playhosts = set(inventory.get_hosts(play.hosts))
msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
for host in playhosts:
msg += "\n %s" % host
display.display(msg)
all_tags = set()
if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
taskmsg = ''
if context.CLIARGS['listtasks']:
taskmsg = ' tasks:\n'
def _process_block(b):
taskmsg = ''
for task in b.block:
if isinstance(task, Block):
taskmsg += _process_block(task)
else:
if task.action in C._ACTION_META and task.implicit:
continue
all_tags.update(task.tags)
if context.CLIARGS['listtasks']:
cur_tags = list(mytags.union(set(task.tags)))
cur_tags.sort()
if task.name:
taskmsg += " %s" % task.get_name()
else:
taskmsg += " %s" % task.action
taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
return taskmsg
all_vars = variable_manager.get_vars(play=play)
for block in play.compile():
block = block.filter_tagged_tasks(all_vars)
if not block.has_tasks():
continue
taskmsg += _process_block(block)
if context.CLIARGS['listtags']:
cur_tags = list(mytags.union(all_tags))
cur_tags.sort()
taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
display.display(taskmsg)
return 0
else:
return results
@staticmethod
def _flush_cache(inventory, variable_manager):
for host in inventory.list_hosts():
hostname = host.get_name()
variable_manager.clear_facts(hostname)
def main(args=None):
PlaybookCLI.cli_executor(args)
if __name__ == '__main__':
main()
| mattclay/ansible | lib/ansible/cli/playbook.py | Python | gpl-3.0 | 10,305 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from contextlib import contextmanager
from pants.goal.products import MultipleRootedProducts, Products
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_open
from pants_test.base_test import BaseTest
class ProductsTest(BaseTest):
def setUp(self):
super(ProductsTest, self).setUp()
self.products = Products()
def test_require(self):
self.products.require('foo')
self.assertTrue(self.products.isrequired('foo'))
self.assertFalse(self.products.isrequired('bar'))
# require should not cross-contaminate require_data
self.assertFalse(self.products.is_required_data('foo'))
self.assertFalse(self.products.is_required_data('bar'))
def test_get(self):
foo_product_mapping1 = self.products.get('foo')
foo_product_mapping2 = self.products.get('foo')
self.assertIsInstance(foo_product_mapping1, Products.ProductMapping)
self.assertIs(foo_product_mapping1, foo_product_mapping2)
def test_get_does_not_require(self):
self.assertFalse(self.products.isrequired('foo'))
self.products.get('foo')
self.assertFalse(self.products.isrequired('foo'))
self.products.require('foo')
self.assertTrue(self.products.isrequired('foo'))
def test_require_data(self):
self.products.require_data('foo')
self.assertTrue(self.products.is_required_data('foo'))
self.assertFalse(self.products.is_required_data('bar'))
# require_data should not cross-contaminate require
self.assertFalse(self.products.isrequired('foo'))
self.assertFalse(self.products.isrequired('bar'))
def test_get_data(self):
self.assertIsNone(self.products.get_data('foo'))
data1 = self.products.get_data('foo', dict)
data2 = self.products.get_data('foo', dict)
self.assertIsInstance(data1, dict)
self.assertIs(data1, data2)
def test_get_data_does_not_require_data(self):
self.assertFalse(self.products.is_required_data('foo'))
self.products.get_data('foo')
self.assertFalse(self.products.is_required_data('foo'))
self.products.require_data('foo')
self.assertTrue(self.products.is_required_data('foo'))
def test_empty_products(self):
foo_product_mapping = self.products.get('foo')
self.assertFalse(foo_product_mapping)
@contextmanager
def add_products(self, context_products, product_type, target, *products):
product_mapping = context_products.get(product_type)
with temporary_dir() as outdir:
def create_product(product):
with safe_open(os.path.join(outdir, product), mode='w') as fp:
fp.write(product)
return product
product_mapping.add(target, outdir, map(create_product, products))
yield temporary_dir
def test_non_empty_products(self):
target = self.make_target('c')
with self.add_products(self.products, 'foo', target, 'a.class'):
foo_product_mapping = self.products.get('foo')
self.assertTrue(foo_product_mapping)
def test_empty_data(self):
foo_product_mapping = self.products.get_data('foo')
self.assertFalse(foo_product_mapping)
@contextmanager
def add_data(self, context_products, data_type, target, *products):
make_products = lambda: defaultdict(MultipleRootedProducts)
data_by_target = context_products.get_data(data_type, make_products)
with temporary_dir() as outdir:
def create_product(product):
abspath = os.path.join(outdir, product)
with safe_open(abspath, mode='w') as fp:
fp.write(product)
return abspath
data_by_target[target].add_abs_paths(outdir, map(create_product, products))
yield temporary_dir
def test_non_empty_data(self):
target = self.make_target('c')
with self.add_data(self.products, 'foo', target, 'a.class'):
foo_product_mapping = self.products.get_data('foo')
self.assertTrue(foo_product_mapping)
| ericzundel/pants | tests/python/pants_test/goal/test_products.py | Python | apache-2.0 | 4,205 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import account_invoice
import sale_order
import payment
| chienlieu2017/it_management | odoo/addons/website_portal_sale/models/__init__.py | Python | gpl-3.0 | 156 |
import m1
m1.<caret>
| jwren/intellij-community | python/testData/completion/underscoredItemsOrderModuleImport/a.py | Python | apache-2.0 | 20 |
from __future__ import unicode_literals
from unittest import expectedFailure
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django import forms
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.views.generic.base import View
from django.views.generic.edit import FormMixin, ModelFormMixin, CreateView
from . import views
from .models import Artist, Author
class FormMixinTests(TestCase):
def test_initial_data(self):
""" Test instance independence of initial data dict (see #16138) """
initial_1 = FormMixin().get_initial()
initial_1['foo'] = 'bar'
initial_2 = FormMixin().get_initial()
self.assertNotEqual(initial_1, initial_2)
def test_get_prefix(self):
""" Test prefix can be set (see #18872) """
test_string = 'test'
rf = RequestFactory()
get_request = rf.get('/')
class TestFormMixin(FormMixin):
request = get_request
default_kwargs = TestFormMixin().get_form_kwargs()
self.assertEqual(None, default_kwargs.get('prefix'))
set_mixin = TestFormMixin()
set_mixin.prefix = test_string
set_kwargs = set_mixin.get_form_kwargs()
self.assertEqual(test_string, set_kwargs.get('prefix'))
@override_settings(ROOT_URLCONF='generic_views.urls')
class BasicFormTests(TestCase):
def test_post_data(self):
res = self.client.post('/contact/', {'name': "Me", 'message': "Hello"})
self.assertRedirects(res, 'http://testserver/list/authors/')
class ModelFormMixinTests(TestCase):
def test_get_form(self):
form_class = views.AuthorGetQuerySetFormView().get_form_class()
self.assertEqual(form_class._meta.model, Author)
def test_get_form_checks_for_object(self):
mixin = ModelFormMixin()
mixin.request = RequestFactory().get('/')
self.assertEqual({'initial': {}, 'prefix': None},
mixin.get_form_kwargs())
@override_settings(ROOT_URLCONF='generic_views.urls')
class CreateViewTests(TestCase):
def test_create(self):
res = self.client.get('/edit/authors/create/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertFalse('object' in res.context)
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.post('/edit/authors/create/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_invalid(self):
res = self.client.post('/edit/authors/create/',
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertEqual(Author.objects.count(), 0)
def test_create_with_object_url(self):
res = self.client.post('/edit/artists/create/',
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
artist = Artist.objects.get(name='Rene Magritte')
self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % artist.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_create_with_redirect(self):
res = self.client.post('/edit/authors/create/redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_with_interpolated_redirect(self):
res = self.client.post('/edit/authors/create/interpolate_redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.all()[0].pk
self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
def test_create_with_special_properties(self):
res = self.client.get('/edit/authors/create/special/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertFalse('object' in res.context)
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/authors/create/special/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
obj = Author.objects.get(slug='randall-munroe')
self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk}))
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_without_redirect(self):
try:
self.client.post('/edit/authors/create/naive/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
except ImproperlyConfigured:
pass
def test_create_restricted(self):
res = self.client.post('/edit/authors/create/restricted/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/accounts/login/?next=/edit/authors/create/restricted/')
def test_create_view_with_restricted_fields(self):
class MyCreateView(CreateView):
model = Author
fields = ['name']
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name'])
def test_create_view_all_fields(self):
class MyCreateView(CreateView):
model = Author
fields = '__all__'
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name', 'slug'])
def test_create_view_without_explicit_fields(self):
class MyCreateView(CreateView):
model = Author
message = (
"Using ModelFormMixin (base class of MyCreateView) without the "
"'fields' attribute is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
@override_settings(ROOT_URLCONF='generic_views.urls')
class UpdateViewTests(TestCase):
def test_update_post(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@expectedFailure
def test_update_put(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.put('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
# Here is the expected failure. PUT data are not processed in any special
# way by django. So the request will equal to a POST without data, hence
# the form will be invalid and redisplayed with errors (status code 200).
# See also #12635
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_invalid(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_update_with_object_url(self):
a = Artist.objects.create(name='Rene Magritte')
res = self.client.post('/edit/artists/%d/update/' % a.pk,
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % a.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_update_with_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_with_interpolated_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/interpolate_redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.all()[0].pk
self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
def test_update_with_special_properties(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/author/%d/update/special/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/detail/author/%d/' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/update/naive/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
def test_update_get_object(self):
a = Author.objects.create(
pk=1,
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/update/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/update/',
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@override_settings(ROOT_URLCONF='generic_views.urls')
class DeleteViewTests(TestCase):
def test_delete_by_post(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html')
# Deletion with POST
res = self.client.post('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_by_delete(self):
# Deletion with browser compatible DELETE method
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.delete('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_interpolated_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/interpolate_redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/?deleted=%d' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_special_properties(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/confirm_delete.html')
res = self.client.post('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/delete/naive/' % a.pk)
| kswiat/django | tests/generic_views/test_edit.py | Python | bsd-3-clause | 17,034 |
import pdb;
import sys;
import linecache
def qdebug(options = None,
expanded = None,
typeformats = None,
individualformats = None,
watchers = None):
class QDebug:
def __init__(self,
options = None,
expanded = None,
typeformats = None,
individualformats = None,
watchers = None):
self.options = options
self.expandedINames = expanded
self.typeformats = typeformats
self.individualformats = individualformats
self.watchers = watchers
self.buffer = ""
if self.options == "listmodules":
self.handleListModules()
elif self.options == "listsymbols":
self.handleListSymbols(expanded)
else:
self.handleListVars()
def put(self, value):
#sys.stdout.write(value)
self.buffer += value
def putField(self, name, value):
self.put('%s="%s",' % (name, value))
def putItemCount(self, count):
self.put('value="<%s items>",' % count)
def putEllipsis(self):
self.put('{name="<incomplete>",value="",type="",numchild="0"},')
def cleanType(self, type):
t = str(type)
if t.startswith("<type '") and t.endswith("'>"):
t = t[7:-2]
if t.startswith("<class '") and t.endswith("'>"):
t = t[8:-2]
return t
def putType(self, type, priority = 0):
self.putField("type", self.cleanType(type))
def putAddress(self, addr):
self.put('addr="%s",' % cleanAddress(addr))
def putNumChild(self, numchild):
self.put('numchild="%s",' % numchild)
def putValue(self, value, encoding = None, priority = 0):
self.putField("value", value)
def putName(self, name):
self.put('name="%s",' % name)
def isExpanded(self, iname):
#self.warn("IS EXPANDED: %s in %s" % (iname, self.expandedINames))
if iname.startswith("None"):
raise "Illegal iname '%s'" % iname
#self.warn(" --> %s" % (iname in self.expandedINames))
return iname in self.expandedINames
def isExpandedIName(self, iname):
return iname in self.expandedINames
def itemFormat(self, item):
format = self.formats.get(str(cleanAddress(item.value.address)))
if format is None:
format = self.typeformats.get(stripClassTag(str(item.value.type)))
return format
def dumpFrame(self, frame):
for var in frame.f_locals.keys():
if var == "__file__":
continue
#if var == "__name__":
# continue
if var == "__package__":
continue
if var == "qdebug":
continue
if var != '__builtins__':
value = frame.f_locals[var]
self.dumpValue(value, var, "local.%s" % var)
def dumpValue(self, value, name, iname):
t = type(value)
tt = self.cleanType(t)
if tt == "module" or tt == "function":
return
if str(value).startswith("<class '"):
return
# FIXME: Should we?
if str(value).startswith("<enum-item "):
return
self.put("{")
self.putField("iname", iname)
self.putName(name)
self.putType(tt)
if tt == "NoneType":
self.putValue("None")
self.putNumChild(0)
elif tt == "list" or tt == "tuple":
self.putItemCount(len(value))
#self.putValue(value)
self.put("children=[")
for i in xrange(len(value)):
self.dumpValue(value[i], str(i), "%s.%d" % (iname, i))
self.put("]")
elif tt == "str":
v = value
self.putValue(v.encode('hex'))
self.putField("valueencoded", 6)
self.putNumChild(0)
elif tt == "unicode":
v = value
self.putValue(v.encode('hex'))
self.putField("valueencoded", 6)
self.putNumChild(0)
elif tt == "buffer":
v = str(value)
self.putValue(v.encode('hex'))
self.putField("valueencoded", 6)
self.putNumChild(0)
elif tt == "xrange":
b = iter(value).next()
e = b + len(value)
self.putValue("(%d, %d)" % (b, e))
self.putNumChild(0)
elif tt == "dict":
self.putItemCount(len(value))
self.putField("childnumchild", 2)
self.put("children=[")
i = 0
for (k, v) in value.iteritems():
self.put("{")
self.putType(" ")
self.putValue("%s: %s" % (k, v))
if self.isExpanded(iname):
self.put("children=[")
self.dumpValue(k, "key", "%s.%d.k" % (iname, i))
self.dumpValue(v, "value", "%s.%d.v" % (iname, i))
self.put("]")
self.put("},")
i += 1
self.put("]")
elif tt == "class":
pass
elif tt == "module":
pass
elif tt == "function":
pass
elif str(value).startswith("<enum-item "):
# FIXME: Having enums always shown like this is not nice.
self.putValue(str(value)[11:-1])
self.putNumChild(0)
else:
v = str(value)
p = v.find(" object at ")
if p > 1:
v = "@" + v[p + 11:-1]
self.putValue(v)
if self.isExpanded(iname):
self.put("children=[")
for child in dir(value):
if child == "__dict__":
continue
if child == "__doc__":
continue
if child == "__module__":
continue
attr = getattr(value, child)
if callable(attr):
continue
try:
self.dumpValue(attr, child, "%s.%s" % (iname, child))
except:
pass
self.put("],")
self.put("},")
def warn(self, msg):
self.putField("warning", msg)
def handleListVars(self):
# Trigger error to get a backtrace.
frame = None
#self.warn("frame: %s" % frame)
try:
raise ZeroDivisionError
except ZeroDivisionError:
frame = sys.exc_info()[2].tb_frame.f_back
limit = 30
n = 0
isActive = False
while frame is not None and n < limit:
#self.warn("frame: %s" % frame.f_locals.keys())
lineno = frame.f_lineno
code = frame.f_code
filename = code.co_filename
name = code.co_name
if isActive:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame.f_globals)
self.dumpFrame(frame)
if name == "<module>":
isActive = False
if name == "trace_dispatch":
isActive = True
frame = frame.f_back
n = n + 1
#sys.stdout.flush()
def handleListModules(self):
self.put("modules=[");
for name in sys.modules:
self.put("{")
self.putName(name)
self.putValue(sys.modules[name])
self.put("},")
self.put("]")
#sys.stdout.flush()
def handleListSymbols(self, module):
#self.put("symbols=%s" % dir(sys.modules[module]))
self.put("symbols=[");
for name in sys.modules:
self.put("{")
self.putName(name)
#self.putValue(sys.modules[name])
self.put("},")
self.put("]")
#sys.stdout.flush()
d = QDebug(options, expanded, typeformats, individualformats, watchers)
#print d.buffer
sys.stdout.write(d.buffer)
sys.stdout.flush()
| renatofilho/QtCreator | share/qtcreator/gdbmacros/pdumper.py | Python | lgpl-2.1 | 9,000 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
PointsAlongLines.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Giovanni Manghi
Email : giovanni dot manghi at naturalgis dot pt
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giovanni Manghi'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Giovanni Manghi'
from qgis.core import (QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterString,
QgsProcessingParameterNumber,
QgsProcessingParameterVectorDestination,
QgsProcessingParameterDefinition,
QgsProcessing)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import isWindows
class PointsAlongLines(GdalAlgorithm):
INPUT = 'INPUT'
GEOMETRY = 'GEOMETRY'
DISTANCE = 'DISTANCE'
OPTIONS = 'OPTIONS'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer'),
[QgsProcessing.TypeVectorLine]))
self.addParameter(QgsProcessingParameterString(self.GEOMETRY,
self.tr('Geometry column name'),
defaultValue='geometry'))
self.addParameter(QgsProcessingParameterNumber(self.DISTANCE,
self.tr('Distance from line start represented as fraction of line length'),
type=QgsProcessingParameterNumber.Double,
minValue=0,
maxValue=1,
defaultValue=0.5))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(options_param)
self.addParameter(QgsProcessingParameterVectorDestination(self.OUTPUT,
self.tr('Points along lines'),
QgsProcessing.TypeVectorPoint))
def name(self):
return 'pointsalonglines'
def displayName(self):
return self.tr('Points along lines')
def group(self):
return self.tr('Vector geoprocessing')
def groupId(self):
return 'vectorgeoprocessing'
def commandName(self):
return 'ogr2ogr'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
fields = source.fields()
ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
distance = self.parameterAsDouble(parameters, self.DISTANCE, context)
geometry = self.parameterAsString(parameters, self.GEOMETRY, context)
outFile = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, outFile)
options = self.parameterAsString(parameters, self.OPTIONS, context)
output, outputFormat = GdalUtils.ogrConnectionStringAndFormat(outFile, context)
other_fields_exist = any(
f for f in fields
if f.name() != geometry
)
other_fields = ',*' if other_fields_exist else ''
arguments = [
output,
ogrLayer,
'-dialect',
'sqlite',
'-sql',
f'SELECT ST_Line_Interpolate_Point({geometry}, {distance}) AS {geometry}{other_fields} FROM "{layerName}"'
]
if options:
arguments.append(options)
if outputFormat:
arguments.append(f'-f {outputFormat}')
return ['ogr2ogr', GdalUtils.escapeAndJoin(arguments)]
| rduivenvoorde/QGIS | python/plugins/processing/algs/gdal/PointsAlongLines.py | Python | gpl-2.0 | 5,348 |
# Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
from pox.lib.addresses import *
import pox.lib.packet as pkt
from struct import pack
import time
class SocketWedge (object):
def __init__ (self, socket):
self._socket = socket
def send (self, string, *args, **kw):
r = self._socket.send(string, *args, **kw)
self._send_out(string, r)
return r
def recv (self, bufsize, *args, **kw):
r = self._socket.recv(bufsize, *args, **kw)
self._recv_out(r)
return r
def __getattr__ (self, n):
return getattr(self._socket, n)
class PCapWriter (object):
def __init__ (self, outstream, socket = None, flush = False,
local_addrs = (None,None,None),
remote_addrs = (None,None,None)):
"""
outstream is the stream to write the PCAP trace to.
Ethernet addresses have to be faked, and it can be convenient to
fake IP and TCP addresses as well. Thus, you can specify local_addrs
or remote_addrs. These are tuples of (EthAddr, IPAddr, TCPPort).
Any item that is None gets a default value.
"""
self._out = outstream
self._flush = flush
if socket is not None:
remote = socket.getpeername()
local = socket.getsockname()
else:
remote = ("1.1.1.1",1)
local = ("0.0.0.0",0)
def create_packet (e1,e2,i1,i2,t1,t2):
e = pkt.ethernet(
src = e1,
dst = e2,
type = pkt.ethernet.IP_TYPE)
i = pkt.ipv4(
srcip = i1,
dstip = i2,
protocol = pkt.ipv4.TCP_PROTOCOL)
t = pkt.tcp(
srcport = t1,
dstport = t2,
off = 5,
win = 1)
t.ACK = True
i.payload = t
e.payload = i
return e
self._c_to_s = create_packet(
local_addrs[0] or EthAddr("\x02" + "\x00" * 5),
remote_addrs[0] or EthAddr("\x02" + "\x11" * 5),
local_addrs[1] or IPAddr(local[0]),
remote_addrs[1] or IPAddr(remote[0]),
local_addrs[2] or local[1],
remote_addrs[2] or remote[1],
)
self._s_to_c = create_packet(
remote_addrs[0] or EthAddr("\x02" + "\x11" * 5),
local_addrs[0] or EthAddr("\x02" + "\x00" * 5),
remote_addrs[1] or IPAddr(remote[0]),
local_addrs[1] or IPAddr(local[0]),
remote_addrs[2] or remote[1],
local_addrs[2] or local[1],
)
outstream.write(pack("IHHiIII",
0xa1b2c3d4, # Magic
2,4, # Version
time.timezone, # TZ offset
0, # Accuracy of timestamps (apparently 0 is OK)
0x7fffFFff, # Snaplen
1 # Ethernet
))
def write (self, outgoing, buf):
if len(buf) == 0: return
e = self._c_to_s if outgoing else self._s_to_c
e2 = self._c_to_s if not outgoing else self._s_to_c
l = len(buf)
e.payload.payload.payload = buf
buf = e.pack()
t = time.time()
ut = t - int(t)
t = int(t)
ut = int(ut * 1000000)
self._out.write(pack("IIII",
t,ut, # Timestamp
len(buf), # Saved size
len(buf), # Original size
))
self._out.write(buf)
if self._flush: self._out.flush()
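# Advance the fake TCP sequence/ack numbers by the payload length so the next captured chunk continues the same stream.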
e.next.next.seq += l
e2.next.next.ack += l
class CaptureSocket (SocketWedge):
"""
Wraps a TCP socket and writes a faked PCAP format trace
"""
def __init__ (self, socket, outstream, close = True,
local_addrs = (None,None,None),
remote_addrs = (None,None,None)):
"""
socket is the socket to be wrapped.
outstream is the stream to write the PCAP trace to.
Ethernet addresses have to be faked, and it can be convenient to
fake IP and TCP addresses as well. Thus, you can specify local_addrs
or remote_addrs. These are tuples of (EthAddr, IPAddr, TCPPort).
Any item that is None gets a default value.
"""
super(CaptureSocket, self).__init__(socket)
self._close = close
self._writer = PCapWriter(outstream, socket=socket,
local_addrs=local_addrs,
remote_addrs=remote_addrs)
def _recv_out (self, buf):
try:
self._writer.write(False, buf)
except Exception:
pass
def _send_out (self, buf, r):
try:
self._writer.write(True, buf[:r])
except Exception:
pass
def close (self, *args, **kw):
if self._close:
try:
self._writer._out.close()
except Exception:
pass
return self._socket.close(*args, **kw)
if __name__ == "__main__":
"""
Test with:
nc -v -v -l 9933
"""
import socket
sock = socket.create_connection(("127.0.0.1",9933))
s = CaptureSocket(sock, file("test.pcap", "w"))
while True:
d = s.recv(1024)
d = d.upper()
import sys
import time
import random
time.sleep(random.random() * 1.5)
sys.stdout.write(d)
s.send(d)
| 0xdyu/RouteFlow-Exodus | pox/pox/lib/socketcapture.py | Python | apache-2.0 | 5,511 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
from lstm import bi_lstm_unroll
from sort_io import BucketSentenceIter, default_build_vocab
def Perplexity(label, pred):
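# Perplexity: exp of the mean negative log-likelihood of the true labels under the predicted distribution.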
label = label.T.reshape((-1,))
loss = 0.
for i in range(pred.shape[0]):
loss += -np.log(max(1e-10, pred[i][int(label[i])]))
return np.exp(loss / label.size)
if __name__ == '__main__':
batch_size = 100
buckets = []
num_hidden = 300
num_embed = 512
num_lstm_layer = 2
num_epoch = 1
learning_rate = 0.1
momentum = 0.9
contexts = [mx.context.gpu(i) for i in range(1)]
vocab = default_build_vocab("./data/sort.train.txt")
def sym_gen(seq_len):
return bi_lstm_unroll(seq_len, len(vocab),
num_hidden=num_hidden, num_embed=num_embed,
num_label=len(vocab))
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_states = init_c + init_h
data_train = BucketSentenceIter("./data/sort.train.txt", vocab,
buckets, batch_size, init_states)
data_val = BucketSentenceIter("./data/sort.valid.txt", vocab,
buckets, batch_size, init_states)
if len(buckets) == 1:
symbol = sym_gen(buckets[0])
else:
symbol = sym_gen
model = mx.model.FeedForward(ctx=contexts,
symbol=symbol,
num_epoch=num_epoch,
learning_rate=learning_rate,
momentum=momentum,
wd=0.00001,
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
model.fit(X=data_train, eval_data=data_val,
eval_metric = mx.metric.np(Perplexity),
batch_end_callback=mx.callback.Speedometer(batch_size, 50),)
model.save("sort")
| Mega-DatA-Lab/mxnet | example/bi-lstm-sort/lstm_sort.py | Python | apache-2.0 | 3,168 |
"""
Unit tests for deleting a course in a chosen modulestore
"""
import unittest
import mock
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from django.core.management import call_command, CommandError
from contentstore.tests.utils import CourseTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.django import modulestore
class DeleteCourseTest(CourseTestCase):
"""
Test for course deleting functionality of the 'delete_course' command
"""
YESNO_PATCH_LOCATION = 'contentstore.management.commands.delete_course.query_yes_no'
def setUp(self):
super(DeleteCourseTest, self).setUp()
org = 'TestX'
course_number = 'TS01'
course_run = '2015_Q1'
# Create a course using split modulestore
self.course = CourseFactory.create(
org=org,
number=course_number,
run=course_run
)
def test_invalid_key_not_found(self):
"""
Test for when a course key is malformed
"""
errstring = "Invalid course_key: 'foo/TestX/TS01/2015_Q7'."
with self.assertRaisesRegexp(CommandError, errstring):
call_command('delete_course', 'foo/TestX/TS01/2015_Q7')
def test_course_key_not_found(self):
"""
Test for when a non-existing course key is entered
"""
errstring = "Course with 'TestX/TS01/2015_Q7' key not found."
with self.assertRaisesRegexp(CommandError, errstring):
call_command('delete_course', 'TestX/TS01/2015_Q7')
def test_course_deleted(self):
"""
Testing if the entered course was deleted
"""
#Test if the course that is about to be deleted exists
self.assertIsNotNone(modulestore().get_course(SlashSeparatedCourseKey("TestX", "TS01", "2015_Q1")))
with mock.patch(self.YESNO_PATCH_LOCATION) as patched_yes_no:
patched_yes_no.return_value = True
call_command('delete_course', 'TestX/TS01/2015_Q1')
self.assertIsNone(modulestore().get_course(SlashSeparatedCourseKey("TestX", "TS01", "2015_Q1")))
| solashirai/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_delete_course.py | Python | agpl-3.0 | 2,155 |
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime, timedelta
from openerp import netsvc
from openerp.addons import decimal_precision as dp
from openerp.osv import fields, orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DFORMAT
from openerp.tools.translate import _
class contract_init(orm.Model):
_name = 'hr.contract.init'
_description = 'Initial Contract Settings'
_inherit = 'ir.needaction_mixin'
_columns = {
'name': fields.char(
'Name',
size=64,
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
),
'date': fields.date(
'Effective Date',
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
),
'wage_ids': fields.one2many(
'hr.contract.init.wage',
'contract_init_id',
'Starting Wages', readonly=True,
states={'draft': [('readonly', False)]},
),
'struct_id': fields.many2one(
'hr.payroll.structure',
'Payroll Structure',
readonly=True,
states={'draft': [('readonly', False)]},
),
'trial_period': fields.integer(
'Trial Period',
readonly=True,
states={'draft': [('readonly', False)]},
help="Length of Trial Period, in days",
),
'active': fields.boolean(
'Active',
),
'state': fields.selection(
[
('draft', 'Draft'),
('approve', 'Approved'),
('decline', 'Declined'),
],
'State',
readonly=True,
),
}
_defaults = {
'trial_period': 0,
'active': True,
'state': 'draft',
}
# Return records with latest date first
_order = 'date desc'
def _needaction_domain_get(self, cr, uid, context=None):
users_obj = self.pool.get('res.users')
if users_obj.has_group(cr, uid, 'base.group_hr_director'):
domain = [('state', 'in', ['draft'])]
return domain
return False
def unlink(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
data = self.read(cr, uid, ids, ['state'], context=context)
for d in data:
if d['state'] in ['approve', 'decline']:
raise orm.except_orm(
_('Error'),
_('You may not delete a record that is not in a '
'"Draft" state')
)
return super(contract_init, self).unlink(cr, uid, ids, context=context)
def set_to_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'draft',
}, context=context)
wf_service = netsvc.LocalService("workflow")
for i in ids:
wf_service.trg_delete(uid, 'hr.contract.init', i, cr)
wf_service.trg_create(uid, 'hr.contract.init', i, cr)
return True
def state_approve(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approve'}, context=context)
return True
def state_decline(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'decline'}, context=context)
return True
class init_wage(orm.Model):
_name = 'hr.contract.init.wage'
_description = 'Starting Wages'
_columns = {
'job_id': fields.many2one(
'hr.job',
'Job',
),
'starting_wage': fields.float(
'Starting Wage',
digits_compute=dp.get_precision('Payroll'),
required=True
),
'is_default': fields.boolean(
'Use as Default',
help="Use as default wage",
),
'contract_init_id': fields.many2one(
'hr.contract.init',
'Contract Settings',
),
'category_ids': fields.many2many(
'hr.employee.category',
'contract_init_category_rel',
'contract_init_id',
'category_id',
'Tags',
),
}
def _rec_message(self, cr, uid, ids, context=None):
return _('A Job Position cannot be referenced more than once in a '
'Contract Settings record.')
_sql_constraints = [
('unique_job_cinit', 'UNIQUE(job_id,contract_init_id)', _rec_message),
]
def unlink(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
data = self.read(cr, uid, ids, ['contract_init_id'], context=context)
for d in data:
if not d.get('contract_init_id', False):
continue
d2 = self.pool.get(
'hr.contract.init').read(cr, uid, d['contract_init_id'][0],
['state'], context=context)
if d2['state'] in ['approve', 'decline']:
raise orm.except_orm(
_('Error'),
                    _('You may not delete a record that is not in a '
                      '"Draft" state')
)
return super(init_wage, self).unlink(cr, uid, ids, context=context)
class hr_contract(orm.Model):
_inherit = 'hr.contract'
def _get_wage(self, cr, uid, context=None, job_id=None):
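        # Wage resolution order (as implemented below): a wage line whose job
        # matches job_id wins, then a line whose employee-category tags match
        # one of the job's tags, and finally the line flagged as the default
        # wage is used as a fallback.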
res = 0
default = 0
init = self.get_latest_initial_values(cr, uid, context=context)
if job_id:
catdata = self.pool.get('hr.job').read(
cr, uid, job_id, ['category_ids'], context=context)
else:
catdata = False
if init is not None:
for line in init.wage_ids:
if job_id is not None and line.job_id.id == job_id:
res = line.starting_wage
elif catdata:
cat_id = False
category_ids = [c.id for c in line.category_ids]
for ci in catdata['category_ids']:
if ci in category_ids:
cat_id = ci
break
if cat_id:
res = line.starting_wage
if line.is_default and default == 0:
default = line.starting_wage
if res != 0:
break
if res == 0:
res = default
return res
def _get_struct(self, cr, uid, context=None):
res = False
init = self.get_latest_initial_values(cr, uid, context=context)
if init is not None and init.struct_id:
res = init.struct_id.id
return res
def _get_trial_date_start(self, cr, uid, context=None):
res = False
init = self.get_latest_initial_values(cr, uid, context=context)
if init is not None and init.trial_period and init.trial_period > 0:
res = datetime.now().strftime(OE_DFORMAT)
return res
def _get_trial_date_end(self, cr, uid, context=None):
res = False
init = self.get_latest_initial_values(cr, uid, context=context)
if init is not None and init.trial_period and init.trial_period > 0:
dEnd = datetime.now().date() + timedelta(days=init.trial_period)
res = dEnd.strftime(OE_DFORMAT)
return res
_defaults = {
'wage': _get_wage,
'struct_id': _get_struct,
'trial_date_start': _get_trial_date_start,
'trial_date_end': _get_trial_date_end,
}
def onchange_job(self, cr, uid, ids, job_id, context=None):
res = False
if job_id:
wage = self._get_wage(cr, uid, context=context, job_id=job_id)
res = {'value': {'wage': wage}}
return res
def onchange_trial(self, cr, uid, ids, trial_date_start, context=None):
res = {'value': {'trial_date_end': False}}
init = self.get_latest_initial_values(cr, uid, context=context)
if init is not None and init.trial_period and init.trial_period > 0:
dStart = datetime.strptime(trial_date_start, OE_DFORMAT)
dEnd = dStart + timedelta(days=init.trial_period)
res['value']['trial_date_end'] = dEnd.strftime(OE_DFORMAT)
return res
def get_latest_initial_values(self, cr, uid, today_str=None, context=None):
"""Return a record with an effective date before today_str
but greater than all others
"""
init_obj = self.pool.get('hr.contract.init')
if today_str is None:
today_str = datetime.now().strftime(OE_DFORMAT)
dToday = datetime.strptime(today_str, OE_DFORMAT).date()
res = None
ids = init_obj.search(
cr, uid, [('date', '<=', today_str), ('state', '=', 'approve')],
context=context)
for init in init_obj.browse(cr, uid, ids, context=context):
d = datetime.strptime(init.date, OE_DFORMAT).date()
if d <= dToday:
if res is None:
res = init
elif d > datetime.strptime(res.date, OE_DFORMAT).date():
res = init
return res
|
microcom/hr
|
hr_contract_init/hr_contract.py
|
Python
|
agpl-3.0
| 10,098
|
# -*- coding: utf-8 -*-
import re
import logging
import difflib
import mpd
from client.mic import Mic
# Standard module stuff
WORDS = ["MUSIC", "SPOTIFY"]
def handle(text, mic, profile):
"""
    Responds to user-input, typically speech text, by starting the music mode.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
logger = logging.getLogger(__name__)
kwargs = {}
if 'mpdclient' in profile:
if 'server' in profile['mpdclient']:
kwargs['server'] = profile['mpdclient']['server']
if 'port' in profile['mpdclient']:
kwargs['port'] = int(profile['mpdclient']['port'])
logger.debug("Preparing to start music module")
try:
mpdwrapper = MPDWrapper(**kwargs)
except:
logger.error("Couldn't connect to MPD server", exc_info=True)
mic.say("I'm sorry. It seems that Spotify is not enabled. Please " +
"read the documentation to learn how to configure Spotify.")
return
mic.say("Please give me a moment, I'm loading your Spotify playlists.")
# FIXME: Make this configurable
persona = 'JASPER'
logger.debug("Starting music mode")
music_mode = MusicMode(persona, mic, mpdwrapper)
music_mode.handleForever()
logger.debug("Exiting music mode")
return
def isValid(text):
"""
    Returns True if the input is related to music.
Arguments:
text -- user-input, typically transcribed speech
"""
return any(word in text.upper() for word in WORDS)
# The interesting part
class MusicMode(object):
def __init__(self, PERSONA, mic, mpdwrapper):
self._logger = logging.getLogger(__name__)
self.persona = PERSONA
# self.mic - we're actually going to ignore the mic they passed in
self.music = mpdwrapper
# index spotify playlists into new dictionary and language models
phrases = ["STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS",
"LOUDER", "SOFTER", "LOWER", "HIGHER", "VOLUME",
"PLAYLIST"]
phrases.extend(self.music.get_soup_playlist())
music_stt_engine = mic.active_stt_engine.get_instance('music', phrases)
self.mic = Mic(mic.speaker,
mic.passive_stt_engine,
music_stt_engine)
def delegateInput(self, input):
command = input.upper()
# check if input is meant to start the music module
if "PLAYLIST" in command:
command = command.replace("PLAYLIST", "")
elif "STOP" in command:
self.mic.say("Stopping music")
self.music.stop()
return
elif "PLAY" in command:
self.mic.say("Playing %s" % self.music.current_song())
self.music.play()
return
elif "PAUSE" in command:
self.mic.say("Pausing music")
            # use stop() instead of pause() because we would otherwise need a
            # way to keep track of pause/play state
self.music.stop()
return
elif any(ext in command for ext in ["LOUDER", "HIGHER"]):
self.mic.say("Louder")
self.music.volume(interval=10)
self.music.play()
return
elif any(ext in command for ext in ["SOFTER", "LOWER"]):
self.mic.say("Softer")
self.music.volume(interval=-10)
self.music.play()
return
elif "NEXT" in command:
self.mic.say("Next song")
self.music.play() # backwards necessary to get mopidy to work
self.music.next()
self.mic.say("Playing %s" % self.music.current_song())
return
elif "PREVIOUS" in command:
self.mic.say("Previous song")
self.music.play() # backwards necessary to get mopidy to work
self.music.previous()
self.mic.say("Playing %s" % self.music.current_song())
return
# SONG SELECTION... requires long-loading dictionary and language model
# songs = self.music.fuzzy_songs(query = command.replace("PLAY", ""))
# if songs:
# self.mic.say("Found songs")
# self.music.play(songs = songs)
# print("SONG RESULTS")
# print("============")
# for song in songs:
# print("Song: %s Artist: %s" % (song.title, song.artist))
# self.mic.say("Playing %s" % self.music.current_song())
# else:
# self.mic.say("No songs found. Resuming current song.")
# self.music.play()
# PLAYLIST SELECTION
playlists = self.music.fuzzy_playlists(query=command)
if playlists:
self.mic.say("Loading playlist %s" % playlists[0])
self.music.play(playlist_name=playlists[0])
self.mic.say("Playing %s" % self.music.current_song())
else:
self.mic.say("No playlists found. Resuming current song.")
self.music.play()
return
def handleForever(self):
self.music.play()
self.mic.say("Playing %s" % self.music.current_song())
while True:
threshold, transcribed = self.mic.passiveListen(self.persona)
if not transcribed or not threshold:
self._logger.info("Nothing has been said or transcribed.")
continue
self.music.pause()
input = self.mic.activeListen(MUSIC=True)
if input:
if "close" in input.lower():
self.mic.say("Closing Spotify")
return
self.delegateInput(input)
else:
self.mic.say("Pardon?")
self.music.play()
def reconnect(func, *default_args, **default_kwargs):
"""
Reconnects before running
"""
def wrap(self, *default_args, **default_kwargs):
try:
self.client.connect(self.server, self.port)
except:
pass
# sometimes not enough to just connect
try:
return func(self, *default_args, **default_kwargs)
except:
self.client = mpd.MPDClient()
self.client.timeout = None
self.client.idletimeout = None
self.client.connect(self.server, self.port)
return func(self, *default_args, **default_kwargs)
return wrap
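# Note: reconnect() re-opens the MPD connection before delegating to the
# wrapped method, and rebuilds the client once if the call still fails. The
# *default_args/**default_kwargs on wrap() shadow the decorator's own extra
# arguments, so only the arguments supplied at call time are forwarded.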
class Song(object):
def __init__(self, id, title, artist, album):
self.id = id
self.title = title
self.artist = artist
self.album = album
class MPDWrapper(object):
def __init__(self, server="localhost", port=6600):
"""
Prepare the client and music variables
"""
self.server = server
self.port = port
# prepare client
self.client = mpd.MPDClient()
self.client.timeout = None
self.client.idletimeout = None
self.client.connect(self.server, self.port)
# gather playlists
self.playlists = [x["playlist"] for x in self.client.listplaylists()]
# gather songs
self.client.clear()
for playlist in self.playlists:
self.client.load(playlist)
self.songs = [] # may have duplicates
# capitalized strings
self.song_titles = []
self.song_artists = []
soup = self.client.playlist()
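        # client.playlist() is assumed to return 10 lines of metadata per
        # track; the offsets below pull the id, artist, title and album
        # fields out of each 10-line chunk.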
for i in range(0, len(soup) / 10):
index = i * 10
id = soup[index].strip()
title = soup[index + 3].strip().upper()
artist = soup[index + 2].strip().upper()
album = soup[index + 4].strip().upper()
self.songs.append(Song(id, title, artist, album))
self.song_titles.append(title)
self.song_artists.append(artist)
@reconnect
def play(self, songs=False, playlist_name=False):
"""
Plays the current song or accepts a song to play.
Arguments:
songs -- a list of song objects
playlist_name -- user-defined, something like "Love Song Playlist"
"""
if songs:
self.client.clear()
for song in songs:
try: # for some reason, certain ids don't work
self.client.add(song.id)
except:
pass
if playlist_name:
self.client.clear()
self.client.load(playlist_name)
self.client.play()
@reconnect
def current_song(self):
item = self.client.playlistinfo(int(self.client.status()["song"]))[0]
result = "%s by %s" % (item["title"], item["artist"])
return result
@reconnect
def volume(self, level=None, interval=None):
if level:
self.client.setvol(int(level))
return
if interval:
level = int(self.client.status()['volume']) + int(interval)
self.client.setvol(int(level))
return
@reconnect
def pause(self):
self.client.pause()
@reconnect
def stop(self):
self.client.stop()
@reconnect
def next(self):
self.client.next()
return
@reconnect
def previous(self):
self.client.previous()
return
def get_soup(self):
"""
Returns the list of unique words that comprise song and artist titles
"""
soup = []
for song in self.songs:
song_words = song.title.split(" ")
artist_words = song.artist.split(" ")
soup.extend(song_words)
soup.extend(artist_words)
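        # Build a translation table that keeps ASCII letters and maps every
        # other byte to '_', so the soup below ends up as plain upper-case
        # words with punctuation and non-ASCII characters stripped.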
title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower()
else '_' for c in range(256))
soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate(
title_trans).replace("_", "") for x in soup]
soup = [x for x in soup if x != ""]
return list(set(soup))
def get_soup_playlist(self):
"""
Returns the list of unique words that comprise playlist names
"""
soup = []
for name in self.playlists:
soup.extend(name.split(" "))
title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower()
else '_' for c in range(256))
soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate(
title_trans).replace("_", "") for x in soup]
soup = [x for x in soup if x != ""]
return list(set(soup))
def get_soup_separated(self):
"""
Returns the list of PHRASES that comprise song and artist titles
"""
title_soup = [song.title for song in self.songs]
artist_soup = [song.artist for song in self.songs]
soup = list(set(title_soup + artist_soup))
title_trans = ''.join(chr(c) if chr(c).isupper() or chr(c).islower()
else '_' for c in range(256))
soup = [x.decode('utf-8').encode("ascii", "ignore").upper().translate(
title_trans).replace("_", " ") for x in soup]
soup = [re.sub(' +', ' ', x) for x in soup if x != ""]
return soup
def fuzzy_songs(self, query):
"""
Returns songs matching a query best as possible on either artist
field, etc
"""
query = query.upper()
matched_song_titles = difflib.get_close_matches(query,
self.song_titles)
matched_song_artists = difflib.get_close_matches(query,
self.song_artists)
# if query is beautifully matched, then forget about everything else
strict_priority_title = [x for x in matched_song_titles if x == query]
strict_priority_artists = [
x for x in matched_song_artists if x == query]
if strict_priority_title:
matched_song_titles = strict_priority_title
if strict_priority_artists:
matched_song_artists = strict_priority_artists
matched_songs_bytitle = [
song for song in self.songs if song.title in matched_song_titles]
matched_songs_byartist = [
song for song in self.songs if song.artist in matched_song_artists]
matches = list(set(matched_songs_bytitle + matched_songs_byartist))
return matches
def fuzzy_playlists(self, query):
"""
returns playlist names that match query best as possible
"""
query = query.upper()
lookup = {n.upper(): n for n in self.playlists}
results = [lookup[r] for r in difflib.get_close_matches(query, lookup)]
return results
|
rahul1193/jasper-client
|
client/modules/MPDControl.py
|
Python
|
mit
| 12,997
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_network
short_description: Manages networks on Apache CloudStack based clouds.
description:
- Create, update, restart and delete networks.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name (case sensitive) of the network.
required: true
display_text:
description:
- Display text of the network.
- If not specified, C(name) will be used as C(display_text).
required: false
default: null
network_offering:
description:
- Name of the offering for the network.
- Required if C(state=present).
required: false
default: null
start_ip:
description:
- The beginning IPv4 address of the network belongs to.
- Only considered on create.
required: false
default: null
end_ip:
description:
- The ending IPv4 address of the network belongs to.
- If not specified, value of C(start_ip) is used.
- Only considered on create.
required: false
default: null
gateway:
description:
- The gateway of the network.
- Required for shared networks and isolated networks when it belongs to VPC.
- Only considered on create.
required: false
default: null
netmask:
description:
- The netmask of the network.
- Required for shared networks and isolated networks when it belongs to VPC.
- Only considered on create.
required: false
default: null
start_ipv6:
description:
- The beginning IPv6 address of the network belongs to.
- Only considered on create.
required: false
default: null
end_ipv6:
description:
- The ending IPv6 address of the network belongs to.
- If not specified, value of C(start_ipv6) is used.
- Only considered on create.
required: false
default: null
cidr_ipv6:
description:
- CIDR of IPv6 network, must be at least /64.
- Only considered on create.
required: false
default: null
gateway_ipv6:
description:
- The gateway of the IPv6 network.
- Required for shared networks.
- Only considered on create.
required: false
default: null
vlan:
description:
- The ID or VID of the network.
required: false
default: null
vpc:
description:
      - Name, display text or ID of the VPC the network belongs to.
required: false
default: null
isolated_pvlan:
description:
- The isolated private vlan for this network.
required: false
default: null
clean_up:
description:
- Cleanup old network elements.
- Only considered on C(state=restarted).
required: false
default: false
acl_type:
description:
- Access control type.
- Only considered on create.
required: false
default: account
choices: [ 'account', 'domain' ]
network_domain:
description:
- The network domain.
required: false
default: null
state:
description:
- State of the network.
required: false
default: present
choices: [ 'present', 'absent', 'restarted' ]
zone:
description:
- Name of the zone in which the network should be deployed.
- If not set, default zone is used.
required: false
default: null
project:
description:
- Name of the project the network to be deployed in.
required: false
default: null
domain:
description:
- Domain the network is related to.
required: false
default: null
account:
description:
- Account the network is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a network
- local_action:
module: cs_network
name: my network
zone: gva-01
network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService
network_domain: example.com
# update a network
- local_action:
module: cs_network
name: my network
display_text: network of domain example.local
network_domain: example.local
# restart a network with clean up
- local_action:
module: cs_network
name: my network
clean_up: yes
    state: restarted
# remove a network
- local_action:
module: cs_network
name: my network
state: absent
'''
RETURN = '''
---
id:
description: UUID of the network.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the network.
returned: success
type: string
sample: web project
display_text:
description: Display text of the network.
returned: success
type: string
sample: web project
dns1:
description: IP address of the 1st nameserver.
returned: success
type: string
sample: 1.2.3.4
dns2:
description: IP address of the 2nd nameserver.
returned: success
type: string
sample: 1.2.3.4
cidr:
description: IPv4 network CIDR.
returned: success
type: string
sample: 10.101.64.0/24
gateway:
description: IPv4 gateway.
returned: success
type: string
sample: 10.101.64.1
netmask:
description: IPv4 netmask.
returned: success
type: string
sample: 255.255.255.0
cidr_ipv6:
description: IPv6 network CIDR.
returned: success
type: string
sample: 2001:db8::/64
gateway_ipv6:
description: IPv6 gateway.
returned: success
type: string
sample: 2001:db8::1
state:
description: State of the network.
returned: success
type: string
sample: Implemented
zone:
description: Name of zone.
returned: success
type: string
sample: ch-gva-2
domain:
description: Domain the network is related to.
returned: success
type: string
sample: ROOT
account:
description: Account the network is related to.
returned: success
type: string
sample: example account
project:
description: Name of project.
returned: success
type: string
sample: Production
tags:
description: List of resource tags associated with the network.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
acl_type:
description: Access type of the network (Domain, Account).
returned: success
type: string
sample: Account
broadcast_domain_type:
description: Broadcast domain type of the network.
returned: success
type: string
sample: Vlan
type:
description: Type of the network.
returned: success
type: string
sample: Isolated
traffic_type:
description: Traffic type of the network.
returned: success
type: string
sample: Guest
state:
description: State of the network (Allocated, Implemented, Setup).
returned: success
type: string
sample: Allocated
is_persistent:
description: Whether the network is persistent or not.
returned: success
type: boolean
sample: false
network_domain:
description: The network domain
returned: success
type: string
sample: example.local
network_offering:
description: The network offering name.
returned: success
type: string
sample: DefaultIsolatedNetworkOfferingWithSourceNatService
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackNetwork(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackNetwork, self).__init__(module)
self.returns = {
            'networkdomain': 'network_domain',
'networkofferingname': 'network_offering',
'ispersistent': 'is_persistent',
'acltype': 'acl_type',
'type': 'type',
'traffictype': 'traffic_type',
'ip6gateway': 'gateway_ipv6',
'ip6cidr': 'cidr_ipv6',
'gateway': 'gateway',
'cidr': 'cidr',
'netmask': 'netmask',
'broadcastdomaintype': 'broadcast_domain_type',
'dns1': 'dns1',
'dns2': 'dns2',
}
self.network = None
def get_vpc(self, key=None):
vpc = self.module.params.get('vpc')
if not vpc:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vpcs = self.cs.listVPCs(**args)
if vpcs:
for v in vpcs['vpc']:
if vpc in [ v['name'], v['displaytext'], v['id'] ]:
return self._get_by_key(key, v)
self.module.fail_json(msg="VPC '%s' not found" % vpc)
def get_network_offering(self, key=None):
network_offering = self.module.params.get('network_offering')
if not network_offering:
self.module.fail_json(msg="missing required arguments: network_offering")
args = {}
args['zoneid'] = self.get_zone(key='id')
network_offerings = self.cs.listNetworkOfferings(**args)
if network_offerings:
for no in network_offerings['networkoffering']:
if network_offering in [ no['name'], no['displaytext'], no['id'] ]:
return self._get_by_key(key, no)
self.module.fail_json(msg="Network offering '%s' not found" % network_offering)
def _get_args(self):
args = {}
args['name'] = self.module.params.get('name')
args['displaytext'] = self.get_or_fallback('display_text', 'name')
args['networkdomain'] = self.module.params.get('network_domain')
args['networkofferingid'] = self.get_network_offering(key='id')
return args
def get_network(self):
if not self.network:
network = self.module.params.get('name')
args = {}
args['zoneid'] = self.get_zone(key='id')
args['projectid'] = self.get_project(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
networks = self.cs.listNetworks(**args)
if networks:
for n in networks['network']:
if network in [ n['name'], n['displaytext'], n['id']]:
self.network = n
break
return self.network
def present_network(self):
network = self.get_network()
if not network:
network = self.create_network(network)
else:
network = self.update_network(network)
return network
def update_network(self, network):
args = self._get_args()
args['id'] = network['id']
if self._has_changed(args, network):
self.result['changed'] = True
if not self.module.check_mode:
network = self.cs.updateNetwork(**args)
if 'errortext' in network:
self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
poll_async = self.module.params.get('poll_async')
if network and poll_async:
network = self._poll_job(network, 'network')
return network
def create_network(self, network):
self.result['changed'] = True
args = self._get_args()
args['acltype'] = self.module.params.get('acl_type')
args['zoneid'] = self.get_zone(key='id')
args['projectid'] = self.get_project(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['startip'] = self.module.params.get('start_ip')
args['endip'] = self.get_or_fallback('end_ip', 'start_ip')
args['netmask'] = self.module.params.get('netmask')
args['gateway'] = self.module.params.get('gateway')
args['startipv6'] = self.module.params.get('start_ipv6')
args['endipv6'] = self.get_or_fallback('end_ipv6', 'start_ipv6')
args['ip6cidr'] = self.module.params.get('cidr_ipv6')
args['ip6gateway'] = self.module.params.get('gateway_ipv6')
args['vlan'] = self.module.params.get('vlan')
args['isolatedpvlan'] = self.module.params.get('isolated_pvlan')
args['subdomainaccess'] = self.module.params.get('subdomain_access')
args['vpcid'] = self.get_vpc(key='id')
if not self.module.check_mode:
res = self.cs.createNetwork(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
network = res['network']
return network
def restart_network(self):
network = self.get_network()
if not network:
self.module.fail_json(msg="No network named '%s' found." % self.module.params('name'))
# Restarting only available for these states
if network['state'].lower() in [ 'implemented', 'setup' ]:
self.result['changed'] = True
args = {}
args['id'] = network['id']
args['cleanup'] = self.module.params.get('clean_up')
if not self.module.check_mode:
network = self.cs.restartNetwork(**args)
if 'errortext' in network:
self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
poll_async = self.module.params.get('poll_async')
if network and poll_async:
network = self._poll_job(network, 'network')
return network
def absent_network(self):
network = self.get_network()
if network:
self.result['changed'] = True
args = {}
args['id'] = network['id']
if not self.module.check_mode:
res = self.cs.deleteNetwork(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self._poll_job(res, 'network')
return network
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
display_text = dict(default=None),
network_offering = dict(default=None),
zone = dict(default=None),
start_ip = dict(default=None),
end_ip = dict(default=None),
gateway = dict(default=None),
netmask = dict(default=None),
start_ipv6 = dict(default=None),
end_ipv6 = dict(default=None),
cidr_ipv6 = dict(default=None),
gateway_ipv6 = dict(default=None),
vlan = dict(default=None),
vpc = dict(default=None),
isolated_pvlan = dict(default=None),
clean_up = dict(type='bool', default=False),
network_domain = dict(default=None),
state = dict(choices=['present', 'absent', 'restarted' ], default='present'),
acl_type = dict(choices=['account', 'domain'], default='account'),
project = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
required_together.extend([
['start_ip', 'netmask', 'gateway'],
['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'],
])
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_network = AnsibleCloudStackNetwork(module)
state = module.params.get('state')
if state in ['absent']:
network = acs_network.absent_network()
elif state in ['restarted']:
network = acs_network.restart_network()
else:
network = acs_network.present_network()
result = acs_network.get_result(network)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
StackPointCloud/ansible-modules-extras
|
cloud/cloudstack/cs_network.py
|
Python
|
gpl-3.0
| 17,539
|
from os.path import join, dirname
import sys
sys.path.append(join(dirname(__file__), '_applibs'))
|
rbdedu/runway
|
singleFileApp/.buildozer/android/app/sitecustomize.py
|
Python
|
mit
| 98
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.aggregates import constants
class DeleteAggregateAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Host Aggregate",
u"Delete Host Aggregates",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Host Aggregate",
u"Deleted Host Aggregates",
count
)
def delete(self, request, obj_id):
api.nova.aggregate_delete(request, obj_id)
class CreateAggregateAction(tables.LinkAction):
name = "create"
verbose_name = _("Create Host Aggregate")
url = constants.AGGREGATES_CREATE_URL
classes = ("ajax-modal",)
icon = "plus"
class ManageHostsAction(tables.LinkAction):
name = "manage"
verbose_name = _("Manage Hosts")
url = constants.AGGREGATES_MANAGE_HOSTS_URL
classes = ("ajax-modal",)
icon = "plus"
class UpdateMetadataAction(tables.LinkAction):
name = "update-metadata"
verbose_name = _("Update Metadata")
url = constants.AGGREGATES_UPDATE_METADATA_URL
classes = ("ajax-modal",)
icon = "pencil"
class UpdateAggregateAction(tables.LinkAction):
name = "update"
verbose_name = _("Edit Host Aggregate")
url = constants.AGGREGATES_UPDATE_URL
classes = ("ajax-modal",)
icon = "pencil"
class AggregateFilterAction(tables.FilterAction):
def filter(self, table, aggregates, filter_string):
q = filter_string.lower()
def comp(aggregate):
return q in aggregate.name.lower()
return filter(comp, aggregates)
class AvailabilityZoneFilterAction(tables.FilterAction):
def filter(self, table, availability_zones, filter_string):
q = filter_string.lower()
def comp(availabilityZone):
return q in availabilityZone.zoneName.lower()
return filter(comp, availability_zones)
def get_aggregate_hosts(aggregate):
return [host for host in aggregate.hosts]
def get_metadata(aggregate):
return [' = '.join([key, val]) for key, val
in six.iteritems(aggregate.metadata)]
def get_available(zone):
return zone.zoneState['available']
def get_zone_hosts(zone):
hosts = zone.hosts
host_details = []
if hosts is None:
return []
for name, services in hosts.items():
up = all(s['active'] and s['available'] for s in services.values())
up = _("Services Up") if up else _("Services Down")
host_details.append("%(host)s (%(up)s)" % {'host': name, 'up': up})
return host_details
def safe_unordered_list(value):
return filters.unordered_list(value, autoescape=True)
class HostAggregatesTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'))
availability_zone = tables.Column('availability_zone',
verbose_name=_('Availability Zone'))
hosts = tables.Column(get_aggregate_hosts,
verbose_name=_("Hosts"),
wrap_list=True,
filters=(safe_unordered_list,))
metadata = tables.Column(get_metadata,
verbose_name=_("Metadata"),
wrap_list=True,
filters=(safe_unordered_list,))
class Meta(object):
name = "host_aggregates"
hidden_title = False
verbose_name = _("Host Aggregates")
table_actions = (AggregateFilterAction,
CreateAggregateAction,
DeleteAggregateAction)
row_actions = (UpdateAggregateAction,
ManageHostsAction,
UpdateMetadataAction,
DeleteAggregateAction)
class AvailabilityZonesTable(tables.DataTable):
name = tables.Column('zoneName',
verbose_name=_('Availability Zone Name'))
hosts = tables.Column(get_zone_hosts,
verbose_name=_('Hosts'),
wrap_list=True,
filters=(safe_unordered_list,))
available = tables.Column(get_available,
verbose_name=_('Available'),
status=True,
filters=(filters.yesno, filters.capfirst))
def get_object_id(self, zone):
return zone.zoneName
class Meta(object):
name = "availability_zones"
hidden_title = False
verbose_name = _("Availability Zones")
table_actions = (AvailabilityZoneFilterAction,)
multi_select = False
|
wangxiangyu/horizon
|
openstack_dashboard/dashboards/admin/aggregates/tables.py
|
Python
|
apache-2.0
| 5,497
|
from math import sqrt
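# Emits G-code for a snowflake-like curve: flowsnake() recursively replaces
# each segment with four sub-segments (the first and last thirds plus a
# triangular bump, Koch-curve style), and the three top-level calls below
# trace the sides of a triangle at recursion depth 5.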
def flowsnake(level, startX, startY, endX, endY):
if level == 0:
print"g1 f10 x", endX, "y", endY
else:
p1X = (startX * 2 + endX)/3
p1Y = (startY * 2 + endY)/3
p2X = (startX + endX)/2 + (endY - startY)/sqrt(12.0)
p2Y = (startY + endY)/2 - (endX - startX)/sqrt(12.0)
p3X = (startX + 2 * endX)/3
p3Y = (startY + 2 * endY)/3
flowsnake(level-1, startX, startY, p1X, p1Y)
flowsnake(level-1, p1X, p1Y, p2X, p2Y)
flowsnake(level-1, p2X, p2Y, p3X, p3Y)
flowsnake(level-1, p3X, p3Y, endX, endY)
print "S1M3"
print "g0 z1"
print "g0 x.25 y1.0"
print "g1 f10 z0"
flowsnake(5, .25, 1.0, 3.75, 1.0)
flowsnake(5, 3.75, 1.0, 2.0, 3.95)
flowsnake(5, 2.0, 3.95, .25, 1.0)
print "g0 z1"
print "M2"
|
EqAfrica/machinekit
|
nc_files/flowsnake.py
|
Python
|
lgpl-2.1
| 755
|
from unittest import mock, skipUnless
from django.db import connection
from django.db.backends.mysql.features import DatabaseFeatures
from django.test import TestCase
@skipUnless(connection.vendor == 'mysql', 'MySQL tests')
class TestFeatures(TestCase):
def test_supports_transactions(self):
"""
All storage engines except MyISAM support transactions.
"""
with mock.patch('django.db.connection.features._mysql_storage_engine', 'InnoDB'):
self.assertTrue(connection.features.supports_transactions)
del connection.features.supports_transactions
with mock.patch('django.db.connection.features._mysql_storage_engine', 'MyISAM'):
self.assertFalse(connection.features.supports_transactions)
del connection.features.supports_transactions
def test_skip_locked_no_wait(self):
with mock.MagicMock() as _connection:
_connection.mysql_version = (8, 0, 1)
_connection.mysql_is_mariadb = False
database_features = DatabaseFeatures(_connection)
self.assertTrue(database_features.has_select_for_update_skip_locked)
self.assertTrue(database_features.has_select_for_update_nowait)
with mock.MagicMock() as _connection:
_connection.mysql_version = (8, 0, 0)
_connection.mysql_is_mariadb = False
database_features = DatabaseFeatures(_connection)
self.assertFalse(database_features.has_select_for_update_skip_locked)
self.assertFalse(database_features.has_select_for_update_nowait)
|
nesdis/djongo
|
tests/django_tests/tests/v22/tests/backends/mysql/test_features.py
|
Python
|
agpl-3.0
| 1,590
|
b = bytearray(30)
f = open("io/data/file1", "rb")
print(f.readinto(b))
print(b)
f = open("io/data/file2", "rb")
print(f.readinto(b))
print(b)
# readinto() on writable file
f = open('io/data/file1', 'ab')
try:
f.readinto(bytearray(4))
except OSError:
print('OSError')
|
Xykon/pycom-micropython-sigfox
|
tests/io/file_readinto.py
|
Python
|
mit
| 276
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class FirstOfTagTests(SimpleTestCase):
@setup({'firstof01': '{% firstof a b c %}'})
def test_firstof01(self):
output = self.engine.render_to_string('firstof01', {'a': 0, 'c': 0, 'b': 0})
self.assertEqual(output, '')
@setup({'firstof02': '{% firstof a b c %}'})
def test_firstof02(self):
output = self.engine.render_to_string('firstof02', {'a': 1, 'c': 0, 'b': 0})
self.assertEqual(output, '1')
@setup({'firstof03': '{% firstof a b c %}'})
def test_firstof03(self):
output = self.engine.render_to_string('firstof03', {'a': 0, 'c': 0, 'b': 2})
self.assertEqual(output, '2')
@setup({'firstof04': '{% firstof a b c %}'})
def test_firstof04(self):
output = self.engine.render_to_string('firstof04', {'a': 0, 'c': 3, 'b': 0})
self.assertEqual(output, '3')
@setup({'firstof05': '{% firstof a b c %}'})
def test_firstof05(self):
output = self.engine.render_to_string('firstof05', {'a': 1, 'c': 3, 'b': 2})
self.assertEqual(output, '1')
@setup({'firstof06': '{% firstof a b c %}'})
def test_firstof06(self):
output = self.engine.render_to_string('firstof06', {'c': 3, 'b': 0})
self.assertEqual(output, '3')
@setup({'firstof07': '{% firstof a b "c" %}'})
def test_firstof07(self):
output = self.engine.render_to_string('firstof07', {'a': 0})
self.assertEqual(output, 'c')
@setup({'firstof08': '{% firstof a b "c and d" %}'})
def test_firstof08(self):
output = self.engine.render_to_string('firstof08', {'a': 0, 'b': 0})
self.assertEqual(output, 'c and d')
@setup({'firstof09': '{% firstof %}'})
def test_firstof09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('firstof09')
@setup({'firstof10': '{% firstof a %}'})
def test_firstof10(self):
output = self.engine.render_to_string('firstof10', {'a': '<'})
self.assertEqual(output, '<')
@setup({'firstof11': '{% firstof a b %}'})
def test_firstof11(self):
output = self.engine.render_to_string('firstof11', {'a': '<', 'b': '>'})
self.assertEqual(output, '<')
@setup({'firstof12': '{% firstof a b %}'})
def test_firstof12(self):
output = self.engine.render_to_string('firstof12', {'a': '', 'b': '>'})
self.assertEqual(output, '>')
@setup({'firstof13': '{% autoescape off %}{% firstof a %}{% endautoescape %}'})
def test_firstof13(self):
output = self.engine.render_to_string('firstof13', {'a': '<'})
self.assertEqual(output, '<')
@setup({'firstof14': '{% firstof a|safe b %}'})
def test_firstof14(self):
output = self.engine.render_to_string('firstof14', {'a': '<'})
self.assertEqual(output, '<')
@setup({'firstof15': '{% firstof a b c as myvar %}'})
def test_firstof15(self):
ctx = {'a': 0, 'b': 2, 'c': 3}
output = self.engine.render_to_string('firstof15', ctx)
self.assertEqual(ctx['myvar'], '2')
self.assertEqual(output, '')
@setup({'firstof16': '{% firstof a b c as myvar %}'})
def test_all_false_arguments_asvar(self):
ctx = {'a': 0, 'b': 0, 'c': 0}
output = self.engine.render_to_string('firstof16', ctx)
self.assertEqual(ctx['myvar'], '')
self.assertEqual(output, '')
|
wkschwartz/django
|
tests/template_tests/syntax_tests/test_firstof.py
|
Python
|
bsd-3-clause
| 3,503
|
tests = "foo"
for t in tests:
try:
for t in []:
print t
except Exception:
continue
|
asedunov/intellij-community
|
python/testData/codeInsight/controlflow/fortrycontinue.py
|
Python
|
apache-2.0
| 119
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.SVM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.estimators import svm
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import test
class SVMTest(test.TestCase):
def testRealValuedFeaturesPerfectlySeparable(self):
"""Tests SVM classifier with real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'feature1': constant_op.constant([[0.0], [1.0], [3.0]]),
'feature2': constant_op.constant([[1.0], [-1.2], [1.0]]),
}, constant_op.constant([[1], [0], [1]])
feature1 = feature_column.real_valued_column('feature1')
feature2 = feature_column.real_valued_column('feature2')
svm_classifier = svm.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=0.0,
l2_regularization=0.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
# The points are not only separable but there exist weights (for instance
# w1=0.0, w2=1.0) that satisfy the margin inequalities (y_i* w^T*x_i >=1).
# The unregularized loss should therefore be 0.0.
self.assertAlmostEqual(loss, 0.0, places=3)
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testRealValuedFeaturesWithL2Regularization(self):
"""Tests SVM classifier with real valued features and L2 regularization."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'feature1': constant_op.constant([0.5, 1.0, 1.0]),
'feature2': constant_op.constant([1.0, -1.0, 0.5]),
}, constant_op.constant([1, 0, 1])
feature1 = feature_column.real_valued_column('feature1')
feature2 = feature_column.real_valued_column('feature2')
svm_classifier = svm.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=0.0,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
    # The points are in general separable. Also, if there were no regularization,
    # the margin inequalities would be satisfied too (for instance by w1=1.0,
    # w2=5.0). Due to regularization, smaller weights are chosen. This results
    # in a small but non-zero unregularized loss. Still, all the predictions
    # will be correct, resulting in perfect accuracy.
self.assertLess(loss, 0.1)
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testMultiDimensionalRealValuedFeaturesWithL2Regularization(self):
"""Tests SVM with multi-dimensional real features and L2 regularization."""
# This is identical to the one in testRealValuedFeaturesWithL2Regularization
# where 2 tensors (dense features) of shape [3, 1] have been replaced by a
# single tensor (dense feature) of shape [3, 2].
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'multi_dim_feature':
constant_op.constant([[0.5, 1.0], [1.0, -1.0], [1.0, 0.5]]),
}, constant_op.constant([[1], [0], [1]])
multi_dim_feature = feature_column.real_valued_column(
'multi_dim_feature', dimension=2)
svm_classifier = svm.SVM(feature_columns=[multi_dim_feature],
example_id_column='example_id',
l1_regularization=0.0,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
self.assertLess(loss, 0.1)
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testRealValuedFeaturesWithMildL1Regularization(self):
"""Tests SVM classifier with real valued features and L2 regularization."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'feature1': constant_op.constant([[0.5], [1.0], [1.0]]),
'feature2': constant_op.constant([[1.0], [-1.0], [0.5]]),
}, constant_op.constant([[1], [0], [1]])
feature1 = feature_column.real_valued_column('feature1')
feature2 = feature_column.real_valued_column('feature2')
svm_classifier = svm.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=0.5,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
    # Adding small L1 regularization favors even smaller weights. This results
    # in a somewhat moderate unregularized loss (bigger than the one when there
    # is no L1 regularization). Still, since L1 is small, all the predictions
    # will be correct, resulting in perfect accuracy.
self.assertGreater(loss, 0.1)
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testRealValuedFeaturesWithBigL1Regularization(self):
"""Tests SVM classifier with real valued features and L2 regularization."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'feature1': constant_op.constant([0.5, 1.0, 1.0]),
'feature2': constant_op.constant([[1.0], [-1.0], [0.5]]),
}, constant_op.constant([[1], [0], [1]])
feature1 = feature_column.real_valued_column('feature1')
feature2 = feature_column.real_valued_column('feature2')
svm_classifier = svm.SVM(feature_columns=[feature1, feature2],
example_id_column='example_id',
l1_regularization=3.0,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
loss = metrics['loss']
accuracy = metrics['accuracy']
    # When the L1 regularization parameter is large, the loss due to
    # regularization outweighs the unregularized loss. In this case, the
    # classifier will favor very small weights (in the current case 0),
    # resulting in both a big unregularized loss and bad accuracy.
self.assertAlmostEqual(loss, 1.0, places=3)
self.assertAlmostEqual(accuracy, 1 / 3, places=3)
def testSparseFeatures(self):
"""Tests SVM classifier with (hashed) sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.8], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}, constant_op.constant([[0], [1], [1]])
price = feature_column.real_valued_column('price')
country = feature_column.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
svm_classifier = svm.SVM(feature_columns=[price, country],
example_id_column='example_id',
l1_regularization=0.0,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testBucketizedFeatures(self):
"""Tests SVM classifier with bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [800.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [800.0], [500.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column.bucketized_column(
feature_column.real_valued_column('price'), boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column.bucketized_column(
feature_column.real_valued_column('sq_footage'), boundaries=[650.0])
svm_classifier = svm.SVM(feature_columns=[price_bucket, sq_footage_bucket],
example_id_column='example_id',
l1_regularization=0.1,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
self.assertAlmostEqual(accuracy, 1.0, places=3)
def testMixedFeatures(self):
"""Tests SVM classifier with a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column.real_valued_column('price')
sq_footage_bucket = feature_column.bucketized_column(
feature_column.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
svm_classifier = svm.SVM(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
example_id_column='example_id',
weight_column_name='weights',
l1_regularization=0.1,
l2_regularization=1.0)
svm_classifier.fit(input_fn=input_fn, steps=30)
accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
self.assertAlmostEqual(accuracy, 1.0, places=3)
if __name__ == '__main__':
test.main()
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/contrib/learn/python/learn/estimators/svm_test.py
|
Python
|
apache-2.0
| 11,485
|
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports the unit-testing of logging code.
Provides support for unit-testing messages logged using the built-in
logging module.
Inherit from the LoggingTestCase class for basic testing needs. For
more advanced needs (e.g. unit-testing methods that configure logging),
see the TestLogStream class, and perhaps also the LogTesting class.
"""
import logging
import unittest2 as unittest
class TestLogStream(object):
"""Represents a file-like object for unit-testing logging.
This is meant for passing to the logging.StreamHandler constructor.
Log messages captured by instances of this object can be tested
using self.assertMessages() below.
"""
def __init__(self, test_case):
"""Create an instance.
Args:
test_case: A unittest.TestCase instance.
"""
self._test_case = test_case
self.messages = []
"""A list of log messages written to the stream."""
# Python documentation says that any object passed to the StreamHandler
# constructor should support write() and flush():
#
# http://docs.python.org/library/logging.html#module-logging.handlers
def write(self, message):
self.messages.append(message)
def flush(self):
pass
def assertMessages(self, messages):
"""Assert that the given messages match the logged messages.
messages: A list of log message strings.
"""
self._test_case.assertEqual(messages, self.messages)
class LogTesting(object):
"""Supports end-to-end unit-testing of log messages.
Sample usage:
class SampleTest(unittest.TestCase):
def setUp(self):
self._log = LogTesting.setUp(self) # Turn logging on.
def tearDown(self):
self._log.tearDown() # Turn off and reset logging.
def test_logging_in_some_method(self):
call_some_method() # Contains calls to _log.info(), etc.
# Check the resulting log messages.
self._log.assertMessages(["INFO: expected message #1",
"WARNING: expected message #2"])
"""
def __init__(self, test_stream, handler):
"""Create an instance.
This method should never be called directly. Instances should
instead be created using the static setUp() method.
Args:
test_stream: A TestLogStream instance.
handler: The handler added to the logger.
"""
self._test_stream = test_stream
self._handler = handler
@staticmethod
def _getLogger():
"""Return the logger being tested."""
# It is possible we might want to return something other than
# the root logger in some special situation. For now, the
# root logger seems to suffice.
return logging.getLogger()
@staticmethod
def setUp(test_case, logging_level=logging.INFO):
"""Configure logging for unit testing.
Configures the root logger to log to a testing log stream.
Only messages logged at or above the given level are logged
to the stream. Messages logged to the stream are formatted
in the following way, for example--
"INFO: This is a test log message."
This method should normally be called in the setUp() method
of a unittest.TestCase. See the docstring of this class
for more details.
Returns:
A LogTesting instance.
Args:
test_case: A unittest.TestCase instance.
logging_level: An integer logging level that is the minimum level
of log messages you would like to test.
"""
stream = TestLogStream(test_case)
handler = logging.StreamHandler(stream)
handler.setLevel(logging_level)
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler.setFormatter(formatter)
# Notice that we only change the root logger by adding a handler
# to it. In particular, we do not reset its level using
# logger.setLevel(). This ensures that we have not interfered
# with how the code being tested may have configured the root
# logger.
logger = LogTesting._getLogger()
logger.addHandler(handler)
return LogTesting(stream, handler)
def tearDown(self):
"""Assert there are no remaining log messages, and reset logging.
This method asserts that there are no more messages in the array of
log messages, and then restores logging to its original state.
This method should normally be called in the tearDown() method of a
unittest.TestCase. See the docstring of this class for more details.
"""
self.assertMessages([])
logger = LogTesting._getLogger()
logger.removeHandler(self._handler)
def messages(self):
"""Return the current list of log messages."""
return self._test_stream.messages
# FIXME: Add a clearMessages() method for cases where the caller
# deliberately doesn't want to assert every message.
# We clear the log messages after asserting since they are no longer
# needed after asserting. This serves two purposes: (1) it simplifies
# the calling code when we want to check multiple logging calls in a
# single test method, and (2) it lets us check in the tearDown() method
# that there are no remaining log messages to be asserted.
#
# The latter ensures that no extra log messages are getting logged that
# the caller might not be aware of or may have forgotten to check for.
# This gets us a bit more mileage out of our tests without writing any
# additional code.
def assertMessages(self, messages):
"""Assert the current array of log messages, and clear its contents.
Args:
messages: A list of log message strings.
"""
try:
self._test_stream.assertMessages(messages)
finally:
# We want to clear the array of messages even in the case of
# an Exception (e.g. an AssertionError). Otherwise, another
# AssertionError can occur in the tearDown() because the
# array might not have gotten emptied.
self._test_stream.messages = []
# This class needs to inherit from unittest.TestCase. Otherwise, the
# setUp() and tearDown() methods will not get fired for test case classes
# that inherit from this class -- even if the class inherits from *both*
# unittest.TestCase and LoggingTestCase.
#
# FIXME: Rename this class to LoggingTestCaseBase to be sure that
# the unittest module does not interpret this class as a unittest
# test case itself.
class LoggingTestCase(unittest.TestCase):
"""Supports end-to-end unit-testing of log messages.
Sample usage:
class SampleTest(LoggingTestCase):
def test_logging_in_some_method(self):
call_some_method() # Contains calls to _log.info(), etc.
# Check the resulting log messages.
self.assertLog(["INFO: expected message #1",
"WARNING: expected message #2"])
"""
def setUp(self):
self._log = LogTesting.setUp(self)
def tearDown(self):
self._log.tearDown()
def logMessages(self):
"""Return the current list of log messages."""
return self._log.messages()
# FIXME: Add a clearMessages() method for cases where the caller
# deliberately doesn't want to assert every message.
# See the code comments preceding LogTesting.assertMessages() for
# an explanation of why we clear the array of messages after
# asserting its contents.
def assertLog(self, messages):
"""Assert the current array of log messages, and clear its contents.
Args:
messages: A list of log message strings.
"""
self._log.assertMessages(messages)
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/Scripts/webkitpy/common/system/logtesting.py
|
Python
|
bsd-3-clause
| 9,435
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
_cfg_read = False
def _ensure_cfg_read():
global _cfg_read
if not _cfg_read:
from distlib.resources import finder
backport_package = __name__.rsplit('.', 1)[0]
_finder = finder(backport_package)
_cfgfile = _finder.find('sysconfig.cfg')
assert _cfgfile, 'sysconfig.cfg exists'
with _cfgfile.as_stream() as s:
_SCHEMES.readfp(s)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_SCHEMES.set(scheme, 'include', '{srcdir}/Include')
_SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
_cfg_read = True
_SCHEMES = configparser.RawConfigParser()
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
_ensure_cfg_read()
if config.has_section('globals'):
globals = config.items('globals')
else:
globals = tuple()
sections = config.sections()
for section in sections:
if section == 'globals':
continue
for option, value in globals:
if config.has_option(section, option):
continue
config.set(section, option, value)
config.remove_section('globals')
# now expanding local variables defined in the cfg file
#
for section in config.sections():
variables = dict(config.items(section))
def _replacer(matchobj):
name = matchobj.group(1)
if name in variables:
return variables[name]
return matchobj.group(0)
for option, value in config.items(section):
config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
"""In the string `path`, replace tokens like {some.thing} with the
corresponding value from the map `local_vars`.
If there is no corresponding value, leave the token unchanged.
"""
def _replacer(matchobj):
name = matchobj.group(1)
if name in local_vars:
return local_vars[name]
elif name in os.environ:
return os.environ[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, path)
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _SCHEMES.items(scheme):
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def format_value(value, vars):
def _replacer(matchobj):
name = matchobj.group(1)
if name in vars:
return vars[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
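# Editor's note: the sketch below is an illustration added for clarity, not part of
# the original module. It shows _parse_makefile() expanding a $(VAR) reference; the
# file contents and variable names are hypothetical.
def _demo_parse_makefile():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.mk', delete=False) as f:
        f.write("PREFIX=/usr/local\nBINDIR=$(PREFIX)/bin\n")
        demo_path = f.name
    try:
        parsed = _parse_makefile(demo_path)
        # 'BINDIR' is resolved using the previously parsed 'PREFIX' value.
        assert parsed['PREFIX'] == '/usr/local'
        assert parsed['BINDIR'] == '/usr/local/bin'
    finally:
        os.remove(demo_path)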
def get_makefile_filename():
"""Return the path of the Makefile."""
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
    define_rx = re.compile(r"#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile(r"/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_SCHEMES.sections()))
def get_path_names():
"""Return a tuple containing the paths names."""
# xxx see if we want a static list
return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
_ensure_cfg_read()
if expand:
return _expand_vars(scheme, vars)
else:
return dict(_SCHEMES.items(scheme))
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
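# Editor's note: a small illustration (not part of the original module) of the public
# path-lookup API defined above; the concrete directory values depend on the
# interpreter and platform, so none are assumed here.
def _demo_path_lookup():
    all_paths = get_paths()            # every directory of the default install scheme
    purelib = get_path('purelib')      # a single entry looked up by name
    assert purelib == all_paths['purelib']
    return purelib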
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# distutils2 module.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
if sys.version >= '2.6':
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
else:
_CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
# compiles an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
m = re.search('-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if True:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if ((macrelease + '.') >= '10.4.' and
'-arch' in get_config_vars().get('CFLAGS', '').strip()):
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
|
KrzysztofStachanczyk/Sensors-WWW-website
|
www/env/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
|
Python
|
gpl-3.0
| 26,964
|
# -*- coding: utf-8 -*-
import os.path
from fabric.api import settings, run, cd, lcd, put, get, local, env, with_settings
from fabric.colors import green
from .. import versions
from .. import distro
from .. import tools
class Deployment(object):
"""The core :class:`Deployment <Deployment>` object. All Fabric tasks built with
    Parcel will probably use an instance of this class.
"""
virtual = "vp"
build_dir = '.parcel'
# these are the full text versions of the scripts
prerm = None
postrm = None
preinst = None
postinst = None
# these are a list representation of commands to go into the scripts
# if the control script templating is used
prerm_lines = []
postrm_lines = []
preinst_lines = []
postinst_lines = []
def __init__(self, app_name, build_deps=[], run_deps=[], path=".", base=None, arch=distro.Debian(), version=None):
#: The architecture of the build host. This should be a :class:`Distro <Distro>` object.
self.arch = arch
remotehome = run('echo $HOME').strip()
# if path isn't set, make it the home directory of the build user
if base is None:
base = remotehome
elif not base.startswith('/'):
base = os.path.join(remotehome, base)
# update and install missing build dependency packages
arch.update_packages()
if build_deps:
arch.build_deps(build_deps)
# the version in the archives of this package if we have been built and uploaded before.
self.version = arch.version(app_name).next() if version is None else versions.Version(version)
#: The name of the resulting package.
self.app_name = app_name
#: A list of packages that must be installed to run the resulting package.
self.run_deps = run_deps
#: A list of packages that need to be installed to build the software.
self.build_deps = build_deps
self.pkg_name = app_name.lower()
#: The directory that will be used as the base level directory.
self.path = os.path.realpath(path)
#: Location of files during build on build host. Default is user's home directory.
#: If path is relative, it's relative to the remote user's home directory. If the path is absolute,
#: it's used as is.
self.base_path = os.path.join(remotehome,self.build_dir)
self.pkg_name = app_name.lower()
self.root_path = os.path.join(self.base_path,"root") # where the final root fs is located
# the path the app will be installed into
self.app_path = os.path.join(base,'%s-%s'%(self.pkg_name,self.version))
# the build path
self.build_path = os.path.join(self.root_path, self.app_path[1:]) # cut the first / off app_path
print "ROOT_PATH", self.root_path
print "BASE_PATH",self.base_path
print "APP PATH",self.app_path
print "BUILD PATH",self.build_path
self._clean()
def prepare_app(self, branch=None, requirements="requirements.txt"):
"""Creates the necessary directories on the build server, checks out the desired branch (None means current),
creates a virtualenv and populates it with dependencies from requirements.txt.
        As a bonus it also fixes the shebangs (#!) of all scripts in the virtualenv to point to the correct Python path
on the target system."""
self._sync_app()
self._add_venv(requirements)
def add_to_root_fs(self,localfile,remotepath):
"""Add a local file to the root package path.
        If the remote path ends in /, the file is copied into
that directory. If the remote path doesn't end in /, it represents
the final filename.
"""
while remotepath[0]=='/':
remotepath=remotepath[1:]
put(localfile,os.path.join(self.root_path,remotepath))
def add_data_to_root_fs(self, data, remotepath):
"""Copies data in file on remotepath (relative to final root)"""
while remotepath[0]=='/':
remotepath=remotepath[1:]
tools.write_contents_to_remote(data,os.path.join(self.root_path, remotepath))
def compile_python(self):
# compile all python (with virtual python)
vpython_path = os.path.join(self.venv_path,'bin/python')
command = '%s -c "import compileall;compileall.compile_dir(\'%s\', force=1)"'%(vpython_path, self.build_path)
run(command)
def clear_py_files(self):
# clear all .py files
run('find "%s" -name "*.py" -exec rm {} \;'%(self.build_path))
def add_prerm(self, lines):
"""Add lines to the prerm file"""
self.prerm_lines.extend(lines)
def add_postrm(self, lines):
"""Add lines to the postrm file"""
self.postrm_lines.extend(lines)
def add_preinst(self, lines):
"""Add lines to the preinst file"""
self.preinst_lines.extend(lines)
def add_postinst(self, lines):
"""Add lines to the postinst file"""
self.postinst_lines.extend(lines)
def build_package(self, templates=True):
"""Takes the whole app including the virtualenv, packages it using fpm and downloads it to the local host.
The version of the package is the build number - which is just the latest package version in our Ubuntu repositories plus one.
"""
# add install and remove templates, use defaults if not supplied
if templates:
if not self.prerm:
self.write_prerm_template(self.arch.defaults.prerm_template)
if not self.postrm:
self.write_postrm_template(self.arch.defaults.postrm_template)
if not self.preinst:
self.write_preinst_template(self.arch.defaults.preinst_template)
if not self.postinst:
self.write_postinst_template(self.arch.defaults.postinst_template)
with cd(self.base_path):
self.deps_str = '-d ' + ' -d '.join(self.run_deps)
self.dirs_str = '.'
if self.prerm or self.postrm or self.preinst or self.postinst:
run("rm -rf installscripts && mkdir -p installscripts")
# render pre/posts
hooks = []
if self.prerm:
prerm = self.prerm.format(self)
tools.write_contents_to_remote(prerm,'installscripts/prerm')
hooks.extend(['--before-remove', '../installscripts/prerm'])
if self.postrm:
postrm = self.postrm.format(self)
tools.write_contents_to_remote(postrm,'installscripts/postrm')
hooks.extend(['--after-remove', '../installscripts/postrm'])
if self.preinst:
tools.write_contents_to_remote(self.preinst,'installscripts/preinst')
hooks.extend(['--before-install', '../installscripts/preinst'])
if self.postinst:
tools.write_contents_to_remote(self.postinst,'installscripts/postinst')
hooks.extend(['--after-install', '../installscripts/postinst'])
self.hooks_str = ' '.join(hooks)
self.arch.build_package(deployment=self)
def write_prerm_template(self, template):
"""Take a template prerm script and format it with appname and prerm_lines
If you call this function you must supply a template string that includes {app_name} and {lines}."""
self.prerm = template.format(app_name=self.app_name, lines="\n ".join(self.prerm_lines))
def write_postrm_template(self, template):
"""Take a template postrm script and format it with appname and postrm_lines
If you call this function you must supply a template string that includes {app_name} and {lines}."""
self.postrm = template.format(app_name=self.app_name, lines="\n ".join(self.postrm_lines))
def write_preinst_template(self, template):
"""Take a template preinst script and format it with appname and preinst_lines
If you call this function you must supply a template string that includes {app_name} and {lines}."""
self.preinst = template.format(app_name=self.app_name, lines="\n ".join(self.preinst_lines))
def write_postinst_template(self, template):
"""Take a template postinst script and format it with appname and postinst_lines
If you call this function you must supply a template string that includes {app_name} and {lines}."""
self.postinst = template.format(app_name=self.app_name, lines="\n ".join(self.postinst_lines))
def _clean(self):
"""Make sure the root filesystem directory is empty."""
run('rm -rf "%s"'%self.root_path)
def _sync_app(self):
"""There is no revision control at the moment so... just copy directory over."""
print self.build_path
tools.rsync([self.path+'/'],self.build_path,rsync_ignore='.rsync-ignore')
def _add_venv(self,requirements="requirements.txt"):
"""Builds virtualenv on remote host and installs from requirements.txt.
:param requirements: The name of the requirements.txt file.
"""
self.venv_path = os.path.join(self.build_path, self.virtual)
run('virtualenv %s'%(self.venv_path))
if requirements and os.path.exists(requirements):
run('PIP_DOWNLOAD_CACHE="%s" %s install -r %s'%(
self.arch.pip_download_cache,
os.path.join(self.venv_path, 'bin/pip'),
os.path.join(self.build_path, requirements))
)
# venv_root is final path
self.venv_root = os.path.join(self.app_path, self.virtual)
# lets make sure this venv is relinked on installation
self.add_postinst(['virtualenv "%s"'%self.venv_root])
# and we have the virtualenv executable
self.run_deps.append('python-virtualenv')
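# Editor's note: a hedged sketch of a Fabric task built on the Deployment class above;
# the application name, dependency lists and file paths are hypothetical examples, not
# values taken from this project, and a live Fabric host connection is assumed.
def deploy_example_app():
    dep = Deployment(
        'example-app',                      # hypothetical package name
        build_deps=['build-essential'],     # hypothetical build-time dependency
        run_deps=['python'],                # hypothetical run-time dependency
        path='.',
    )
    dep.prepare_app(requirements='requirements.txt')
    # Ship a hypothetical init script inside the packaged root filesystem.
    dep.add_to_root_fs('extras/example-app.init', '/etc/init.d/example-app')
    dep.add_postinst(['update-rc.d example-app defaults'])
    dep.build_package()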
|
gipi/parcel
|
parcel/deploy/deploy.py
|
Python
|
isc
| 10,299
|
# -*- coding: utf-8 -*-
"""Tuple of poolservers
"""
poolservers = (
'nl.pool.ntp.org',
'europe.pool.ntp.org',
'pool.ntp.org',
)
|
igroen/ntpdatetime
|
ntpdatetime/config.py
|
Python
|
isc
| 141
|
from json import dump
from os import environ
from os.path import expanduser
from sys import argv
from tempfile import NamedTemporaryFile
from django.core.management import execute_from_command_line
def run_installer(title, allowed_settings=None, settings_path="/etc/django.conf"):
if allowed_settings is None:
allowed_settings = ['base_url', 'database']
config = {
'title': title,
'allowed_settings': allowed_settings,
}
config_file = NamedTemporaryFile()
dump(config, config_file)
config_file.flush()
environ["DJANGO_INSTALLER_CONFIG"] = config_file.name
environ["DJANGO_INSTALLER_SETTINGS"] = expanduser(settings_path)
execute_from_command_line([
argv[0],
"runserver",
#"--noreload",
"--settings=django_installer.installer.settings",
] + argv[1:])
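# Editor's note: a hedged example (not part of the original module) of how a project's
# manage.py might call run_installer(); the title and settings path are hypothetical.
def _example_usage():
    run_installer(
        "Example Project Installer",              # hypothetical installer title
        allowed_settings=['base_url', 'database'],
        settings_path="~/example-django.conf",    # hypothetical settings file
    )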
|
trehn/django-installer
|
django_installer/__init__.py
|
Python
|
isc
| 853
|
from __future__ import print_function
from ignoretogether import IgnoreRuleSet
if __name__ == '__main__':
print('Simulation of commands which would be run on example-ruleset.')
rs = IgnoreRuleSet('example', 'example-ruleset.yml')
rs.load()
print('Ruleset installation.')
rs.install()
print('Ruleset uninstallation.')
rs.uninstall()
|
kaniini/ignore-together
|
tests.py
|
Python
|
isc
| 365
|
#
# Copyright (c) 2014 Chris Jerdonek. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
Support for parsing and writing JSON test cases.
"""
import os
from openrcv.formats.common import Format, FormatWriter
from openrcv.jsonlib import write_json
from openrcv.jcmodels import JsonCaseContestInput
ENCODING_JSON = 'utf-8'
class JsonCaseFormat(Format):
@property
def contest_writer_cls(self):
return JsonCaseContestWriter
class JsonCaseContestWriter(FormatWriter):
@property
def get_output_infos(self):
return (self.get_output_info, )
def get_output_info(self, output_dir):
return os.path.join(output_dir, "contest.json"), ENCODING_JSON
def resource_write(self, resource, contest):
jc_contest = JsonCaseContestInput.from_model(contest)
write_json(jc_contest, resource=resource)
|
cjerdonek/open-rcv
|
openrcv/formats/jscase.py
|
Python
|
mit
| 1,888
|
# -*- coding: utf-8 -*-
import codecs
from anpy.question import parse_question
def test_question_parsing():
xml = codecs.open('tests/resources/questions/q14_14-47351QE.xml').read()
parsing_result = parse_question('http://questions.assemblee-nationale.fr/q14/14-47351QE.htm', xml)
assert ['@TYPE', 'LEGISLATURE', 'DEPOT', 'AUTEUR', 'GROUPE', 'CIRCONSCRIPTION', 'INDEXATION_AN', 'MINI', 'MINA',
'ERRATUM_QUESTION', 'SIGNALEMENT', 'RENOUVELLEMENT', 'CLOTURE', 'REPONSE', 'ERRATUM_REPONSE',
'URL'] == list(parsing_result.keys())
|
regardscitoyens/anpy
|
tests/test_question_parsing.py
|
Python
|
mit
| 566
|
""" Initialize package """
|
SmithSamuelM/openwestdemo
|
demoing/__init__.py
|
Python
|
mit
| 28
|
# some simple set up for analysis to avoid repetition
import pandas as pd
import numpy as np
ISO_COUNTRY_CODES = {
'Brazil': 76,
'China': 156,
'India': 356,
'Nigeria': 566
}
FAOSTAT_COUNTRY_CODES = {
'Brazil': 21,
'China': 351,
'India': 100,
'Nigeria': 159
}
ag = pd.read_csv('dump.csv')
ag.rename(columns=dict(zip(ag.columns,['year', 'country', 'country_code', 'land_type', 'land_code', 'pct_ag_change', 'pct_ag_change_flg', 'pct_ag_area', 'pct_ag_area_flg', 'pct_land_area', 'pct_land_area_flg'])),inplace=True)
codes = ag[['land_type','land_code']]
codes = codes.drop_duplicates()
codes = codes.set_index('land_code')
# land types
# 6610 Agricultural area
# 6700 Protected terrestrial areas
# 6655 Permanent meadows and pastures
# 6690 Total area equipped for irrigation
# 6621 Arable land
# 6650 Permanent crops
# 6713 Conservation agriculture area: >30% ground cover
# 6671 Agricultural area organic, total
# ag[isnan(ag.pct_ag_area)].land_type.unique()
# ag[isnan(ag.pct_land_area)].land_type.unique()
ag = ag.drop(['pct_ag_change_flg','pct_ag_area_flg','pct_land_area_flg'],axis=1)
ag = ag[ag.year > 1983]
ag = ag[ag.country_code.isin(FAOSTAT_COUNTRY_CODES.values())]
ag_area = ag[ag.land_code == 6610]
agpiv = ag_area.pivot('year','country','pct_land_area')
# agpiv.plot()
brazil = ag[ag.country_code == 21]
brazil = brazil[(brazil.land_code != 6610) & (brazil.land_code != 6700)]
brag = brazil[np.isnan(brazil.pct_land_area)]
brag = brag.pivot('year','land_type','pct_ag_area')
|
NUKnightLab/fao-explorer
|
data/agri-environmental/analyze.py
|
Python
|
mit
| 1,744
|
#!/usr/bin/env python
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
try:
from Tkinter import *
except:
pass
import sys,os
class pKa_base:
def set_graph_size(self,x_max,x_min,y_max,y_min,x_tick_level=1.0,y_tick_level=0.25,width=1200,height=450):
self.y_max=y_max
self.y_min=y_min
self.x_max=x_max
self.x_min=x_min
self.graph_y_size=height-150
self.graph_x_size=width-150
self.fact_y=self.graph_y_size/(self.y_max-self.y_min)
self.fact_x=self.graph_x_size/(self.x_max-self.x_min)
self.x_tick_level=x_tick_level
self.y_tick_level=y_tick_level
return
#
# ----
#
def get_xy(self,pH,crg):
#
# Get x and y coordinates
#
if not hasattr(self,'y_min'):
self.set_graph_size(20,0.00001,1.0,-1.0)
y=crg-self.y_min
y=50+self.graph_y_size-self.fact_y*y
return self.get_x(pH),y
#
# ----
#
def get_x(self,pH):
#
# pH
#
return (pH-self.x_min)*self.fact_x+70
#
# -----------------
#
def draw_ordinates(self,canvas,y_label='Charge',x_label='pH'):
#
# Draw the axes
#
if not hasattr(self,'y_min'):
self.set_graph_size(20,0.00001,1.0,-1.0)
startx,starty=self.get_xy(self.x_min,self.y_min)
endx,endy=self.get_xy(self.x_max,self.y_max)
y_axis=starty
x_axis=startx
#
# pH axis
#
self.pH_axis(canvas,startx,starty,endx,endy,x_label)
#
# Charge axis
#
canvas.create_line(x_axis,y_axis,x_axis,endy,fill='black',width=self.linewidth)
canvas.create_text(x_axis,endy-15,text=y_label,fill='black')
#
# Tick marks and tick labels
#
for tickcrg in range(int(self.y_min*100),int(self.y_max*100)+int(self.y_tick_level*100.0),int(self.y_tick_level*100.0)):
dummy,y=self.get_xy(0.0,float(tickcrg)/100.0)
canvas.create_line(x_axis,y,x_axis-5,y,fill='black',width=self.linewidth)
canvas.create_text(x_axis-25,y,text='%5.2f' %(float(tickcrg)/100.0),fill='black')
return
#
# ----------------
#
def pH_axis(self,canvas,startx,starty,endx,endy,x_label='pH'):
#
# Draw the pH axis
#
y_axis=starty
x_axis=startx
#
# pH axis
#
canvas.create_line(startx,starty,endx,starty,fill='black',width=self.linewidth)
canvas.create_text(endx+10,starty,text=x_label)
#
# Tick marks and tick labels
#
for tickph in range(int(self.x_min)*10,int(self.x_max*10.0)+int(self.x_tick_level*10.0),int(self.x_tick_level*10.0)):
x,dummy=self.get_xy(float(tickph/10.0),self.y_min)
canvas.create_line(x,y_axis,x,y_axis+5,fill='black',width=self.linewidth)
canvas.create_text(x,y_axis+15,text='%5.1f' %(float(tickph)/10.0),fill='black')
return
#
# ----------
#
def update_curves(self,curves=None,labels=None):
#
# Delete all lines from last round
#
if not getattr(self,'lines',None):
self.lines={}
#
for line in self.lines.keys():
self.tc.delete(line)
del self.lines[line]
#
# If no curves then return
#
if not curves:
return
#
# Define colour_order if it's not defined already
#
if not getattr(self,'colour_order',None):
self.colour_order=['black','red','blue','green','grey','magenta','cyan']
#
# Draw the titration curves
#
groups=curves.keys()
groups.sort()
#pHvalues=curves[groups[0]].keys()
#pHvalues.sort()
#pHvalues=pHvalues[:-1]
#
# Keep track of the label positions
#
self.label_pos=[]
done_label={}
#
# Start looping
#
group_count=0
for group in groups:
pHvalues=curves[group].keys()
pHvalues.sort()
pHvalues=pHvalues[:-1]
lastpH=pHvalues[0]
lastcrg=curves[group][lastpH]
colour=self.colour_order[group_count%len(self.colour_order)]
#
#
pkadone=None
for pH in pHvalues[1:]:
lastx,lasty=self.get_xy(lastpH,lastcrg)
crg=curves[group][pH]
x,y=self.get_xy(pH,crg)
self.lines[(self.tc.create_line(lastx,lasty,float(x),float(y),
fill=colour,
width=self.linewidth))]=1
lastcrg=crg
lastpH=pH
#
# Label and pKa value
#
pKa=curves[group]['pKa']
if abs(pKa-pH)<=abs(pHvalues[0]-pHvalues[1]) and not pkadone:
ty=float(y)#+float(group_count)*10.0
newx,newy=self.resolve_clash(x,ty)
#if newx!=x or newy!=ty:
# #
# # Draw a line to the label position
# #
# self.lines[self.tc.create_line(x,y,newx-50,newy,fill=colour,
# width=self.linewidth)]=1
x=self.tc.create_text(newx,newy,text='%s, pKa: %.1f' %(group,pKa),fill=colour)
self.lines[x]=1
self.label_pos.append([newx,newy])
pkadone=1
#
# Did we do a label for this group?
#
if not pkadone:
x,y=self.get_xy(pHvalues[0],curves[group][pHvalues[0]])
x=x+50
y=y-10
newx,newy=self.resolve_clash(x,y)
if newx!=x or newy!=y:
#
# Draw a line to the label position
#
self.lines[self.tc.create_line(x,y,newx,newy,fill=colour,
width=self.linewidth)]=1
x=self.tc.create_text(newx,newy,text='%s, pKa: %.1f' %(group,pKa),fill=colour)
self.lines[x]=1
self.label_pos.append([newx,newy])
#
# Update the counter for colours
#
group_count=group_count+1
return
#
# -----
#
def resolve_clash(self,x,y):
"""Resolve label clashes if any..."""
label_size_x=100
label_size_y=20
orgy=y
orgx=x
clash_x=0
clash_y=0
counter=0
first_check=1
while (clash_x>0 or clash_y>0 or first_check) and counter<200 :
clash_x=0
clash_y=0
for xold,yold in self.label_pos:
diffx=abs(x-xold)
diffy=abs(y-yold)
if diffx<label_size_x and diffy<label_size_y:
# Clash
#
# Record the smallest clash distance
if abs(label_size_x-diffx) < abs(label_size_y-diffy):
clash_x=clash_x+1
else:
clash_y=clash_y+1
#
# Resolve clash
#
counter=counter+1
if not first_check:
if clash_y>0:
y=y+10
if y>400:
x=x+10
y=orgy
if clash_x>0:
x=x+10
else:
first_check=None
#
# return the ok positions
#
return x,y
#
# ------
#
def menubutton_list(self,window=None,variable=None,list=None,default=None,indicatoron=0):
variable.set(default)
# the button
button=Menubutton(window,textvariable=variable,relief=RAISED)
# the menu
menu=Menu(button,tearoff=0)
button['menu']=menu
for type in list:
menu.add_radiobutton(label=type,variable=variable,value=type,indicatoron=indicatoron)
return button
|
dmnfarrell/peat
|
pKaTool/pKa_base.py
|
Python
|
mit
| 9,182
|
# queries to do with module/class/function relationships
from __future__ import generators
from bike.globals import *
from getTypeOf import getTypeOf, getTypeOfExpr
from bike.parsing.newstuff import generateModuleFilenamesInPythonPath, generateModuleFilenamesInPackage, getPythonPath
from bike.parsing.pathutils import getPackageBaseDirectory
from bike.query.common import MatchFinder, walkLinesContainingStrings, getScopeForLine
from bike import log
from bike.parsing.fastparserast import Module
import re
def getRootClassesOfHierarchy(klass):
if klass is None: # i.e. dont have base class in our ast
return None
if klass.getBaseClassNames() == []: # i.e. is a root class
return [klass]
else:
rootclasses = []
for base in klass.getBaseClassNames():
baseclass = getTypeOf(klass,base)
rootclass = getRootClassesOfHierarchy(baseclass)
if rootclass is None: # base class not in our ast
rootclass = [klass]
rootclasses+=rootclass
return rootclasses
|
srusskih/SublimeBicycleRepair
|
bike/query/relationships.py
|
Python
|
mit
| 1,066
|
import argparse
import importlib
import logging
import sys
def global_args(parser):
parser.add_argument('--debug', action='store_true',
help='display debug output')
parser.add_argument('-q', '--quiet', action='store_true',
help='squash all output')
def basic_args(parser):
parser.add_argument('-r', '--repo', help='Repository to vendor')
def setup_parser():
p = argparse.ArgumentParser(prog='git-vendor',
description='Vendor git repositories easily')
sp = p.add_subparsers(dest='action', metavar='actions')
init = sp.add_parser('init', help='Create sync configuration')
sync = sp.add_parser('sync', help='Vendor code')
sp.add_parser('version', help='print version')
# add subcommand options
global_args(init)
global_args(sync)
basic_args(init)
basic_args(sync)
sync.add_argument('-t', '--tag', help='Tag to vendor')
sync.add_argument('-d', '--directory',
default='.', help='Output Directory')
sync.add_argument('--allow-dirty', action='store_true',
help='Allow operations on a dirty repository')
return p
def setup_logging(debug=None):
logger = logging.getLogger('git-vendor')
logger.setLevel(logging.INFO)
if debug:
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
if debug:
f = '%(asctime)s %(levelname)s %(name)s: %(message)s'
else:
f = '%(levelname)s: %(message)s'
formatter = logging.Formatter(f)
ch.setFormatter(formatter)
if debug is None:
ch = logging.NullHandler()
logger.addHandler(ch)
return logger
def main(args=None):
parser = setup_parser()
known, unknown = parser.parse_known_args(args)
exit = 0
if known.action == 'version':
import pkg_resources
print pkg_resources.require('git-vendor')[0].version
sys.exit(0)
log = setup_logging(known.debug)
try:
action = importlib.import_module("..%s" % known.action,
'gitvendor.%s' % known.action)
exit = action.main(known, unknown)
except Exception as e:
if known.debug:
log.exception(e.message)
sys.exit(exit)
else:
print(e)
log.critical(e.message)
sys.exit(exit)
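# Editor's note: an added illustration of how the parser above handles a typical
# command line; the repository URL and tag are hypothetical.
def _demo_cli_parsing():
    parser = setup_parser()
    known, unknown = parser.parse_known_args(
        ['sync', '-r', 'https://example.com/upstream.git', '-t', 'v1.2.3'])
    assert known.action == 'sync'
    assert known.repo == 'https://example.com/upstream.git'
    assert known.tag == 'v1.2.3'
    return known, unknown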
if __name__ == "__main__":
main()
|
chuckbutler/git-vendor
|
gitvendor/cli.py
|
Python
|
mit
| 2,474
|
import gym
from gym import wrappers
import numpy as np
class Environment:
""" Defines an environment the actor interacts with.
"""
def __init__(self):
""" Initializes an environment.
"""
pass
def next_obs(self, cur_action):
""" Takes an action in the environment.
"""
raise NotImplementedError
def new_episode(self):
""" Starts a new episode of the environment.
"""
raise NotImplementedError
@property
def action_shape(self):
""" Returns the shape of the action.
"""
raise NotImplementedError
@property
def obs_shape(self):
""" Returns the shape of the observation.
"""
raise NotImplementedError
class GymEnvironment(Environment):
def __init__(self, name, monitor = False):
""" Initializes a gym environment.
Args
monitor: If True, wraps the environment with the openAI gym monitor.
"""
self.env = gym.make(name)
        if monitor:
            # Honour the 'monitor' flag documented above instead of always wrapping.
            self.env = wrappers.Monitor(self.env, './results/cart_pole_1')
self._cur_obs = None
def next_obs(self, cur_action, render = False):
""" Runs a step in the gym environment.
Args:
action: Current action to perform
            render: (Optional) Whether to render the environment or not.
Returns:
obs: State of the environment after step.
reward: Reward received from the step.
done: Bool signaling terminal step.
"""
self.cur_obs, self.cur_reward, self.done, _ = self.env.step(cur_action)
        if not np.all(np.isfinite(self.cur_obs)) or not np.isfinite(self.cur_reward):
import pdb
pdb.set_trace()
if render:
self.env.render()
if self.done:
self.new_episode()
return self.cur_obs, self.cur_reward, self.done
def new_episode(self):
""" Initiates a new episode by resetting the environment.
Returns:
obs: Initial observation of the new episode.
"""
self.cur_obs = self.env.reset()
self.env.render()
return self.cur_obs
@property
def action_size(self):
return self.env.action_space.shape
@property
def obs_size(self):
return self.env.observation_space.shape
@property
def cur_obs(self):
return self._cur_obs
@cur_obs.setter
def cur_obs(self, value):
self._cur_obs = value
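# Editor's note: an added sketch of driving GymEnvironment for a few random steps; the
# environment id 'CartPole-v0' is only an example and the render flag of next_obs() is
# left False.
def _demo_random_rollout(steps=10):
    env = GymEnvironment('CartPole-v0')
    env.new_episode()
    total_reward = 0.0
    for _ in range(steps):
        action = env.env.action_space.sample()          # sample from the wrapped gym env
        obs, reward, done = env.next_obs(action, render=False)
        total_reward += reward
    return total_reward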
|
fizz-ml/pytorch-aux-reward-rl
|
environment.py
|
Python
|
mit
| 2,569
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
import ruamel.yaml as yaml
import time
import colorama
import intelhex
from pprint import pprint
from hexdump import hexdump as hexdump
import keyplus
from keyplus.constants import *
from keyplus.exceptions import KeyplusUSBCommandError
from keyplus.layout import *
from keyplus.device_info import KeyboardPinMapping
from keyplus.keycodes import *
import keyplus.cdata_types
if __name__ == '__main__':
colorama.init()
dev_list = keyplus.find_devices(vid_pid="6666:1111")
print(dev_list)
kb = dev_list[0]
kb.connect()
if 0:
led = 0
led_state = 0
for _ in range(6):
led_state = not led_state
kb.set_indicator_led(led, led_state)
time.sleep(0.5)
kb.reset()
time.sleep(3)
kb.reconnect()
kb.set_indicator_led(led, not led_state)
if 1:
for kb_id in range(64):
try:
print("layer info: ", kb.get_layers(kb_id))
except KeyplusUSBCommandError as err:
if err.code == CMD_ERROR_KEYBOARD_INACTIVE:
continue
else:
raise err
print("name:", kb.device_info.device_name)
print("nrf24_disabled:", kb.device_info.nrf24_disabled)
print("i2c_disabled:", kb.device_info.i2c_disabled)
scan_mode = ScanMode()
scan_mode.set_scan_mode('row_col')
scan_mode.add_row_pins(['D0', 'D1', 'D2', 'D3'])
if 1:
scan_mode.add_column_pins(['A0', 'A1', 'A2', 'A3', 'A4', 'A5'])
for row in range(4):
for col in range(6):
if col == row:
continue
scan_mode.add_key_to_matrix_map(row*6+col, row, col)
else:
scan_mode.add_column_pins(['A5', 'A4', 'A3', 'A2', 'A1', 'A0'])
for row in range(4):
for col in range(6):
# Note: reverse column position in row
scan_mode.add_key_to_matrix_map(row*6+(5-col), row, col)
scan_mode.set_debounce_profile("cherry_mx")
print("scan_mode.to_json:", scan_mode.to_json())
target = kb.get_device_target()
scan_plan = scan_mode.generate_scan_plan(target)
scan_plan_bytes = scan_plan.pack()
print(scan_plan_bytes)
print(repr(scan_plan_bytes))
print(type(scan_plan_bytes))
hexdump(scan_plan_bytes)
new_scan_plan = keyplus.cdata_types.scan_plan_t()
new_scan_plan.unpack(scan_plan_bytes)
pprint(("Matches: {}".format(scan_plan == new_scan_plan), new_scan_plan))
pin_mapping = scan_mode.generate_pin_mapping(target)
pin_mapping_raw = pin_mapping.pack()
hexdump(pin_mapping_raw)
new_pin_mapping = KeyboardPinMapping()
new_pin_mapping.unpack(pin_mapping_raw, new_scan_plan, target)
hexdump(new_pin_mapping.pack())
new_scan_mode = ScanMode()
new_scan_mode.load_raw_data(new_scan_plan, new_pin_mapping)
pprint(vars(scan_mode))
pprint(vars(new_scan_mode))
layout_settings = kb.get_layout_info()
hexdump(layout_settings.pack())
pprint(vars(layout_settings))
layout_device = LayoutDevice()
layout_device.load_raw_data(
kb.device_info, layout_settings, new_pin_mapping
)
pprint(vars(layout_device))
print(("#"*80 + "\n")*3)
scan_mode_test = ScanMode()
test_scan_mode_dict = {
'mode': 'col_row',
'rows': ['D0', 'D1', 'D2', 'D3'],
'cols': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5'],
'matrix_map': [
'r0c0', 'r0c1', 'r0c2', 'r0c3', 'r0c4', 'r0c5',
'r1c0', 'r1c1', 'r1c2', 'r1c3', 'r1c4', 'r1c5',
'r2c0', 'r2c1', 'r2c2', 'r2c3', 'r2c4', 'r2c5',
'r3c0', 'r3c1', 'r3c2', 'r3c3', 'r3c4', 'r3c5',
],
# 'debounce': 'kailh_box',
'debounce': {
"debounce_time_press": 5,
"debounce_time_release": 10,
"trigger_time_press": 1,
"trigger_time_release": 3,
"parasitic_discharge_delay_idle": 2.0,
"parasitic_discharge_delay_debouncing": 10.0,
}
}
scan_mode_test.parse_json(test_scan_mode_dict)
hexdump(scan_mode_test.generate_scan_plan(target).pack())
hexdump(scan_mode_test.generate_pin_mapping(target).pack())
pprint(vars(scan_mode_test))
print(("@"*80 + "\n")*3)
test_layout_device_dict = {
'id': 0,
'layout': 0,
'layout_offset': 0,
'scan_mode': test_scan_mode_dict,
}
layout_device = LayoutDevice()
layout_device.parse_json("test_device", json_obj=test_layout_device_dict)
pprint(vars(layout_device))
print(("*"*80 + "\n")*3)
rf_settings = LayoutRFSettings()
rf_settings.load_random()
pprint(vars(rf_settings))
rf_settings = LayoutRFSettings()
test_rf_settings = {
"aes_encryption_key": "9febeb27209e131ceaf812f73feed577",
"rf_channel": 0x08,
"auto_retransmit_count": 8, # options: 0-15
# TODO: should include retransmit delay option
"data_rate": "2mbps", # options: 2mbps, 1mbps, 250kbps
"transmit_power": "0dB", # options: 0dB, -6dB, -12dB, -18dB
"pipe0": '2aef63473c',
"pipe1": '168d715956',
"pipe2": 'c1',
"pipe3": 'fc',
"pipe4": '63',
"pipe5": '00',
"pipe6": '00',
}
rf_settings.parse_json(test_rf_settings)
pprint(vars(rf_settings))
new_json = rf_settings.to_json()
print(rf_settings, new_json)
new_rf_settings = LayoutRFSettings()
new_rf_settings.parse_json(new_json)
newest_rf_settings = LayoutRFSettings()
newest_rf_settings.load_raw_data(kb.rf_info)
thingy = newest_rf_settings.to_json()
print("newest_rf_settings:", thingy)
newest_raw = newest_rf_settings.generate_rf_settings()
something = newest_raw
print(something.pack)
print(something.pack())
keycode_mapper = KeycodeMapper()
layout = LayoutKeyboard(
layout_id = "foo",
number_layers = 3,
device_sizes = [3, 5],
)
layout.set_keycode(
layer = 0,
device = 0,
key_number = 0,
keycode = "ca-up"
)
layout.set_keycode(
layer = 1,
device = 0,
key_number = 0,
keycode = "a"
)
layout.set_keycode(
layer = 1,
device = 0,
key_number = 1,
keycode = "b"
)
pprint(vars(layout))
for layer in layout.layer_list:
pprint(vars(layer))
for device in layer.device_list:
pprint(vars(device))
keycode_mapper = KeycodeMapper()
pprint(layout.to_json())
pprint(layout.to_keycodes())
new_layout = LayoutKeyboard('new')
new_layout.load_keycodes(layout.to_keycodes())
print("new_layout: ", end="")
pprint(new_layout.to_json())
print("new_layout: ", end="")
pprint(new_layout.to_keycodes())
print(kb.read_layout_data(0, 63))
print()
layout_data = kb.read_whole_layout()
print("Full layout data: ")
hexdump(layout_data)
unpacked_layout_data = kb.unpack_layout_data()
for (i, layout) in enumerate(unpacked_layout_data):
# pprint(vars(layout))
print("This is layout {}:".format(i))
print(layout.to_json())
print()
keyplus_layout2 = KeyplusLayout()
keyplus_layout2.from_yaml_file(
"../layouts/arbitrary_map_tester.yaml",
"../layouts/test_rf_config.yaml",
)
pprint(vars(keyplus_layout2))
keyplus_layout = KeyplusLayout()
with open("../layouts/basic_split_test.yaml") as f:
layout_json = yaml.load(f.read())
with open("./_ignore_rf_settings.yaml") as f:
rf_json = yaml.load(f.read())
keyplus_layout.parse_json(layout_json, rf_json)
# keyplus_layout.get_layout_by_id(2).set_keycode(0, 0, 3, 's-W')
raw_layout = keyplus_layout.build_layout_section(kb.get_device_target())
hexdump(raw_layout)
ihex = intelhex.IntelHex()
ihex.frombytes(raw_layout, 0x7800)
ihex.write_hex_file(
"test_layout_out.hex"
)
print(ihex)
raw_settings = keyplus_layout.build_settings_section(kb.get_device_target())
hexdump(raw_settings)
ihex = intelhex.IntelHex()
ihex.frombytes(raw_settings, 0x7600)
ihex.write_hex_file(
"temp_new.hex"
)
kb.update_settings_section(raw_settings, keep_rf=1)
kb.update_layout_section(raw_layout)
#[len(chunk_list)] kb.set_passthrough_mode(True)
kb.disconnect()
|
ahtn/keyplus
|
host-software/test_api.py
|
Python
|
mit
| 8,590
|
from django.views import generic
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from .models import Note
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.views.generic import View
from .forms import UserForm
from django.contrib.auth.mixins import LoginRequiredMixin
class IndexView(LoginRequiredMixin, generic.ListView):
login_url = '/login/'
redirect_field_name = 'notes:login_user'
template_name = 'notes/home.html'
context_object_name = 'all_notes'
def get_queryset(self):
return Note.objects.order_by('-date')[:40]
class PrivateIndexView(LoginRequiredMixin, generic.ListView):
login_url = '/login/'
redirect_field_name = 'notes:login_user'
template_name = 'notes/home.html'
context_object_name = 'all_notes'
def get_queryset(self):
return Note.objects.filter(private=True).order_by('-date')[:40]
class PublicIndexView(LoginRequiredMixin, generic.ListView):
login_url = '/login/'
redirect_field_name = 'notes:login_user'
template_name = 'notes/home.html'
context_object_name = 'all_notes'
def get_queryset(self):
return Note.objects.filter(private=False).order_by('-date')[:40]
class DetailView(LoginRequiredMixin, generic.DetailView):
login_url = '/login/'
redirect_field_name = 'notes:login_user'
model = Note
template_name = 'notes/detail.html'
class NoteCreate(LoginRequiredMixin, CreateView):
login_url = '/login/'
redirect_field_name = 'notes:login_user'
model = Note
fields = ['note_title', 'brief', 'body','private', 'note_logo']
def form_valid(self, form):
instance = form.save(commit=False)
instance.created_by = self.request.user
return super(NoteCreate, self).form_valid(form)
class NoteUpdate(LoginRequiredMixin, UpdateView):
login_url = '/login/'
redirect_field_name = 'notes:login_user'
model = Note
fields = ['note_title', 'brief', 'body', 'note_logo']
class NoteDelete(LoginRequiredMixin, DeleteView):
login_url = '/login/'
redirect_field_name = 'notes:login_user'
model = Note
success_url = reverse_lazy('notes:home')
class UserFormView(View):
form_class = UserForm
template_name = 'notes/registration_form.html'
#display blank form
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
# process form data
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
# cleaned/ normalized data
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
            # returns a User object if the credentials are correct
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('notes:home')
return render(request, self.template_name, {'form': form})
def logout_user(request):
logout(request)
return redirect('notes:home')
# process form data
def login_user(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('notes:home')
else:
return render(request, 'notes/login_form.html', {'error_message': 'Invalid Login Credentials'})
return render(request, 'notes/login_form.html')
|
rajujha373/OurNoticeBoardv2.0
|
notes/views.py
|
Python
|
mit
| 4,119
|
"""
unit tests for utils module
"""
import pytest
import raccoon as rc
from raccoon.utils import assert_frame_equal
def test_assert_frame_equal():
df1 = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, columns=['a', 'b'], index=[1, 2, 3])
assert_frame_equal(df1, df1)
df2 = rc.DataFrame({'a': [1, 1, 1], 'b': [4, 5, 6]}, columns=['a', 'b'], index=[1, 2, 3])
with pytest.raises(AssertionError):
assert_frame_equal(df1, df2)
df2 = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, columns=['b', 'a'], index=[1, 2, 3])
with pytest.raises(AssertionError):
assert_frame_equal(df1, df2)
df2 = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, columns=['a', 'b'], index=[11, 12, 13])
with pytest.raises(AssertionError):
assert_frame_equal(df1, df2)
df2 = rc.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, columns=['a', 'b'], index=[1, 2, 3], sort=True)
with pytest.raises(AssertionError):
assert_frame_equal(df1, df2)
def test_data_function():
# Example function for testing
def assert_approx_equal(left_data, right_data, precision=0.00001):
for i in range(len(left_data)):
for j in range(len(left_data[i])):
assert abs(left_data[i][j] - right_data[i][j]) <= precision
df1 = rc.DataFrame({'a': [1.0, 3.0], 'b': [4.0, 6.0]}, columns=['a', 'b'], index=[1, 3])
df2 = rc.DataFrame({'a': [1.0, 3.001], 'b': [4.0, 6.001]}, columns=['a', 'b'], index=[1, 3])
# confirm fails with standard compare
with pytest.raises(AssertionError):
assert_frame_equal(df1, df2)
# passes with function and proper parameters
assert_frame_equal(df1, df2, assert_approx_equal, {'precision': 0.01})
    # fails with function and precision parameter too low
with pytest.raises(AssertionError):
assert_frame_equal(df1, df2, assert_approx_equal, {'precision': 0.00001})
|
rsheftel/raccoon
|
tests/test_dataframe/test_utils.py
|
Python
|
mit
| 1,893
|
import os
import logging
from zeex.core.compat import QtGui, QtCore
from zeex.core.ui.basic.directory_ui import Ui_DirectoryViewDialog
from zeex.core.views.basic.line_edit import DirectoryPathCreateDialog
from zeex.core.utility.qtdropbox import QtDropBox
from zeex.icons import icons_rc
class DirectoryViewDialog(QtGui.QDialog, Ui_DirectoryViewDialog):
signalDownloadReady = QtCore.Signal()
def __init__(self, source_view=None, parent=None):
QtGui.QDialog.__init__(self, parent=parent)
self._source_view = source_view
self.setupUi(self)
def configure(self):
self.btnDelete.clicked.connect(self.delete)
self.btnDownload.clicked.connect(self.download)
self.btnRefresh.clicked.connect(self.refresh)
self.btnUpload.clicked.connect(self.upload)
self.btnAddFolder.clicked.connect(self.add_folder)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def upload(self):
pass
def download(self):
pass
def delete(self):
pass
def refresh(self):
pass
def add_folder(self):
pass
@property
def source_view(self):
return self._source_view
def set_source_view(self, tree_view):
self._source_view = tree_view
class DropBoxViewDialog(DirectoryViewDialog):
def __init__(self, source_view, parent):
DirectoryViewDialog.__init__(self, source_view=source_view, parent=parent)
self.dropbox = QtDropBox()
self.dialog_add_folder = DropBoxDirectoryPathCreateDialog(self.dropbox, treeview=self.treeView,
base_dirname="/", parent=self)
self.configure()
def configure(self):
self.setWindowIcon(QtGui.QIcon(':/standard_icons/add.png'))
self.setWindowTitle("DropBox")
self.dialog_add_folder.setWindowTitle("Add DropBox Folder")
self.dialog_add_folder.signalDirectoryCreated.connect(self.refresh)
super(DropBoxViewDialog, self).configure()
def upload(self):
indexes = self.source_view.selectedIndexes()
paths = list(set([self.source_view.model().filePath(i) for i in indexes]))
to = self.treeView.model().directory(self.treeView.selectedIndexes()[0])
for p in paths:
self.dropbox.upload_file(p, to)
logging.info("DropBox - Uploaded {} to {}".format(p, to))
def download(self):
index = self.source_view.selectedIndexes()[0]
to = self.source_view.model().filePath(index)
from_idx = self.treeView.selectedIndexes()[0]
from_path = self.treeView.model().filePath(from_idx)
if os.path.isfile(to):
to = os.path.dirname(to)
to = os.path.join(to, os.path.basename(from_path))
self.dropbox.download_file(from_path, to)
logging.info("DropBox - Downloaded {} to {}".format(from_path, to))
def refresh(self):
self.treeView.setModel(self.dropbox.get_filesystem_model(update=True))
def delete(self):
from_idx = self.treeView.selectedIndexes()[0]
from_path = self.treeView.model().filePath(from_idx)
# Don't delete if there are children.
child = from_idx.child(0, 0)
if child.row() >= 0:
# Way too easy to make a big mistake otherwise
raise Exception("Can't delete folder that is not empty.")
self.dropbox.con.files_delete(from_path)
logging.info("DropBox - Deleted {}".format(from_path))
self.refresh()
def show(self, *args, **kwargs):
self.refresh()
        super(DropBoxViewDialog, self).show(*args, **kwargs)
def add_folder(self):
self.dialog_add_folder.show()
class DropBoxDirectoryPathCreateDialog(DirectoryPathCreateDialog):
def __init__(self, qtdropbox:QtDropBox, **kwargs):
DirectoryPathCreateDialog.__init__(self, **kwargs)
self.dropbox = qtdropbox
self.autorename = False
def execute(self):
model = self.treeview.model()
b = self.base_dirname
try:
idx = self.treeview.selectedIndexes()[0]
b = model.directory(idx)
if '.' in b[-5:]:
b = model.directory(idx.parent())
except (IndexError, Exception) as e:
assert b is not None, "Unable to verify base directory, error: {}".format(e)
if b.endswith("/"):
b = b[:-1]
directory = b + self.lineEdit.text()
self.dropbox.con.files_create_folder(directory, autorename=self.autorename)
self.signalDirectoryCreated.emit(directory)
|
zbarge/zeex
|
zeex/core/views/basic/directory.py
|
Python
|
mit
| 4,664
|
# =============================================================================
# Copyright (c) 2006, Ludo Sapiens Inc.
# All rights reserved.
#
# These coded instructions, statements, and computer programs contain
# unpublished, proprietary information and are protected by Federal copyright
# law. They may not be disclosed to third parties or copied or duplicated in
# any form, in whole or in part, without prior written consent.
# =============================================================================
import BuildSystem.Utilities
from BuildSystem.Parts.CompiledBinary import CompiledBinary
class Application(CompiledBinary):
"""An application container object defining all of the necessary fields to make an executable"""
def __init__(self, name=None, inputs=None, output=None, settings=None, variant=None, env=None):
CompiledBinary.__init__(self, name=name, inputs=inputs, output=output, settings=settings, variant=variant, env=env)
def run(self, args=None):
return BuildSystem.Utilities.run(self, args=args)
def deploy(self, dstDir=None):
return BuildSystem.Utilities.deploy(self, dstDir)
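# Illustrative usage sketch (assumed, not taken from the original build scripts):
# an Application is constructed with a name, input sources and an output target,
# then run or deployed through the BuildSystem utilities.
#
#   app = Application(name='hello', inputs=['hello.cpp'], output='hello',
#                     settings=None, variant=None, env=None)
#   app.run(args=['--verbose'])
#   app.deploy(dstDir='dist')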
|
LudoSapiens/Dev
|
Tools/BS/BuildSystem/Parts/Application.py
|
Python
|
mit
| 1,143
|
from croplands_api import create_app, db, limiter
from croplands_api.models import User
import unittest
from croplands_api.tasks.classifications import build_classifications_result, \
compute_image_classification_statistics
import json
class TestHighResImage(unittest.TestCase):
app = None
def setUp(self):
self.app = TestHighResImage.app
with self.app.app_context():
limiter.enabled = False
db.create_all()
def tearDown(self):
with self.app.app_context():
db.session.remove()
db.drop_all()
@classmethod
def setUpClass(cls):
super(TestHighResImage, cls).setUpClass()
cls.app = create_app('Testing')
# def test_get_image(self):
# with self.app.app_context():
# lat= 35.198136597203195
# lon = -111.64765298366547
#
# get_image(lat, lon, 18)
#
# image = Image.query.first()
# self.assertAlmostEqual(lat, image.lat, delta=0.001)
# self.assertAlmostEqual(lon, image.lon, delta=0.001)
def test_post_classification(self):
with self.app.app_context():
with self.app.test_client() as c:
headers = [('Content-Type', 'application/json')]
data = {'lat': 35.198136597203195, 'lon': -111.64765298366547}
post = c.post('/api/locations', headers=headers, data=json.dumps(data))
response = json.loads(post.data)
image_data = {'date_acquired': '2015-01-01', 'lat': 0, 'lon': 0,
'location_id': response['id'], 'bearing': 0, 'url': 'asdf'}
c.post('/api/images', headers=headers, data=json.dumps(image_data))
headers = [('Content-Type', 'application/json')]
response = c.get('/api/images', headers=headers)
image_id = json.loads(response.data)['objects'][0]['id']
data = {
"image": image_id,
"classification": 3
}
response = c.post('/api/image_classifications', headers=headers,
data=json.dumps(data))
self.assertEqual(response.status_code, 201)
def test_classification_results(self):
with self.app.test_client() as c:
headers = [('Content-Type', 'application/json')]
data = {'lat': 35.312, 'lon': -111.112}
post = c.post('/api/locations', headers=headers, data=json.dumps(data))
response = json.loads(post.data)
image_data = {'date_acquired': '2015-01-01', 'lat': 0, 'lon': 0,
'location_id': response['id'], 'bearing': 0, 'url': 'asdf'}
c.post('/api/images', headers=headers, data=json.dumps(image_data))
response = c.get('/api/images', headers=headers)
image_id = json.loads(response.data)['objects'][0]['id']
data = {
"image": image_id,
"classification": 3
}
c.post('/api/image_classifications', headers=headers,
data=json.dumps(data))
data = {
"image": image_id,
"classification": 3
}
c.post('/api/image_classifications', headers=headers,
data=json.dumps(data))
data = {
"image": image_id,
"classification": 3
}
response = c.post('/api/image_classifications', headers=headers,
data=json.dumps(data))
self.assertEqual(response.status_code, 201)
compute_image_classification_statistics(image_id)
build_classifications_result()
# def test_get_google_street_view_image(self):
# with self.app.app_context():
# data = {'lat': 35.198136597203195, 'lon': -111.64765298366547}
# get_google_street_view_image(**data)
# def test_directions(self):
# with self.app.app_context():
# origin_lat = 35.198136597203195
# origin_lon = -111.64765298366547
#
# destination_lat = 35.198136597203195
# destination_lon = -111.14765298366547
#
# # polyline = get_directions(origin_lat, origin_lon, destination_lat, destination_lon)
def test_classification_user(self):
with self.app.test_client() as c:
me = {'first': 'Justin', 'last': 'Poehnelt', 'affiliation': 'USGS',
'password': 'woot1LoveCookies!', 'email': 'jpoehnelt+test@usgs.gov'}
headers = [('Content-Type', 'application/json')]
c.post('/auth/register', headers=headers, data=json.dumps(me))
# verify user
User.from_email(me['email'])
# login
response = c.post('/auth/login', headers=headers,
data=json.dumps({'email': me['email'], 'password': me['password']}))
response.json = json.loads(response.data)
self.assertIn('data', response.json)
self.assertIn('token', response.json['data'])
token = response.json['data']['token']
headers = [('Content-Type', 'application/json'), ('authorization', 'bearer ' + token)]
data = {'lat': 35.312, 'lon': -111.112}
post = c.post('/api/locations', headers=headers, data=json.dumps(data))
response = json.loads(post.data)
image_data = {'date_acquired': '2015-01-01', 'lat': 0, 'lon': 0,
'location_id': response['id'], 'bearing': 0, 'url': 'asdf'}
c.post('/api/images', headers=headers, data=json.dumps(image_data))
response = c.get('/api/images', headers=headers)
image_id = json.loads(response.data)['objects'][0]['id']
data = {
"image": image_id,
"classification": 3
}
response = c.post('/api/image_classifications', headers=headers,
data=json.dumps(data))
classification = json.loads(response.data)
self.assertIsNotNone(classification['user_id'])
self.assertEqual(classification['user_id'], 1)
|
justinwp/croplands
|
tests/test_high_res_imagery.py
|
Python
|
mit
| 6,313
|
#!/usr/bin/env python
# standard library imports
import os
import sys
import logging
import argparse
import subprocess
# 3rd party imports
# KBase imports
try:
import biokbase.Transform.script_utils as script_utils
except ImportError:
from . import kbase_utils as script_utils
if sys.version.startswith('3'):
unicode = str
def transform(workspace_service_url=None, workspace_name=None,
object_name=None, output_file_name=None, input_directory=None,
working_directory=None, has_replicates=None, input_mapping=None, format_type=None,
level=logging.INFO, logger=None):
"""
Converts SampleProperty TSV file to json string of KBaseEnigmaMetals.SamplePropertyMatrix type.
Args:
        workspace_service_url: URL for a KBase Workspace service where KBase objects
are stored.
workspace_name: The name of the destination workspace.
object_name: The destination object name.
output_file_name: A file name where the output JSON string should be stored.
If the output file name is not specified the name will
default to the name of the input file appended with
'_output.json'.
input_directory: The directory where files will be read from.
working_directory: The directory the resulting json file will be
written to.
has_replicates: 0 if the input file contains marked series of replicates,
1 if the input file contains non-marked series of replicates,
2 if the input file contains no replicates.
input_mapping: JSON string mapping of input files to expected types.
If you don't get this you need to scan the input
directory and look for your files.
        format_type: Manually defined type of TSV file format.
Returns:
JSON files on disk that can be saved as a KBase workspace objects.
Authors:
Roman Sutormin, Alexey Kazakov
"""
if logger is None:
logger = script_utils.stderrlogger(__file__)
logger.info("Starting conversion of SampleProperty TSV to KBaseEnigmaMetals.SamplePropertyMatrix")
# token = os.environ.get('KB_AUTH_TOKEN')
if not working_directory or not os.path.isdir(working_directory):
raise Exception("The working directory {0} is not a valid directory!"
.format(working_directory))
classpath = ["$KB_TOP/lib/jars/kbase/transform/kbase_transform_deps.jar",
"$KB_TOP/lib/jars/apache_commons/commons-cli-1.2.jar",
"$KB_TOP/lib/jars/apache_commons/commons-lang3-3.1.jar",
"$KB_TOP/lib/jars/ini4j/ini4j-0.5.2.jar",
"$KB_TOP/lib/jars/jackson/jackson-annotations-2.2.3.jar",
"$KB_TOP/lib/jars/jackson/jackson-core-2.2.3.jar",
"$KB_TOP/lib/jars/jackson/jackson-databind-2.2.3.jar",
"$KB_TOP/lib/jars/jetty/jetty-all-7.0.0.jar",
"$KB_TOP/lib/jars/jna/jna-3.4.0.jar",
"$KB_TOP/lib/jars/kbase/auth/kbase-auth-0.3.1.jar",
"$KB_TOP/lib/jars/kbase/common/kbase-common-0.0.10.jar",
"$KB_TOP/lib/jars/servlet/servlet-api-2.5.jar",
"$KB_TOP/lib/jars/syslog4j/syslog4j-0.9.46.jar",
"$KB_TOP/lib/jars/kbase/workspace/WorkspaceClient-0.2.0.jar"]
mc = "us.kbase.kbaseenigmametals.SamplePropertyMatrixUploader"
argslist = ["--workspace_service_url {0}".format(workspace_service_url),
"--workspace_name {0}".format(workspace_name),
"--object_name {0}".format(object_name),
"--input_directory {0}".format(input_directory),
"--has_replicates {0}".format(has_replicates),
"--working_directory {0}".format(working_directory)]
if output_file_name:
argslist.append("--output_file_name {0}".format(output_file_name))
if input_mapping:
argslist.append("--input_mapping {0}".format(input_mapping))
argslist.append("--format_type {0}".format(format_type))
arguments = ["java", "-classpath", ":".join(classpath), mc, " ".join(argslist)]
logger.info(arguments)
# need shell in this case because the java code is depending on finding the KBase token in the environment
tool_process = subprocess.Popen(" ".join(arguments), stderr=subprocess.PIPE, shell=True)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
logger.info(stdout)
if stderr is not None and len(stderr) > 0:
logger.error(stderr)
if tool_process.returncode:
logger.error("Transformation from TSV.SampleProperty to KBaseEnigmaMetals.SamplePropertyMatrix failed on {0}".format(input_directory))
sys.exit(1)
logger.info("Conversion completed.")
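# Illustrative call with made-up values (not from the original pipeline); the
# function shells out to the Java uploader, so it assumes a KBase environment
# providing $KB_TOP and a KB_AUTH_TOKEN.
#
#   transform(workspace_service_url='https://kbase.us/services/ws/',
#             workspace_name='my_workspace',
#             object_name='sample_property_matrix',
#             input_directory='/tmp/input',
#             working_directory='/tmp/work',
#             has_replicates=2,
#             format_type='simple')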
def main():
script_details = script_utils.parse_docs(transform.__doc__)
parser = argparse.ArgumentParser(prog=__file__,
description=script_details["Description"],
epilog=script_details["Authors"])
parser.add_argument('--workspace_service_url',
help=script_details["Args"]["workspace_service_url"],
action='store', type=str, nargs='?', required=True)
parser.add_argument('--workspace_name',
help=script_details["Args"]["workspace_name"],
action='store', type=str, nargs='?', required=True)
parser.add_argument("--object_name",
help=script_details["Args"]["object_name"],
action='store', type=str, nargs='?', required=True)
parser.add_argument('--output_file_name',
help=script_details["Args"]["output_file_name"],
action='store', type=str, nargs='?', default=None,
required=False)
parser.add_argument('--input_directory',
help=script_details["Args"]["input_directory"],
action='store', type=str, nargs='?', required=True)
parser.add_argument("--working_directory",
help=script_details["Args"]["working_directory"],
action='store', type=str, nargs='?', required=True)
parser.add_argument("--has_replicates",
help=script_details["Args"]["has_replicates"],
action='store', type=str, nargs='?', required=True)
parser.add_argument('--input_mapping',
help=script_details["Args"]["input_mapping"],
action='store', type=unicode, nargs='?', default=None,
required=False)
# custom arguments specific to this uploader
parser.add_argument('--format_type',
help=script_details["Args"]["format_type"],
action='store', type=str, required=False)
args, unknown = parser.parse_known_args()
logger = script_utils.stderrlogger(__file__)
logger.debug(args)
try:
transform(workspace_service_url=args.workspace_service_url,
workspace_name=args.workspace_name,
object_name=args.object_name,
output_file_name=args.output_file_name,
input_directory=args.input_directory,
working_directory=args.working_directory,
has_replicates=args.has_replicates,
input_mapping=args.input_mapping,
format_type=args.format_type,
logger=logger)
except Exception as e:
logger.exception(e)
sys.exit(1)
# called only if script is run from command line
if __name__ == "__main__": # pragma: no cover
main()
|
kbase/transform
|
plugins/scripts/upload/trns_transform_TSV_SampleProperty_to_KBaseEnigmaMetals_SamplePropertyMatrix.py
|
Python
|
mit
| 7,966
|
"""
Generate dungeon using BSP algorithm
"""
import random
import sys
from utils import Box, Point
class Dungeon(object):
"""
Driver object for building the dungeon
"""
def __init__(self, x_max, y_max, room_min=3, edge_min=1, max_depth=4):
self.x_max = x_max
self.y_max = y_max
self.room_min = room_min
self.edge_min = edge_min
self.max_depth = max_depth
self.matrix = [['#' for _ in range(x_max)] for _ in range(y_max)]
def get_tile(self, point):
"""Wrap access to the internal map matrix
"""
print "Getting tile for %s" % repr(point)
return self.matrix[point.y][point.x]
def set_tile(self, point, glyph="."):
"""Wrap access to the internal map matrix
"""
self.matrix[point.y][point.x] = glyph
def __str__(self):
retval = ''
for row in self.matrix:
for cel in row:
retval += cel
retval += '\n'
return retval
def mk_room(self, bounding_box):
"""Make a room that fits within the given box
"""
if (bounding_box.top + self.edge_min + self.room_min
> bounding_box.bottom):
raise ValueError("Region too small to make room")
if (bounding_box.left + self.edge_min + self.room_min
> bounding_box.right):
raise ValueError("Region too small to make room")
h_max = bounding_box.bottom - bounding_box.top - self.edge_min
w_max = bounding_box.right - bounding_box.left - self.edge_min
height = random.randint(self.room_min, h_max)
width = random.randint(self.room_min, w_max)
# we now have a room height and width that fit within our bounding box.
# Just need to decide where to put the top left corner
y_start = random.randint(bounding_box.top + self.edge_min,
bounding_box.bottom - height)
x_start = random.randint(bounding_box.left + self.edge_min,
bounding_box.right - width)
room = Box(y_start, x_start, y_start + height - 1, x_start + width - 1)
for i in range(y_start, y_start + height):
for j in range(x_start, x_start + width):
self.set_tile(Point(j, i))
return room
def line_connection(self, room_1, room_2, split, is_vertical):
"""Connect two sections of the maze using a straight line. The
corridor will extend straight up and down (or left and right, depending
on the :param is_vertical: setting) until it hits a non-wall tile.
"""
if is_vertical:
# Vertical splitting need a horizontal (i.e. constant Y) connecting
# line. The conecting line is always perpendicular to the
# splitting line.
overlap_top = max(room_1.top, room_2.top)
overlap_bottom = min(room_1.bottom, room_2.bottom)
print "ot: %s, ob: %s" % (overlap_top, overlap_bottom)
if overlap_bottom < overlap_top:
raise ValueError("No vertical overlap between %s and %s"
% (room_1, room_2))
start_y = random.randint(overlap_top, overlap_bottom)
print "picked vertical overlap point %s between %s and %s" % (
start_y, room_1, room_2)
x = split
while self.get_tile(Point(x, start_y)) == '#':
self.set_tile(Point(x, start_y), glyph='-')
x -= 1
x = split + 1
while self.get_tile(Point(x, start_y)) == '#':
self.set_tile(Point(x, start_y), glyph='-')
x += 1
else:
# Horizontal splitting line need a vertical (i.e. constant X)
# connecting line. The connecting line is always perpendicular to
# the splitting line
overlap_left = max(room_1.left, room_2.left)
overlap_right = min(room_1.right, room_2.right)
print "ol: %s, or: %s" % (overlap_left, overlap_right)
if overlap_right < overlap_left:
raise ValueError("No horizontal overlap between %s and %s"
% (room_1, room_2))
start_x = random.randint(overlap_left, overlap_right)
print("picked horizontal overlap point %s between %s and %s"
% (start_x, room_1, room_2))
y = split
while self.get_tile(Point(start_x, y)) == '#':
self.set_tile(Point(start_x, y), glyph='|')
y -= 1
y = split + 1
while self.get_tile(Point(start_x, y)) == '#':
self.set_tile(Point(start_x, y), glyph='|')
y += 1
def bend_connection(self, room_1, room_2):
"""Make a connection with a right angle in it - used when the two
rooms don't share a facing side
"""
bend_point = Point(random.randint(room_1.left, room_1.right),
random.randint(room_2.top, room_2.bottom))
if room_1.bottom < room_2.top:
draw_point = bend_point
while self.get_tile(draw_point) == '#':
self.set_tile(draw_point, glyph='|')
draw_point = Point(draw_point.x, draw_point.y-1)
else:
draw_point = bend_point
while self.get_tile(draw_point) == '#':
self.set_tile(draw_point, glyph='|')
draw_point = Point(draw_point.x, draw_point.y+1)
if room_1.left < room_2.right:
# Move off our starting point, so we start on a wall
draw_point = Point(bend_point.x+1, bend_point.y)
while self.get_tile(draw_point) == '#':
self.set_tile(draw_point, glyph='-')
draw_point = Point(draw_point.x+1, draw_point.y)
else:
# Move off our starting point, so we start on a wall
draw_point = Point(bend_point.x-1, bend_point.y)
while self.get_tile(draw_point) == '#':
self.set_tile(draw_point, glyph='-')
draw_point = Point(draw_point.x-1, draw_point.y)
def mk_dungeon(self, bounding_box, depth=0):
"""Recursively generate the dungeon, building rooms as we go down and
connecting them as we go up
"""
print "%s" % repr(bounding_box)
edge_buffer = self.edge_min + self.room_min
room = None
if ((depth >= self.max_depth)
or (bounding_box.top + edge_buffer
> bounding_box.bottom - edge_buffer)
or (bounding_box.left + edge_buffer
> bounding_box.right - edge_buffer)):
room = self.mk_room(bounding_box)
return room
is_vertical = bool(random.randint(0, 1))
if is_vertical:
split = random.randint(bounding_box.left + edge_buffer,
bounding_box.right - edge_buffer)
box_1 = Box(bounding_box.top, bounding_box.left,
bounding_box.bottom, split)
box_2 = Box(bounding_box.top, split, bounding_box.bottom,
bounding_box.right)
else:
# horizontal split
split = random.randint(bounding_box.top + edge_buffer,
bounding_box.bottom - edge_buffer)
box_1 = Box(bounding_box.top, bounding_box.left, split,
bounding_box.right)
box_2 = Box(split, bounding_box.left, bounding_box.bottom,
bounding_box.right)
# Room 2 will always be down or right from room 1
room_1 = self.mk_dungeon(box_1, depth + 1)
room_2 = self.mk_dungeon(box_2, depth + 1)
# Now we have two "rooms" (which may be sub-rooms connected by a
# corridor), and we need to connect them.
# First see if they share an edge
# print self
try:
self.line_connection(room_1, room_2, split, is_vertical)
except ValueError:
self.bend_connection(room_1, room_2)
# print self
return Box(
min(room_1.top, room_2.top),
min(room_1.left, room_2.left),
max(room_1.bottom, room_2.bottom),
max(room_1.right, room_2.right)
)
def main():
if len(sys.argv) == 2:
seed = int(sys.argv[1])
else:
seed = random.randint(0, sys.maxint)
print "Seeding with %s" % seed
random.seed(seed)
dungeon = Dungeon(50, 50, max_depth=6)
dungeon.mk_dungeon(Box(0, 0, 49, 49))
print dungeon
if __name__ == '__main__':
main()
|
not-napoleon/mazes
|
bsp.py
|
Python
|
mit
| 8,744
|
#Brian Stamm
#CSC - 110
#Howework 8 - Encryption/Decryption
#11.18.14
import sys
import os
INSTRUCTION_CHOICE = 1 #GLOBAL CHOICES
DECRYPT_CHOICE = 2
ENCRYPT_CHOICE = 3
QUIT_CHOICE = 4
LETTERS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.!?,\'\";:-' #GLOBAL LIST - contains all letters/numbers/symbols encrypted
def main():
my_key = 'HIJKLMNOPQRSTUVWXYZ0123456789.!?,\'\";:-abcdefghijklmnopqrstuvwxyzABCDEFG' #KEY for LETTERS
choice = 0
pre_instruction(my_key) #Instructing on menu selection, also contains check to make sure my_key and LETTERS line up accurately
while choice != QUIT_CHOICE: #MENU Loop
display_menu()
choice = int(input("Enter the number of your choice: "))
if choice == INSTRUCTION_CHOICE:
print()
instruction()
elif choice == DECRYPT_CHOICE:
print()
decrypt(my_key)
elif choice == ENCRYPT_CHOICE:
print()
encrypt(my_key)
elif choice == QUIT_CHOICE:
closure()
sys.exit()
else:
            print("Sorry, I didn't understand that. Please try again.")
print()
def pre_instruction(key):
    print("Welcome to Little Orphan Annie's Encryption Program, brought to you by Ovaltine.")
print("Be sure to pick one of the numbers from the menu:")
keyList = list(key) #Following checks to ensure both LETTERS and my_list have all same characters within it. if not, shuts down.
lettersList = list(LETTERS)
keyList.sort()
lettersList.sort()
if keyList != lettersList:
        print("There's an error with the program's key. Please contact Ovaltine.")
sys.exit()
else:
print()
def instruction(): #Detailed instructions
print("This program is designed to either encrypt a simple text file,")
print("or it will decrypt a file it has encrypted. It uses a special key designed")
print("only for this program, so it cannot decipher a code it does not have the key for.")
print()
print("A menu will display various options. Currently, you are in the \"Detailed")
print("Instructions\" Section. In the \"Decrypt file\", it will walk you through")
print("how to decrypt a selected file. The \"Encrypt file\" will help with encrypting.")
print()
print("After either encrypting or decrypting the file, this program creates a new")
print("file in the same folder as the initial program. To help distinguish from")
    print("the original, the new file will either have an 'E' at the end of the file")
    print("name, for \"Encrypted\", or a 'D' for \"Decrypted.\"")
print()
    print("When you are asked to write the file name, be sure to put the")
    print("extension at the end as well. For example, if your file is named \"Program\",")
print("you will need to remember to write out \"Program.txt\"")
print()
print("That's all for now! Back to the menu -")
print()
def display_menu(): #Displays menu
print("Display menu:")
print()
print("1) Detailed Instructions")
print("2) Decrypt file")
print("3) Encrypt file")
print("4) Quit")
print('')
def decrypt(my_key): #Decyption
file_name = input("What is the name of the file we are decrypting, with extension? ")
infile = open(file_name, 'r')
temp_file = open('temp.txt', 'w') #Creates temp file
new_name = rename_d(file_name) #Function to create new name for decrypted file, with D at end
line = infile.readline()
while line != '':
        temp_file.write(line.translate({ord(x):y for (x,y) in zip(my_key,LETTERS)})) #LOOP that deciphers (same as in encryption, but my_key and LETTERS flipped)
line=infile.readline()
infile.close()
temp_file.close()
os.rename('temp.txt', new_name) #Renames the temp file as something permnament
def encrypt(my_key): #Encryption, same as previous, except
    file_name = input("What is the name of the file we are encrypting, with extension? ")
infile = open(file_name, 'r')
temp_file = open('temp.txt', 'w')
new_name = rename_e(file_name) #HERE, different function used for naming the file
line = infile.readline()
while line != '':
temp_file.write(line.translate({ord(x):y for (x,y) in zip(LETTERS,my_key)})) #And here, loop is basically same, except LETTERS and my_key are flipped
line=infile.readline()
infile.close()
temp_file.close()
os.rename('temp.txt', new_name)
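# Worked example of the substitution above (illustrative, not part of the
# assignment): str.translate maps each character of LETTERS to the character at
# the same position in my_key, so with the key defined in main() the text "abc"
# encrypts to "HIJ", and decrypt() applies the reverse mapping to recover "abc".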
def rename_d(file): #Creates new file name for DECRYPTION
new_name = file[0:(len(file)-4)]+'D.txt'
print("Your decrypted file is named: ",new_name)
print("And back to the main menu.")
print()
return new_name
def rename_e(file): #Creates new file name for ENCRYPTION
new_name = file[0:(len(file)-4)]+'E.txt'
print("Your encrypted file is named: ",new_name)
    print("And back to the main menu.")
print()
return new_name
def closure(): #Farewell
print()
    print("Thanks for using Little Orphan Annie's Encryption Program!")
print("And don't forget, More Ovaltine, Please!")
sys.exit()
main()
|
bkstamm67/python
|
pastHW/encrypt.py
|
Python
|
mit
| 5,310
|
import logging
from multiprocessing import Queue
from multiprocessing.managers import DictProxy
from time import time
from typing import Dict, List, Union, Any, Iterable
import numpy as np
from Engine.Communication.robot_state import RobotState
from Engine.Tracker.Filters import RobotFilter
from Engine.Tracker.Filters.ball_kalman_filter import BallFilter
from Util import Pose, Position
from Util.constant import FieldSide
from Util.geometry import rotate, wrap_to_pi
from Util.team_color_service import TeamColorService
from config.config import Config
config = Config()
class Tracker:
def __init__(self, vision_state: DictProxy, ui_send_queue: Queue):
self.logger = logging.getLogger(self.__class__.__name__)
self.ui_send_queue = ui_send_queue
self.vision_state = vision_state
self._blue_team = [RobotFilter(robot_id) for robot_id in range(config['ENGINE']['max_robot_id'])]
self._yellow_team = [RobotFilter(robot_id) for robot_id in range(config['ENGINE']['max_robot_id'])]
self._balls = [BallFilter(ball_id) for ball_id in range(config['ENGINE']['max_ball_on_field'])]
self._camera_capture_time = [-1 for _ in range(config['ENGINE']['number_of_camera'])]
self.neg_side = True if config['COACH']['on_negative_side'] else False
self.our_color = config['COACH']['our_color']
self.last_warning_time = None
def update(self) -> Dict[str, List[Dict[str, Any]]]:
for frame in self.camera_frames:
self._log_new_robots_on_field(frame)
self._camera_capture_time[frame['camera_id']] = frame['t_capture']
self._update(frame)
self._remove_undetected()
return self.game_state
def _update(self, detection_frame: Dict[str, List[Dict[str, Any]]]):
for robot_obs in detection_frame.get('robots_blue', ()):
if TeamColorService().our_team_color == TeamColorService.BLUE and robot_obs['robot_id'] > 7:
continue
obs = np.array([robot_obs['x'], robot_obs['y'], robot_obs['orientation']])
self._blue_team[robot_obs['robot_id']].update(obs, detection_frame['t_capture'])
for robot_obs in detection_frame.get('robots_yellow', ()):
if TeamColorService().our_team_color == TeamColorService.YELLOW and robot_obs['robot_id'] > 7:
continue
obs = np.array([robot_obs['x'], robot_obs['y'], robot_obs['orientation']])
self._yellow_team[robot_obs['robot_id']].update(obs, detection_frame['t_capture'])
for ball_obs in detection_frame.get('balls', ()):
obs = np.array([ball_obs['x'], ball_obs['y']])
closest_ball = self.find_closest_ball_to_observation(obs)
if closest_ball:
closest_ball.update(obs, detection_frame['t_capture'])
else:
if self.last_warning_time is None or time() - self.last_warning_time > 5:
self.last_warning_time = time()
self.logger.debug('The tracker is not able to assign some observations to a ball. '
'Try to increase the maximal number of ball on the field or recalibrate the vision.')
def predict(self, robot_state: RobotState, dt: float):
velocity_commands = [Pose() for _ in range(len(self._our_team))]
for packet in robot_state.packet:
velocity_commands[packet.robot_id] = packet.command
for robot in self._our_team:
if robot.orientation is not None:
velocity = self._put_in_world_referential(robot.orientation, velocity_commands[robot.id])
robot.predict(dt, next_velocity=velocity.to_array())
for robot in self._their_team:
robot.predict(dt)
for ball in self._balls:
ball.predict(dt)
def _remove_undetected(self):
for team_color, robots in self.active_robots.items():
undetected_robots = set()
for robot in robots:
if time() - robot.last_update_time > config['ENGINE']['max_undetected_robot_time']:
undetected_robots.add(robot.id)
robot.reset()
if undetected_robots:
self.logger.debug('%s robot(s) undetected for more than %.2f seconds: %r',
team_color.capitalize(),
config['ENGINE']['max_undetected_robot_time'],
undetected_robots)
for ball in self.active_balls:
if time() - ball.last_update_time > config['ENGINE']['max_undetected_ball_time']:
ball.reset()
self.logger.debug('Ball %d was undetected for more than %.2f seconds.',
ball.id,
config['ENGINE']['max_undetected_ball_time'])
def _put_in_world_referential(self, orientation: float, cmd: Pose) -> Pose:
if self.neg_side:
cmd.position = rotate(cmd.position, -np.pi - orientation)
cmd.x *= -1
cmd.orientation *= -1
else:
cmd.position = rotate(cmd.position, orientation)
return cmd
def _log_new_robots_on_field(self, detection_frame: Dict[str, List[Dict[str, Any]]]):
new_robots = {'blue': set(), 'yellow': set()}
for robot_obs in detection_frame.get('robots_blue', ()):
            if not self._blue_team[robot_obs['robot_id']].is_active:
                new_robots['blue'].add(robot_obs['robot_id'])
if new_robots['blue']:
self.logger.debug('Blue robot(s) detected: %r', new_robots['blue'])
for robot_obs in detection_frame.get('robots_yellow', ()):
            if not self._yellow_team[robot_obs['robot_id']].is_active:
                new_robots['yellow'].add(robot_obs['robot_id'])
if new_robots['yellow']:
self.logger.debug('Yellow robot(s) detected: %r', new_robots['yellow'])
@property
def camera_frames(self) -> List[Dict[str, Any]]:
valid_frames = [frame for frame in self.vision_state if self._is_valid_frame(frame)]
valid_frames = [Tracker._remove_ignored_side(frame) for frame in valid_frames]
if self.neg_side:
valid_frames = [Tracker._change_frame_side(frame) for frame in valid_frames]
return sorted(valid_frames, key=lambda frame: frame['t_capture'])
@staticmethod
def _change_frame_side(detection_frame: Dict[str, List[Dict[str, Any]]]) -> Dict[str, List[Dict[str, Any]]]:
for robot_obs in detection_frame.get('robots_blue', []) + detection_frame.get('robots_yellow', []):
robot_obs['x'] *= -1
robot_obs['orientation'] = wrap_to_pi(np.pi - robot_obs['orientation'])
for ball_obs in detection_frame.get('balls', ()):
ball_obs['x'] *= -1
return detection_frame
@staticmethod
def _remove_ignored_side(frame):
ignore_ball_in = Config()['ENGINE']['ignore_balls_in']
if ignore_ball_in is None:
return frame
for label in ['robots_blue', 'robots_yellow', 'balls']:
if label in frame:
frame[label] = [r for r in frame[label] if (ignore_ball_in == FieldSide.POSITIVE and r['x'] < 0) or (ignore_ball_in == FieldSide.NEGATIVE and r['x'] >= 0)]
return frame
def _is_valid_frame(self, frame):
if frame:
disabled_camera_id = config['ENGINE']['disabled_camera_id']
cam_id = frame['camera_id']
last_capture_time = self._camera_capture_time[cam_id]
return frame['t_capture'] > last_capture_time and cam_id not in disabled_camera_id
@property
def _our_team(self):
if self.our_color == 'yellow':
our_team = self._yellow_team
else:
our_team = self._blue_team
return our_team
@property
def _their_team(self):
if self.our_color == 'yellow':
their_team = self._blue_team
else:
their_team = self._yellow_team
return their_team
@property
def active_robots(self):
return {'blue': [robot for robot in self._blue_team if robot.is_active],
'yellow': [robot for robot in self._yellow_team if robot.is_active]}
@property
def active_balls(self):
return [ball for ball in self._balls if ball.is_active]
@property
def inactive_balls(self):
return [ball for ball in self._balls if not ball.is_active]
@property
def game_state(self) -> Dict[str, Union[float, List[Dict[str, Any]]]]:
game_fields = dict()
game_fields['timestamp'] = time()
game_fields['blue'] = self.blue_team
game_fields['yellow'] = self.yellow_team
game_fields['balls'] = self.balls
return game_fields
@property
def balls(self) -> List[Dict[str, Any]]:
return Tracker._format_entities(sorted(self.active_balls, key=lambda b: b.first_update_time))
@property
def blue_team(self) -> List[Dict[str, Any]]:
return Tracker._format_entities(self.active_robots['blue'])
@property
def yellow_team(self) -> List[Dict[str, Any]]:
return Tracker._format_entities(self.active_robots['yellow'])
@staticmethod
def _format_entities(entities: Iterable[Union[RobotFilter, BallFilter]]) -> List[Dict[str, Any]]:
formatted_list = []
for entity in entities:
fields = dict()
if type(entity) is RobotFilter:
fields['pose'] = Pose.from_values(*entity.pose)
fields['velocity'] = Pose.from_values(*entity.velocity)
elif type(entity) is BallFilter:
fields['position'] = Position(*entity.position)
fields['velocity'] = Position(*entity.velocity)
else:
raise TypeError('Invalid type provided: {}'.format(type(entity)))
fields['id'] = entity.id
formatted_list.append(fields)
return formatted_list
def find_closest_ball_to_observation(self, obs: np.ndarray) -> BallFilter:
if any(self.active_balls):
balls_position = np.array([ball.position for ball in self.active_balls])
dists = np.linalg.norm(balls_position - obs, axis=1)
idx = np.argmin(dists).view(int)
closest_ball = self.active_balls[idx]
if dists[idx] > config['ENGINE']['max_ball_separation']:
if len(self.inactive_balls) > 0:
closest_ball = self.inactive_balls[0]
self.logger.debug('New ball detected: ID %d.', closest_ball.id)
else:
closest_ball = None
else:
closest_ball = self.inactive_balls[0]
self.logger.debug('A ball was detected on the field.')
return closest_ball
|
RoboCupULaval/StrategyIA
|
Engine/Tracker/tracker.py
|
Python
|
mit
| 10,914
|
#!/usr/bin/env python
from ithkuil.morphology.words import Factory
from ithkuil.morphology.exceptions import IthkuilException
import sys
words = sys.argv[1:]
if not words:
words = sys.stdin.read().split()
for word in words:
try:
wordObj = Factory.parseWord(word)
print(wordObj.word, ':', wordObj.abbreviatedDescription())
except IthkuilException as e:
        print(word, ': ERROR:', str(e))
|
fizyk20/ithkuil
|
gloss_sentence.py
|
Python
|
mit
| 427
|
"""
Example gevent server loop
Might add config switches etc here later, to make this work
with custom settings and handlers.
"""
import gevent.monkey
gevent.monkey.patch_thread()
from gevent.server import StreamServer
from gmcbs.protocol import ClientHandler
import logging
import socket
import sys
def tcp_keepalive_linux(sock, keepidle, keepintvl, keepcnt):
""" Set TCP keepalive options on Linux """
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, keepidle)
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, keepintvl)
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, keepcnt)
def tcp_keepalive_osx(sock, keepidle, keepintvl, keepcnt):
""" Set TCP keepalive options on MacOSX (and maybe real BSDs?) """
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, 0x10, keepintvl)
class ConnectionHelper(object):
""" Helper class / callable for single client connection """
# this is still a class so that we can pass in config dict and others
def __init__(self, handler_class=ClientHandler, handler_kwargs=None, tcp_keepalive=True):
self.log = logging.getLogger(self.__class__.__name__)
# XXX: this is still a bit hacky. find out if anyone has made a true cross-platform tcp keepalive setting
if tcp_keepalive is True:
tcp_keepalive = (1800, 30, 3)
if tcp_keepalive:
if sys.platform == "darwin":
self.set_keepalive = lambda x: tcp_keepalive_osx(x, *tcp_keepalive)
else:
# sys.platform == "linux2"
self.set_keepalive = lambda x: tcp_keepalive_linux(x, *tcp_keepalive)
else:
self.set_keepalive = lambda x: None
self.handler_class = handler_class
self.handler_kwargs = dict(handler_kwargs or [])
def __call__(self, sock, addr):
addrinfo = "%s:%s" % addr
self.log.info("connected %s", addrinfo)
self.set_keepalive(sock)
handler = self.handler_class(sock, **self.handler_kwargs)
try:
handler.serve_client()
finally:
handler.log_stats("DISCONNECT")
# needs some error handling (revisiting after seeing this in full action)
self.log.info("disconnected %s %s", sock, addrinfo)
            sock.shutdown(socket.SHUT_RDWR)
sock.close()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
if 'debug' in argv:
lvl = logging.DEBUG
else:
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(asctime)s %(levelname)s %(threadName)s %(message)s')
connhandler = ConnectionHelper()
server = StreamServer(('127.0.0.1', 11212), connhandler)
logging.info("server: %r", server)
server.serve_forever()
if __name__ == '__main__':
sys.exit(main())
|
jvtm/gevent-memcached-server
|
gmcbs/server.py
|
Python
|
mit
| 2,911
|
# -*- coding: utf-8 -*-
import io
import sys
import json
import ast
import operator
from itertools import islice
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import svm
from ate_rule_learn import write_json
CONST_SENTENCE = "SENTENCE"
CONST_ASPECT_TERMS = "ASPECT_TERMS"
CONST_ASPECT_TERMS_POLARITY = "ASPECT_TERMS_POLARITY"
CONST_CONTEXT_WINDOW_SIZE = 10
# Performs text formatting on sentence before training/classification.
def remove_unwanted_tokens(sentence):
sentence = sentence.replace(",", " ")
sentence = sentence.replace(u"।", "")
sentence = sentence.replace("'", "")
sentence = sentence.replace("-", " ")
sentence = sentence.replace("(", " ")
sentence = sentence.replace(")", " ")
sentence = sentence.replace(".", "")
sentence = sentence.replace(":", " ")
sentence = sentence.strip()
return sentence
# Returns the list of training sentences from the train file
# Returns sentence id, review, aspect terms and their associated polarity from
# each sentence.
def get_train_sentences(file_path):
sentences = {}
with io.open(file_path, 'r', encoding='utf8') as f:
# skipping first line
list(islice(f, 1))
# reading from line 2
for line in iter(f):
line = line.rstrip()
if line:
sentence_data = line.split('#')
# collecting aspect terms
aspect_terms = []
aspect_terms_str = sentence_data[5]
aspect_terms_str = remove_unwanted_tokens(aspect_terms_str)
aspect_terms_temp = aspect_terms_str.split('&')
aspect_terms_temp = remove_blank_tokens(aspect_terms_temp)
# skipping the line if no aspect terms found
if len(aspect_terms_temp) == 0:
continue
# sorting aspect terms based on their 'from' value
aspect_terms_map ={}
aspect_terms_from = sentence_data[2].split('&')
for idx in range(len(aspect_terms_temp)):
aspect_term = aspect_terms_temp[idx]
aspect_term_from = aspect_terms_from[idx]
aspect_terms_map[aspect_term] = int(aspect_term_from)
aspect_terms_seq_sorted = sorted(aspect_terms_map.items(), key=operator.itemgetter(1))
for aspect_term in aspect_terms_seq_sorted:
aspect_terms.append(aspect_term[0])
sentence_id = sentence_data[0]
sentence = sentence_data[1]
sentence = remove_unwanted_tokens(sentence)
# collecting aspect terms polarity
aspect_terms_polarity = []
aspect_terms_polarity_temp = sentence_data[4].split('&')
for aspect_term_polarity in aspect_terms_polarity_temp:
aspect_terms_polarity.append(aspect_term_polarity)
sentence_map = {}
sentence_map[CONST_SENTENCE] = sentence
sentence_map[CONST_ASPECT_TERMS] = aspect_terms
sentence_map[CONST_ASPECT_TERMS_POLARITY] = aspect_terms_polarity
sentences[sentence_id] = sentence_map
f.close()
return sentences
# Removes blank tokens from the sentence
def remove_blank_tokens(sentence):
updated_tokens = []
for token in sentence:
token = token.strip()
if token:
updated_tokens.append(token)
return updated_tokens
# Reads the train sentences/reviews from the train file.
# Returns local context of aspect terms and its associated polarity
# for each sentence.
def get_train_lines(train_file_path):
train_data = []
sentences = get_train_sentences(train_file_path)
for sent_id, sentence_map in sentences.iteritems():
sentence = sentence_map[CONST_SENTENCE]
sentence = sentence.split(" ")
sentence = remove_blank_tokens(sentence)
aspect_terms = sentence_map[CONST_ASPECT_TERMS]
aspect_terms_polarity = sentence_map[CONST_ASPECT_TERMS_POLARITY]
for idx in range(len(aspect_terms)):
aspect_term = aspect_terms[idx]
aspect_term_polarity = aspect_terms_polarity[idx]
aspect_term_list = aspect_term.split(" ")
context_sent = get_local_context(sentence, aspect_term_list)
# updating sentence to remove processed tokens
if aspect_term_list[0] in sentence:
processed_idx = sentence.index(aspect_term_list[0])
else:
processed_idx = get_similar_index(sentence, aspect_term_list[0])
sentence = sentence[processed_idx+1 : len(sentence)]
train_data.append((aspect_term_polarity, context_sent))
return train_data
# Returns local context sentence to the aspect term to be used for training
# the classifier.
def get_local_context(sentence, aspect_term):
aspect_term_size = len(aspect_term)
window_size = CONST_CONTEXT_WINDOW_SIZE
if aspect_term_size >= window_size:
return ' '.join(aspect_term)
remaining_size = window_size - aspect_term_size
left_size = remaining_size / 2
left_tokens = get_preceding_tokens(sentence, aspect_term, left_size)
right_size = remaining_size - len(left_tokens)
right_tokens = get_succeeding_tokens(sentence, aspect_term, right_size)
train_sentence = get_train_sentence(left_tokens, aspect_term, right_tokens)
return train_sentence
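# Worked example (invented numbers, for illustration only): with the default
# CONST_CONTEXT_WINDOW_SIZE of 10, a 14-token sentence and a 2-token aspect term
# leave 8 context slots; up to 4 preceding tokens are taken first, and the
# remaining slots are filled with succeeding tokens before everything is joined
# into the training sentence.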
# Returns the concatenated string of left tokens, aspect terms and right tokens
# to be used as the training sentence
def get_train_sentence(left_tokens, aspect_term, right_tokens):
left = ' '.join(left_tokens)
term = ' '.join(aspect_term)
right = ' '.join(right_tokens)
return left + " " + term + " " + right
# Returns list of preceding tokens to the aspect term of the
# specified size.
def get_preceding_tokens(sentence, aspect_term, size):
tokens = []
first_aspect_term = aspect_term[0]
if first_aspect_term in sentence:
start_idx = sentence.index(first_aspect_term)
else:
start_idx = get_similar_index(sentence, first_aspect_term)
if size >= start_idx:
tokens = sentence[0: start_idx]
else:
tokens = sentence[start_idx - size: start_idx]
return tokens
# Returns the index of a similar sounding word (only different in some chars)
# to the aspect term in the sentence.
def get_similar_index(sentence, aspect_term):
idx = 0;
for word in sentence:
if aspect_term in word:
return idx
else:
idx += 1
return idx
# Returns list of succeeding tokens to the aspect term of the
# specified size.
def get_succeeding_tokens(sentence, aspect_term, size):
tokens = []
last_aspect_term = aspect_term[-1]
if last_aspect_term in sentence:
start_idx = last_index(last_aspect_term, sentence)
else:
start_idx = len(sentence) - 1 - get_similar_index(sentence[::-1], last_aspect_term)
remaining = len(sentence) - start_idx - 1
if size >= remaining:
tokens = sentence[start_idx + 1 : len(sentence)]
else:
tokens = sentence[start_idx + 1: start_idx + size + 1]
return tokens
# Returns the last index of the item in the specified list
def last_index(item, list):
return len(list) - 1 - list[::-1].index(item)
# Returns training data for the vectorizer from train lines
def get_train_data(train_lines):
train_data = []
train_labels = []
for train_line in train_lines:
train_labels.append(train_line[0])
train_data.append(train_line[1])
return train_data, train_labels
# Loads aspect terms' output from file
def load_aspect_terms(file_path):
with open(file_path) as output_file:
aspect_terms = json.load(output_file)
aspect_terms = ast.literal_eval(json.dumps(aspect_terms, ensure_ascii=False, encoding='utf8'))
return aspect_terms
# Reads the test sentences/reviews from the test file.
# Returns sentence id, review and aspect terms for each sentence.
def get_test_sentences(test_file_path):
sentences = {}
with io.open(test_file_path, 'r', encoding='utf8') as f:
# skipping first line
list(islice(f, 1))
# reading from line 2
for line in iter(f):
line = line.rstrip()
if line:
sentence_data = line.split('#')
# collecting aspect terms
aspect_terms = []
aspect_terms_str = sentence_data[4]
aspect_terms_str = remove_unwanted_tokens(aspect_terms_str)
aspect_terms_temp = aspect_terms_str.split('&')
aspect_terms_temp = remove_blank_tokens(aspect_terms_temp)
# sorting aspect terms based on their 'from' location
aspect_terms_map = {}
aspect_terms_from = sentence_data[2].split('&')
for idx in range(len(aspect_terms_temp)):
aspect_term = aspect_terms_temp[idx]
aspect_term_from = aspect_terms_from[idx]
aspect_terms_map[aspect_term] = int(aspect_term_from)
aspect_terms_seq_sorted = sorted(aspect_terms_map.items(), key=operator.itemgetter(1))
for aspect_term in aspect_terms_seq_sorted:
aspect_terms.append(aspect_term[0])
sentence_id = sentence_data[0]
sentence = sentence_data[1]
sentence = remove_unwanted_tokens(sentence)
sentence_map = {}
sentence_map[CONST_SENTENCE] = sentence
sentence_map[CONST_ASPECT_TERMS] = aspect_terms
sentences[sentence_id] = sentence_map
f.close()
return sentences
# Returns the list of test data for each sentence from the test file
# If the sentence does not have any aspect terms, then also such a
# sentence is included here. It is discarded while creating test data
# from test lines.
def get_test_lines(term_polarity_test_file):
test_lines = []
test_sentences = get_test_sentences(term_polarity_test_file)
for sent_id, sentence_data in test_sentences.iteritems():
aspect_terms = sentence_data[CONST_ASPECT_TERMS]
sentence = sentence_data[CONST_SENTENCE]
sentence = sentence.split(" ")
sentence = remove_blank_tokens(sentence)
sent_test_list = []
for idx in range(len(aspect_terms)):
aspect_term = aspect_terms[idx]
aspect_term = remove_unwanted_tokens(aspect_term)
aspect_term_list = aspect_term.split(" ")
context_sent = get_local_context(sentence, aspect_term_list)
# updating sentence to remove processed tokens
if aspect_term_list[0] in sentence:
processed_idx = sentence.index(aspect_term_list[0])
else:
processed_idx = get_similar_index(sentence, aspect_term_list[0])
sentence = sentence[processed_idx + 1: len(sentence)]
sent_test_list.append((aspect_term, context_sent))
test_lines.append((sent_id, sent_test_list))
return test_lines
# Returns the test data for the classifier
def get_test_data(test_lines):
test_data = []
for test_line in test_lines:
sent_data = test_line[1]
if len(sent_data) > 0:
for aspect_term_data in sent_data:
test_data.append(aspect_term_data[1])
return test_data
# Returns the output map from the classifiers output for each
# test sentence
def generate_output(prediction, test_lines):
aspect_terms_polarity = {}
prediction_idx = 0
for test_line in test_lines:
sentence_id = test_line[0]
sentence_map = {}
for aspect_terms in test_line[1]:
aspect_term = aspect_terms[0]
polarity = prediction[prediction_idx]
prediction_idx += 1
sentence_map[aspect_term] = polarity
aspect_terms_polarity[sentence_id] = sentence_map
return aspect_terms_polarity
'''
Run parameters:
python ats_svm_detect.py ./term_polarity_train.csv ./term_polarity_test.csv ./term_polarity_output.txt
'''
if __name__ == '__main__':
# fetching training lines
train_lines = get_train_lines(sys.argv[1])
# computing training data
train_data, train_labels = get_train_data(train_lines)
# fetching test data
test_lines = get_test_lines(sys.argv[2])
test_data = get_test_data(test_lines)
# training SVM
vectorizer = CountVectorizer(min_df=0.002, ngram_range=(1, 2), encoding="utf-8")
train_vectors = vectorizer.fit_transform(train_data)
# performing classification with SVM, kernel=linear
classifier_linear = svm.SVC(kernel='linear')
classifier_linear.fit(train_vectors, train_labels)
test_vectors = vectorizer.transform(test_data)
prediction = classifier_linear.predict(test_vectors)
# generating output
aspect_terms_polarity = generate_output(prediction, test_lines)
write_json(aspect_terms_polarity, sys.argv[3])
|
pnisarg/ABSA
|
src/ats_svm_detect.py
|
Python
|
mit
| 13,147
|
from __future__ import division
import numpy as np
import scipy.stats as stats
import scipy as sp
import logging
def calc_nbins(x, maximum=150):
n = (max(x) - min(x)) / (2 * len(x)**(-1/3) * (np.percentile(x, 75) - np.percentile(x, 25)))
return min(n, maximum)
def poisson_limits(N, kind, confidence=0.6827):
alpha = 1 - confidence
upper = np.zeros(len(N))
lower = np.zeros(len(N))
if kind == 'gamma':
lower = stats.gamma.ppf(alpha / 2, N)
upper = stats.gamma.ppf(1 - alpha / 2, N + 1)
elif kind == 'sqrt':
        lower = np.sqrt(N)
upper = lower
else:
raise ValueError('Unknown distribution: {}'.format(kind))
# clip lower bars
lower[N==0] = 0
return N - lower, upper - N
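# Worked example (illustrative): for a bin with N = 0 at the default 68.27%
# confidence, alpha = 0.3173, the lower bar is clipped to 0 and the upper bar is
# stats.gamma.ppf(1 - alpha / 2, 1) ~= 1.84, the usual one-sigma upper limit on
# an empty bin.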
def histpoints(x, bins=None, xerr=None, yerr='gamma', normed=False, **kwargs):
"""
Plot a histogram as a series of data points.
Compute and draw the histogram of *x* using individual (x,y) points
for the bin contents.
    By default, vertical Poisson error bars are calculated using the
    gamma distribution.
    Horizontal error bars are omitted by default.
    These can be enabled using the *xerr* argument.
Use ``xerr='binwidth'`` to draw horizontal error bars that indicate
the width of each histogram bin.
    Parameters
    ----------
x : (n,) array or sequence of (n,) arrays
        Input values. This takes either a single array or a sequence of
        arrays, which are not required to be of the same length.
"""
import matplotlib.pyplot as plt
if bins is None:
bins = calc_nbins(x)
h, bins = np.histogram(x, bins=bins)
width = bins[1] - bins[0]
center = (bins[:-1] + bins[1:]) / 2
area = sum(h * width)
if isinstance(yerr, str):
yerr = poisson_limits(h, yerr)
if xerr == 'binwidth':
xerr = width / 2
if normed:
h = h / area
yerr = yerr / area
area = 1.
if not 'color' in kwargs:
kwargs['color'] = 'black'
if not 'fmt' in kwargs:
kwargs['fmt'] = 'o'
plt.errorbar(center, h, xerr=xerr, yerr=yerr, **kwargs)
return center, h, area
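# Minimal usage sketch (illustrative, assuming numpy and matplotlib are
# available in the environment):
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.random.normal(size=1000)
#   center, h, area = histpoints(x, bins=40, xerr='binwidth', normed=True)
#   plt.show()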
|
ibab/missing_hep
|
missing_hep/plotting.py
|
Python
|
mit
| 2,154
|
#\begin{align*}
#c \int_A \left[ \phi \hat u + \psi \hat v + \varphi \hat \eta \right] \, dA
#& = \int_A \left( \overline u \phi - \frac{1}{Ro k^2} \psi
#+ \frac{1}{Ro} \overline h \varphi \right) \hat u \, dA \\
#& = \int_A \left( (\partial_y \overline u - \frac{1}{Ro}) \phi + \overline u \psi - \frac{1}{Ro} \overline h \partial_y \varphi \right) \hat v \, dA \\
#& = \int_A \left( \frac{1}{Ro} \phi - \frac{1}{Ro k^2} \psi \partial_y + \varphi \overline u \right) \hat \eta \, dA \\
#\end{align*}
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
from firedrake import *
from firedrake.petsc import PETSc
try:
from slepc4py import SLEPc
except ImportError:
import sys
warning("Unable to import SLEPc, eigenvalue computation not possible (try firedrake-update --slepc)")
sys.exit(0)
# Define an Interval Mesh
Ly = 10.0
Uj = 0.5
n0 = 200
mesh = IntervalMesh(n0, Ly)
x = SpatialCoordinate(mesh)
# Define parameters
beta = Constant('0.0')
Bu = Constant('0.0004')
Ro = Constant('0.25')
# Profile
profile = 'bickley'
# Print parameters
print('Ro = ', float(Ro))
print('Bu = ', float(Bu))
print('profile = ', profile)
# Order of the Method
p = 0 # Define CG function space
# Function Spaces
V = FunctionSpace(mesh,'CG',p+2)
Z = V*V*V
# Impose zero Dirichlet BCs
bc = DirichletBC(Z.sub(1), 0.0, "on_boundary")
# Define modes
emodes_real, emodes_imag = Function(V), Function(V)
# Define modes
em_real, em_imag = Function(Z), Function(Z)
u_real, u_imag = Function(V), Function(V)
v_real, v_imag = Function(V), Function(V)
eta_real, eta_imag = Function(V), Function(V)
# TEST/TRIAL FUNCTIONS
(u, v, eta) = TrialFunctions(Z)
(phi, psi, vphi) = TestFunctions(Z)
# Define Basic State
Ub = Function(V).interpolate(1./pow(cosh(x[0]-Ly/2),2))
dUb = Function(V).interpolate(-2.*sinh(x[0]-Ly/2)/pow(cosh(x[0]-Ly/2),3)) # FJP: compute correctly
Eb = Function(V).interpolate(-tanh(x[0]-Ly/2)) # FJP: remove?
Hb = Function(V).interpolate(1. + Ro*Bu*Eb)
#plot(Ub)
#plt.show()
#plot(Hb)
#plt.show()
num_eigenvalues = 4
# Wavenumber
dkk = 5e-2
kk = np.arange(0.5 - dkk,.5,dkk)
#dk = 0.05
#kk = np.arange(dk, 2., dk)
kL = len(kk)
egs_re = np.zeros((len(kk),num_eigenvalues))
egs_im = np.zeros((len(kk),num_eigenvalues))
cnt = 0
for k in kk:
k2 = Constant(k**2)
# Define Weak form
a = ( ( Ub*phi - psi/(Ro*k2) + Hb/(Ro*Bu)*vphi)*u )*dx \
+ ( ( (dUb - 1./Ro)*phi + Ub*psi - Hb/(Ro*Bu)*vphi.dx(0) )*v )*dx \
+ ( phi*eta/Ro - psi*eta.dx(0)/(Ro*k2) + Ub*vphi*eta )*dx
m = (phi*u + psi*v + vphi*eta)*dx
# Build Petsc operators
petsc_a = assemble(a, mat_type='aij', bcs=bc).M.handle
petsc_m = assemble(m, mat_type='aij', bcs=bc).M.handle
# Define Petsc options
opts = PETSc.Options()
opts.setValue("eps_gen_non_hermitian", None)
opts.setValue("st_pc_factor_shift_type", "NONZERO")
opts.setValue("eps_type", "lapack")
#opts.setValue("eps_type", "krylovschur")
opts.setValue("eps_largest_imaginary", None)
opts.setValue("eps_tol", 1e-10)
# Define Solver options
es = SLEPc.EPS().create(comm=COMM_WORLD)
es.setDimensions(num_eigenvalues)
es.setOperators(petsc_a, petsc_m)
es.setFromOptions()
es.solve()
# Additionally we can find the number of converged eigenvalues. ::
nconv = es.getConverged()
imax = min(nconv, num_eigenvalues)
for i in range(imax):
with em_real.dat.vec as vr:
with em_imag.dat.vec as vi:
lam = es.getEigenpair(i, vr, vi)
#print ("Iteration #: "), i, ("| Real Eigenfrequency: "), lam.real, ("| Imag Eigenfrequency: "), lam.imag
egs_re[cnt,i] = k*lam.real
egs_im[cnt,i] = k*lam.imag
u_real, v_real, eta_real = em_real.split()
u_imag, v_imag, eta_imag = em_imag.split()
## Find eigenfunctions
#emodes_real.vector()[:], emodes_imag.vector()[:] = vr, vi
#print "Leading eigenvalue is:", lam, " for cnt = ", cnt, " with nconv = ", nconv
    print(k, egs_im[cnt,0])
cnt += 1
print(np.max(abs(egs_im)))
|
francispoulin/firedrakeQG
|
LSA_SW_Jet_Firedrake.py
|
Python
|
mit
| 4,284
|
import logging
from urlparse import urlparse
from django.dispatch import Signal
from corsheaders import signals
from readthedocs.projects.models import Project, Domain
log = logging.getLogger(__name__)
WHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']
webhook_github = Signal(providing_args=['project', 'data', 'event'])
webhook_gitlab = Signal(providing_args=['project', 'data', 'event'])
webhook_bitbucket = Signal(providing_args=['project', 'data', 'event'])
def decide_if_cors(sender, request, **kwargs):
"""
Decide whether a request should be given CORS access.
This checks that:
* The URL is whitelisted against our CORS-allowed domains
* The Domain exists in our database, and belongs to the project being queried.
Returns True when a request should be given CORS access.
"""
host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0]
valid_url = False
for url in WHITELIST_URLS:
if request.path_info.startswith(url):
valid_url = True
if valid_url:
project_slug = request.GET.get('project', None)
try:
project = Project.objects.get(slug=project_slug)
except Project.DoesNotExist:
log.warning(
                'Invalid project passed to domain. [{project}:{domain}]'.format(
project=project_slug,
domain=host,
)
)
return False
domain = Domain.objects.filter(
domain__icontains=host,
project=project
)
if domain.exists():
return True
return False
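# Illustrative walk-through (hypothetical host and project names): a request to
# /api/v2/footer_html?project=myproject with Origin "https://docs.example.com"
# is whitelisted by path, and gets CORS access only if a Domain record matching
# "docs.example.com" is registered for the "myproject" Project; any other path
# or an unknown domain returns False.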
signals.check_request_enabled.connect(decide_if_cors)
|
tddv/readthedocs.org
|
readthedocs/core/signals.py
|
Python
|
mit
| 1,712
|
# -*- coding: utf-8 -*-
import random;
class PsychComponentBase:
def __init__(self,
NumAFC = 1,
GiveFeedback = False,
Conditions = None,
OutputFile = ''):
# start the random number generator
random.seed();
# These are all the parameters that are common to all procedures
self._NumAFC = NumAFC;
self._GiveFeedback = GiveFeedback;
self._Conditions = Conditions;
        self._ChoiceAnswers = None;
        self._Status = None;
self._OutputFile = OutputFile;
def GetRandomInterval(self):
        return random.randint(1, self._NumAFC)
def GetTrial(self, index):
pass
    def Initialise(self):
        pass
    def InvokeFeedbackEvent(self):
        pass
    #all logging should also be in here
    def GetConditionsDetails(self): pass
    def GetExperimentHeader(self): pass
    def GetParameterDetails(self): pass
    def GetTrialHeader(self): pass
    def GetResponseInfo(self): pass
    def WriteLogFile(self): pass
class ConstantStimulusPsychComponent (PsychComponentBase):
def __init__(self, *args, **kwargs):
super(ConstantStimulusPsychComponent, self).__init__(*args, **kwargs)
        self._Rep = None;
        self._StimVals = list();
        self._NumTrials = None;
        self._FeedbackFrequency = None;
        self._CurrentTrial = None;
    def Initialise(self):
        pass
def AddStimval(self, stimval):
        ''' Adds a stimulus value to the list of stimulus values. Checks that the
        input is not a string; it should also not be a boolean etc.: allowed
        types are float, double and integer. '''
if not isinstance(stimval, str):
self._StimVals.append(stimval);
else:
# should perhaps be some kind of try/catch statement
print "Error: stimulus values cannot be strings!"
    def SetNextTrail(self):
        pass
    def EvaluateResponse(self, trial):
        pass
    def Start(self):
        pass
    def Stop(self):
        pass
|
lugtigheid/PychophysicsLibrary
|
pycholib/PsychComponentBase.py
|
Python
|
mit
| 1,733
|
"""
Investigate how to reconstruct parameters from scarce information
"""
import sys
from datetime import datetime
import numpy as np
import networkx as nx
from tqdm import tqdm, trange
from system import System
from reconstruction import Reconstructor
from visualization import show_reconstruction_overview
class DictWrapper(dict):
"""
Dict with dot-notation access functionality
"""
def __getattr__(self, attr):
        if attr not in self:
raise KeyError('{} not in {}'.format(attr, self.keys()))
return self.get(attr)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
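# Illustrative usage of the dot-notation wrapper: d = DictWrapper({'a': 1})
# gives d.a == 1, while d.missing raises a KeyError listing the available keys.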
def main(inp):
"""
Main interface
"""
if inp is None:
# generate basis of system
graph = nx.gnp_random_graph(10, 0.6)
dim = len(graph.nodes())
assert nx.is_connected(graph), 'Graph not connected'
orig_A = nx.to_numpy_matrix(graph)
orig_B = np.random.uniform(10, 20, size=dim)
nz = np.nonzero(orig_A)
orig_A[nz] = np.random.uniform(2, 5, size=len(nz[0]))
print('Original A:\n', orig_A)
print('Original B:', orig_B)
omega = 3
OMEGA_list = [2.9,3.05,3.1,3.2]#np.arange(3.7, 4.3, 0.05)
# generate solutions
data = []
for OMEGA in tqdm(OMEGA_list):
runs = []
for i in trange(dim):
mask = np.ones(dim, dtype=bool)
mask[i] = 0
Bvec = orig_B.copy()
Bvec[mask] = 0
syst = System(orig_A, Bvec, omega, OMEGA)
sols, ts = syst.solve(0.01, 100)
pdiffs = Reconstructor.extract_phase_differences(sols, ts, syst.Phi)
#print(pdiffs)
#System.plot_solution(syst.Phi(ts), sols, ts)
if pdiffs is not None:
runs.append(pdiffs)
if len(runs) > 0:
data.append(((OMEGA, omega), runs))
# cache results
fname = '{}_{}'.format(datetime.now().strftime('%Y%m%d%H%M%S'), dim)
np.save('cache/{}'.format(fname), {
'data': data,
'ts': ts,
'orig_A': orig_A,
'orig_B': orig_B
})
else:
data, ts = inp.item()['data'], inp.item()['ts']
orig_A, orig_B = inp.item()['orig_A'], inp.item()['orig_B']
dim = orig_A.shape[0]
print('Original A:\n', orig_A)
print('Original B:', orig_B)
# reconstruct parameters
recon = Reconstructor(ts, data, dim)
rec_A, rec_B = recon.reconstruct()
print('Reconstructed A:\n', rec_A)
print('Reconstructed B:', rec_B)
# plot result
bundle = DictWrapper({
'orig_A': orig_A,
'orig_B': orig_B,
'rec_A': rec_A,
'rec_B': rec_B
})
show_reconstruction_overview(bundle, verbose=dim < 20)
if __name__ == '__main__':
inp = None
if len(sys.argv) == 2:
inp = np.load(sys.argv[1])
main(inp)
|
kpj/OsciPy
|
main.py
|
Python
|
mit
| 2,996
|
from django import template
from package.context_processors import used_packages_list
register = template.Library()
class ParticipantURLNode(template.Node):
def __init__(self, repo, participant):
self.repo = template.Variable(repo)
self.participant = template.Variable(participant)
def render(self, context):
repo = self.repo.resolve(context)
participant = self.participant.resolve(context)
if repo.user_url:
user_url = repo.user_url % participant
else:
user_url = '%s/%s' % (repo.url, participant)
return user_url
@register.tag
def participant_url(parser, token):
try:
tag_name, repo, participant = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r tag requires exactly two arguments" % token.contents.split()[0])
return ParticipantURLNode(repo, participant)
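# Illustrative template usage: {% participant_url repo participant %} renders
# repo.user_url % participant when user_url is set, and falls back to
# "<repo.url>/<participant>" otherwise.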
@register.filter
def commits_over_52(package):
return package.commits_over_52()
@register.inclusion_tag('package/templatetags/_usage_button.html', takes_context=True)
def usage_button(context):
response = used_packages_list(context['request'])
response['STATIC_URL'] = context['STATIC_URL']
response['package'] = context['package']
if context['package'].pk in response['used_packages_list']:
response['usage_action'] = "remove"
response['image'] = "usage_triangle_filled"
else:
response['usage_action'] = "add"
response['image'] = "usage_triangle_hollow"
return response
|
pydanny/djangopackages
|
package/templatetags/package_tags.py
|
Python
|
mit
| 1,551
|
from mcstat.domain import Metric
def metrics(timestamp, interval, channel, aggr):
"""
:param timestamp: Timestamp of the metric.
:type timestamp: Unix time (time.time)
:param channel: The channel.
:type channel: (string ip, port)
    :param interval: Length of time over which samples were collected
        (in seconds).
    :param aggr: Aggregated samples.
:type aggr: Aggr
"""
return Metric(timestamp=timestamp,
channel=channel,
bitrate=float(aggr.bytes * 8) / 1024 / interval,
packets=float(aggr.packets) / interval
)
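# Worked example (hypothetical numbers): aggr.bytes = 1310720 collected over
# interval = 10 s gives bitrate = 1310720 * 8 / 1024 / 10 = 1024.0 kbit/s,
# and aggr.packets = 500 gives packets = 50.0 per second.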
|
mstarzyk/mcstat
|
src/mcstat/stat.py
|
Python
|
mit
| 641
|
__author__ = 'massimo'
__all__ = ["Miner"]
|
yangsibai/SiteMiner
|
SiteMiner/__init__.py
|
Python
|
mit
| 43
|
from eul019 import is_leap_year, days_in_months
def test_is_leap_year():
input_true = [1904, 1908, 1912, 2000, 2004]
input_false = [1900, 1901, 1902, 1903, 1905]
for i in input_true:
assert( is_leap_year(i) == True)
for i in input_false:
assert( is_leap_year(i) == False)
def test_days_in_months():
input_months =[(1, 1900),
(2, 1900),
(3, 1900),
(4, 1900),
(5, 1900),
(6, 1900),
(7, 1900),
(8, 1900),
(9, 1900),
(10, 1900),
(11, 1900),
(12, 1900),
(2, 1904)
]
expected = [31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31, 29]
for ind, val in enumerate(input_months):
assert( days_in_months(*val)== expected[ind])
|
henry808/euler
|
019/test_019.py
|
Python
|
mit
| 931
|
# Copyright 2014 BitPay, Inc.
# Copyright (c) 2015-2018 The PIVX developers
# Copyright (c) 2018 The Ion developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import subprocess
import os
import json
import sys
def bctest(testDir, testObj, exeext):
execprog = testObj['exec'] + exeext
execargs = testObj['args']
execrun = [execprog] + execargs
stdinCfg = None
inputData = None
if "input" in testObj:
filename = testDir + "/" + testObj['input']
inputData = open(filename).read()
stdinCfg = subprocess.PIPE
outputFn = None
outputData = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputData = open(testDir + "/" + outputFn).read()
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE,universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
print("OSError, Failed to execute " + execprog)
sys.exit(1)
if outputData and (outs[0] != outputData):
print("Output data mismatch for " + outputFn)
sys.exit(1)
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
print("Return code mismatch for " + outputFn)
sys.exit(1)
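# Illustrative test vector (hypothetical file and tool names) in the JSON
# format consumed by bctest()/bctester():
#   {"exec": "./example-tool", "args": ["-json"], "input": "tx_raw.hex",
#    "output_cmp": "tx_decoded.json", "return_code": 0}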
def bctester(testDir, input_basename, buildenv):
input_filename = testDir + "/" + input_basename
raw_data = open(input_filename).read()
input_data = json.loads(raw_data)
for testObj in input_data:
bctest(testDir, testObj, buildenv.exeext)
sys.exit(0)
|
mitchellcash/ion
|
src/test/bctest.py
|
Python
|
mit
| 1,562
|
from ArnoldCat import ArnoldCat
import matplotlib.pyplot as plt
import math
def tracerT(Nmax, Nmin=1, p=1, q=1):
file = open('store.txt', 'w')
for i in xrange(Nmin,Nmax+1):
arnoldCat = ArnoldCat(i, None, None, p, q)
T = arnoldCat.calculerT()
print>>file, T
print('N = '+str(i)+' , T = '+ str(T))
file.close()
def plotPeriode(lien, borne = None):
with open(lien) as f:
lines = f.read().splitlines()
x = xrange(1,len(lines)+1)
c = math.log10((1+math.sqrt(5))/2)
    if borne is None:
plt.plot(x, lines, 'b-')
else :
plt.plot(x, lines, 'b-', x, [3*i for i in x], 'r-', x, [math.log10(i)/c for i in x], 'r-')
def plotPeriodeRatio(lien):
with open(lien) as f:
lines = f.read().splitlines()
x = xrange(1,len(lines)+1)
for i in xrange(len(lines)):
lines[i]=float(int(lines[i]))/(i+1)
plt.plot(x, lines, 'b-')
def plotPeriodeFibonacci(lien):
with open(lien) as f:
lines = f.read().splitlines()
F = [1, 1]
T = [1]
while F[1]<len(lines) :
T.append(lines[F[1]-1])
temp = F[1]
F[1] = F[0] + F[1]
F[0] = temp
x = xrange(1,len(T)+1)
plt.plot(x, T, 'bo', x, [i for i in x], 'r-', x, [2*i for i in x], 'r-')
def plotPeriodeLucas(lien):
with open(lien) as f:
lines = f.read().splitlines()
L = [2, 1]
T = [3]
while L[1]<len(lines) :
T.append(lines[L[1]-1])
temp = L[1]
L[1] = L[0] + L[1]
L[0] = temp
x = xrange(1,len(T)+1)
plt.plot(x, T, 'bo', x, [2*i-2 for i in x], 'r-', x, [i-1 for i in x], 'r-')
#plotPeriode('periode.txt', True)
#plotPeriodeRatio('periode.txt')
#plotPeriodeFibonacci('periode.txt')
#plotPeriodeLucas('periode.txt')
|
darkpudding/Projet-Fractal
|
ArnoldCatPeriod.py
|
Python
|
mit
| 1,788
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=30)),
('slug', models.SlugField(unique=True, null=True, blank=True)),
('tagline', models.TextField(blank=True)),
('sites', models.ManyToManyField(to='sites.Site')),
],
options={
'verbose_name': 'blog',
'verbose_name_plural': 'blogs',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('description', models.TextField(blank=True)),
('slug', models.SlugField(unique=True, blank=True)),
('blog', models.ForeignKey(related_name=b'categories', to='blog.Blog', on_delete=models.CASCADE)),
('parent_category', models.ForeignKey(related_name=b'child_categories', blank=True, to='blog.Category', null=True, on_delete=models.CASCADE)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('user_name', models.CharField(max_length=50, verbose_name=b'Name', blank=True)),
('user_email', models.EmailField(max_length=75, verbose_name=b'Email address', blank=True)),
('user_url', models.URLField(verbose_name=b'Website', blank=True)),
('comment_text', models.TextField(max_length=5000)),
('rank', models.IntegerField(default=0)),
('submit_date', models.DateTimeField(auto_now_add=True)),
('ip_address', models.GenericIPAddressField(null=True, verbose_name=b'IP address', blank=True)),
('user_agent', models.TextField(blank=True)),
('is_public', models.BooleanField(default=True, help_text=b'Uncheck this box to make the comment effectively disappear from the site.', verbose_name=b'is public')),
('is_removed', models.BooleanField(default=False, help_text=b'Check this box if the comment is inappropriate. A "This comment has been removed" message will be displayed instead.', verbose_name=b'is removed')),
('is_spam', models.BooleanField(default=False, help_text=b'Check this box to flag as spam.', verbose_name=b'is spam')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, verbose_name=b'Post Title')),
('slug', models.SlugField(unique_for_date=b'posted')),
('body', models.TextField(verbose_name=b'Post Contents')),
('edited', models.DateTimeField(auto_now=True, verbose_name=b'Last Edited')),
('posted', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Publication Date', blank=True)),
('status', models.CharField(default=b'DR', max_length=2, choices=[(b'DR', b'Draft'), (b'PR', b'Pending Review'), (b'PB', b'Published')])),
('sticky', models.BooleanField(default=False)),
('visibility', models.CharField(default=b'VI', max_length=2, choices=[(b'VI', b'Visible'), (b'PR', b'Private')])),
('comments_open', models.BooleanField(default=True)),
('author', models.ForeignKey(related_name=b'blog_entries', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
('blog', models.ForeignKey(related_name=b'entries', to='blog.Blog', on_delete=models.CASCADE)),
('categories', models.ManyToManyField(to='blog.Category')),
('edited_by', models.ForeignKey(related_name=b'blog_edited_entries', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),
],
options={
'verbose_name': 'entry',
'verbose_name_plural': 'entries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Flag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('flag', models.CharField(max_length=100, verbose_name=b'flag')),
('flag_date', models.DateTimeField(auto_now_add=True)),
('comment', models.ForeignKey(related_name=b'flag', to='blog.Comment', on_delete=models.CASCADE)),
('user', models.ForeignKey(related_name=b'comment_flags', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('description', models.TextField(blank=True)),
('slug', models.SlugField(unique=True, blank=True)),
('blog', models.ForeignKey(related_name=b'tags', to='blog.Blog', on_delete=models.CASCADE)),
],
options={
'verbose_name': 'tag',
'verbose_name_plural': 'tags',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='tag',
unique_together=set([('slug', 'blog')]),
),
migrations.AddField(
model_name='entry',
name='tags',
field=models.ManyToManyField(to='blog.Tag', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='comment',
name='entry',
field=models.ForeignKey(to='blog.Entry', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='comment',
name='parent',
field=models.ForeignKey(blank=True, to='blog.Comment', null=True, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(related_name=b'blog_comments', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='category',
unique_together=set([('slug', 'blog')]),
),
]
|
codefisher/djangopress
|
djangopress/blog/migrations/0001_initial.py
|
Python
|
mit
| 7,697
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
import datetime
import decimal
from io import BytesIO
from zipfile import ZipFile
from tempfile import TemporaryFile
from openpyxl.xml.functions import tostring, xmlfile
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.utils.datetime import CALENDAR_WINDOWS_1900
from openpyxl.styles import Style
from openpyxl.styles.styleable import StyleArray
from openpyxl.tests.helper import compare_xml
import pytest
class DummyWorkbook:
def __init__(self):
self.shared_strings = IndexedList()
self._cell_styles = IndexedList(
[StyleArray([0, 0, 0, 0, 0, 0, 0, 0, 0])]
)
self._number_formats = IndexedList()
self.encoding = "UTF-8"
self.excel_base_date = CALENDAR_WINDOWS_1900
self.sheetnames = []
@pytest.fixture
def WriteOnlyWorksheet():
from ..write_only import WriteOnlyWorksheet
return WriteOnlyWorksheet(DummyWorkbook(), title="TestWorksheet")
@pytest.mark.lxml_required
def test_write_header(WriteOnlyWorksheet):
ws = WriteOnlyWorksheet
doc = ws._write_header()
next(doc)
doc.close()
header = open(ws.filename)
xml = header.read()
expected = """
<worksheet xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<sheetPr>
<outlinePr summaryRight="1" summaryBelow="1"/>
<pageSetUpPr/>
</sheetPr>
<sheetViews>
<sheetView workbookViewId="0">
<selection sqref="A1" activeCell="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData/>
</worksheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_append(WriteOnlyWorksheet):
ws = WriteOnlyWorksheet
def _writer(doc):
with xmlfile(doc) as xf:
with xf.element('sheetData'):
try:
while True:
body = (yield)
xf.write(body)
except GeneratorExit:
pass
doc = BytesIO()
ws.writer = _writer(doc)
next(ws.writer)
ws.append([1, "s"])
ws.append(['2', 3])
ws.append(i for i in [1, 2])
ws.writer.close()
xml = doc.getvalue()
expected = """
<sheetData>
<row r="1" spans="1:2">
<c r="A1" t="n">
<v>1</v>
</c>
<c r="B1" t="s">
<v>0</v>
</c>
</row>
<row r="2" spans="1:2">
<c r="A2" t="s">
<v>1</v>
</c>
<c r="B2" t="n">
<v>3</v>
</c>
</row>
<row r="3" spans="1:2">
<c r="A3" t="n">
<v>1</v>
</c>
<c r="B3" t="n">
<v>2</v>
</c>
</row>
</sheetData>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_dirty_cell(WriteOnlyWorksheet):
ws = WriteOnlyWorksheet
def _writer(doc):
with xmlfile(doc) as xf:
with xf.element('sheetData'):
try:
while True:
body = (yield)
xf.write(body)
except GeneratorExit:
pass
doc = BytesIO()
ws.writer = _writer(doc)
next(ws.writer)
ws.append((datetime.date(2001, 1, 1), 1))
ws.writer.close()
xml = doc.getvalue()
expected = """
<sheetData>
<row r="1" spans="1:2">
<c r="A1" t="n" s="1"><v>36892</v></c>
<c r="B1" t="n"><v>1</v></c>
</row>
</sheetData>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.parametrize("row", ("string", dict()))
def test_invalid_append(WriteOnlyWorksheet, row):
ws = WriteOnlyWorksheet
with pytest.raises(TypeError):
ws.append(row)
@pytest.mark.lxml_required
def test_cell_comment(WriteOnlyWorksheet):
ws = WriteOnlyWorksheet
from openpyxl.comments import Comment
from .. write_only import WriteOnlyCell
cell = WriteOnlyCell(ws, 1)
comment = Comment('hello', 'me')
cell.comment = comment
ws.append([cell])
assert ws._comments == [comment]
ws.close()
with open(ws.filename) as src:
xml = src.read()
expected = """
<worksheet xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<sheetPr>
<outlinePr summaryRight="1" summaryBelow="1"/>
<pageSetUpPr/>
</sheetPr>
<sheetViews>
<sheetView workbookViewId="0">
<selection sqref="A1" activeCell="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1"><c r="A1" t="n"><v>1</v></c></row>
</sheetData>
<legacyDrawing r:id="commentsvml"></legacyDrawing>
</worksheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_cannot_save_twice(WriteOnlyWorksheet):
from .. write_only import WorkbookAlreadySaved
ws = WriteOnlyWorksheet
ws.close()
with pytest.raises(WorkbookAlreadySaved):
ws.close()
with pytest.raises(WorkbookAlreadySaved):
ws.append([1])
@pytest.mark.lxml_required
def test_close(WriteOnlyWorksheet):
ws = WriteOnlyWorksheet
ws.close()
with open(ws.filename) as src:
xml = src.read()
expected = """
<worksheet xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<sheetPr>
<outlinePr summaryRight="1" summaryBelow="1"/>
<pageSetUpPr/>
</sheetPr>
<sheetViews>
<sheetView workbookViewId="0">
<selection sqref="A1" activeCell="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData/>
</worksheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_auto_filter(WriteOnlyWorksheet):
ws = WriteOnlyWorksheet
ws.auto_filter.ref = 'A1:F1'
ws.close()
with open(ws.filename) as src:
xml = src.read()
expected = """
<worksheet xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<sheetPr>
<outlinePr summaryRight="1" summaryBelow="1"/>
<pageSetUpPr/>
</sheetPr>
<sheetViews>
<sheetView workbookViewId="0">
<selection sqref="A1" activeCell="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData/>
<autoFilter ref="A1:F1"/>
</worksheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_frozen_panes(WriteOnlyWorksheet):
ws = WriteOnlyWorksheet
ws.freeze_panes = 'D4'
ws.close()
with open(ws.filename) as src:
xml = src.read()
expected = """
<worksheet xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<sheetPr>
<outlinePr summaryRight="1" summaryBelow="1"/>
<pageSetUpPr/>
</sheetPr>
<sheetViews>
<sheetView workbookViewId="0">
<pane xSplit="3" ySplit="3" topLeftCell="D4" activePane="bottomRight" state="frozen"/>
<selection pane="topRight"/>
<selection pane="bottomLeft"/>
<selection pane="bottomRight" activeCell="A1" sqref="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData/>
</worksheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_write_empty_row(WriteOnlyWorksheet):
ws = WriteOnlyWorksheet
ws.append(['1', '2', '3'])
ws.append([])
ws.close()
with open(ws.filename) as src:
xml = src.read()
expected = """
<worksheet xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<sheetPr>
<outlinePr summaryRight="1" summaryBelow="1"/>
<pageSetUpPr/>
</sheetPr>
<sheetViews>
<sheetView workbookViewId="0">
<selection sqref="A1" activeCell="A1"/>
</sheetView>
</sheetViews>
<sheetFormatPr baseColWidth="10" defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:3">
<c r="A1" t="s">
<v>0</v>
</c>
<c r="B1" t="s">
<v>1</v>
</c>
<c r="C1" t="s">
<v>2</v>
</c>
</row>
<row r="2"/>
</sheetData>
</worksheet>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_save():
from tempfile import NamedTemporaryFile
filename = NamedTemporaryFile(delete=False)
from openpyxl.workbook import Workbook
from ..write_only import save_dump
wb = Workbook(write_only=True)
save_dump(wb, filename)
|
cgimenop/Excel2Testlink
|
ExcelParser/lib/openpyxl/writer/tests/test_write_only.py
|
Python
|
mit
| 9,274
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Mattias Svala
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Chris Wesseling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import libqtile.config
from libqtile import layout
from libqtile.confreader import Config
from test.layouts.layout_utils import assert_focus_path, assert_focused
class StackConfig(Config):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
layout.Stack(num_stacks=2),
layout.Stack(num_stacks=1),
]
floating_layout = libqtile.resources.default_config.floating_layout
keys = []
mouse = []
screens = []
follow_mouse_focus = False
stack_config = pytest.mark.parametrize("manager", [StackConfig], indirect=True)
def _stacks(manager):
stacks = []
for i in manager.c.layout.info()["stacks"]:
windows = i["clients"]
current = i["current"]
stacks.append(windows[current:] + windows[:current])
return stacks
@stack_config
def test_stack_commands(manager):
assert manager.c.layout.info()["current_stack"] == 0
manager.test_window("one")
assert _stacks(manager) == [["one"], []]
assert manager.c.layout.info()["current_stack"] == 0
manager.test_window("two")
assert _stacks(manager) == [["one"], ["two"]]
assert manager.c.layout.info()["current_stack"] == 1
manager.test_window("three")
assert _stacks(manager) == [["one"], ["three", "two"]]
assert manager.c.layout.info()["current_stack"] == 1
manager.c.layout.delete()
assert _stacks(manager) == [["one", "three", "two"]]
info = manager.c.groups()["a"]
assert info["focus"] == "one"
manager.c.layout.delete()
assert len(_stacks(manager)) == 1
manager.c.layout.add()
assert _stacks(manager) == [["one", "three", "two"], []]
manager.c.layout.rotate()
assert _stacks(manager) == [[], ["one", "three", "two"]]
@stack_config
def test_stack_cmd_down(manager):
manager.c.layout.down()
@stack_config
def test_stack_addremove(manager):
one = manager.test_window("one")
manager.c.layout.next()
two = manager.test_window("two")
three = manager.test_window("three")
assert _stacks(manager) == [['one'], ['three', 'two']]
assert manager.c.layout.info()["current_stack"] == 1
manager.kill_window(three)
assert manager.c.layout.info()["current_stack"] == 1
manager.kill_window(two)
assert manager.c.layout.info()["current_stack"] == 0
manager.c.layout.next()
two = manager.test_window("two")
manager.c.layout.next()
assert manager.c.layout.info()["current_stack"] == 0
manager.kill_window(one)
assert manager.c.layout.info()["current_stack"] == 1
@stack_config
def test_stack_rotation(manager):
manager.c.layout.delete()
manager.test_window("one")
manager.test_window("two")
manager.test_window("three")
assert _stacks(manager) == [["three", "two", "one"]]
manager.c.layout.down()
assert _stacks(manager) == [["two", "one", "three"]]
manager.c.layout.up()
assert _stacks(manager) == [["three", "two", "one"]]
manager.c.layout.down()
manager.c.layout.down()
assert _stacks(manager) == [["one", "three", "two"]]
@stack_config
def test_stack_nextprev(manager):
manager.c.layout.add()
one = manager.test_window("one")
two = manager.test_window("two")
three = manager.test_window("three")
assert manager.c.groups()["a"]["focus"] == "three"
manager.c.layout.next()
assert manager.c.groups()["a"]["focus"] == "one"
manager.c.layout.previous()
assert manager.c.groups()["a"]["focus"] == "three"
manager.c.layout.previous()
assert manager.c.groups()["a"]["focus"] == "two"
manager.c.layout.next()
manager.c.layout.next()
manager.c.layout.next()
assert manager.c.groups()["a"]["focus"] == "two"
manager.kill_window(three)
manager.c.layout.next()
assert manager.c.groups()["a"]["focus"] == "one"
manager.c.layout.previous()
assert manager.c.groups()["a"]["focus"] == "two"
manager.c.layout.next()
manager.kill_window(two)
manager.c.layout.next()
assert manager.c.groups()["a"]["focus"] == "one"
manager.kill_window(one)
manager.c.layout.next()
assert manager.c.groups()["a"]["focus"] is None
manager.c.layout.previous()
assert manager.c.groups()["a"]["focus"] is None
@stack_config
def test_stack_window_removal(manager):
manager.c.layout.next()
manager.test_window("one")
two = manager.test_window("two")
manager.c.layout.down()
manager.kill_window(two)
@stack_config
def test_stack_split(manager):
manager.test_window("one")
manager.test_window("two")
manager.test_window("three")
stacks = manager.c.layout.info()["stacks"]
assert not stacks[1]["split"]
manager.c.layout.toggle_split()
stacks = manager.c.layout.info()["stacks"]
assert stacks[1]["split"]
@stack_config
def test_stack_shuffle(manager):
manager.c.next_layout()
manager.test_window("one")
manager.test_window("two")
manager.test_window("three")
stack = manager.c.layout.info()["stacks"][0]
assert stack["clients"][stack["current"]] == "three"
for i in range(5):
manager.c.layout.shuffle_up()
stack = manager.c.layout.info()["stacks"][0]
assert stack["clients"][stack["current"]] == "three"
for i in range(5):
manager.c.layout.shuffle_down()
stack = manager.c.layout.info()["stacks"][0]
assert stack["clients"][stack["current"]] == "three"
@stack_config
def test_stack_client_to(manager):
manager.test_window("one")
manager.test_window("two")
assert manager.c.layout.info()["stacks"][0]["clients"] == ["one"]
manager.c.layout.client_to_previous()
assert manager.c.layout.info()["stacks"][0]["clients"] == ["two", "one"]
manager.c.layout.client_to_previous()
assert manager.c.layout.info()["stacks"][0]["clients"] == ["one"]
assert manager.c.layout.info()["stacks"][1]["clients"] == ["two"]
manager.c.layout.client_to_next()
assert manager.c.layout.info()["stacks"][0]["clients"] == ["two", "one"]
@stack_config
def test_stack_info(manager):
manager.test_window("one")
assert manager.c.layout.info()["stacks"]
@stack_config
def test_stack_window_focus_cycle(manager):
    # setup three tiled and two floating clients
manager.test_window("one")
manager.test_window("two")
manager.test_window("float1")
manager.c.window.toggle_floating()
manager.test_window("float2")
manager.c.window.toggle_floating()
manager.test_window("three")
# test preconditions, stack adds clients at pos of current
assert manager.c.layout.info()['clients'] == ['three', 'one', 'two']
# last added window has focus
assert_focused(manager, "three")
# assert window focus cycle, according to order in layout
assert_focus_path(manager, 'one', 'two', 'float1', 'float2', 'three')
|
ramnes/qtile
|
test/layouts/test_stack.py
|
Python
|
mit
| 8,291
|
#! /usr/bin/env python
from random import choice
class Sieve:
@staticmethod
def __numbers(upper_bound):
ns = [ (n, []) for n in range(1, upper_bound) ]
ns[0][1].append(0)
return ns
@staticmethod
def __next_current_index(numbers, current_index):
current_index += 1
while (len(numbers[current_index][1]) > 0):
current_index += 1
return current_index
def __init__(self, upper_bound):
self.numbers = Sieve.__numbers(upper_bound)
self.__sieve();
def __sieve_step(self, current_index):
d = self.numbers[current_index][0]
index = 2
while d * index - 1 < len(self.numbers):
self.numbers[d * index - 1][1].append(d)
index += 1
def __sieve(self):
current_index = 1
while 2 * self.numbers[current_index][0] - 1 < len(self.numbers):
self.__sieve_step(current_index)
current_index = Sieve.__next_current_index(self.numbers, current_index)
def for_primes(self):
for entry in self.numbers:
if len(entry[1]) == 0:
yield entry[0];
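# Illustrative usage: for_primes() yields the numbers whose recorded divisor
# list stayed empty, e.g. list(Sieve(10).for_primes()) == [2, 3, 5, 7].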
if __name__ == '__main__':
four_digit = []
for p in Sieve(10000).for_primes():
if p > 999:
four_digit.append(p)
p, q = choice(four_digit), choice(four_digit)
print min(p, q), max(p, q)
|
dvberkel/erotosthenes
|
sieve.py
|
Python
|
mit
| 1,376
|
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.fr/tf/property_status_frozen
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicFrTfPropertyStatusFrozen(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.fr/tf/property_status_frozen.txt"
host = "whois.nic.fr"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'registered')
def test_available(self):
eq_(self.record.available, False)
def test_registered(self):
eq_(self.record.registered, True)
|
huyphan/pyyawhois
|
test/record/parser/test_response_whois_nic_fr_tf_property_status_frozen.py
|
Python
|
mit
| 923
|
#!/usr/bin/python3
print("Content-Type: text/html")
print()
import json
import MySQLdb
import sys
import wiki
import ssl
import cgi
import calculator
import nlp
def analyze(data):
query = nlp.process(data)
response = {}
if(query['type'] == "wiki"):
response = encyclopedia(query['subject'])
if(query['type'] == "calc"):
response = calculator.main(query['subject'])
if(query['type'] == "error"):
response = query
return response
def encyclopedia(data):
connection = MySQLdb.connect (host = "127.0.0.1", user = "root", passwd = "", db = "dutchman")
cursor = connection.cursor ()
cursor.execute ("SELECT response from Wikipedia WHERE query=%s",[data])
respond = cursor.fetchall()
response = {}
response['type'] = 'wiki'
if respond:
response['content'] = respond[0][0]
cursor.execute ("SELECT url from Wikipedia WHERE query=%s",[data])
respond = cursor.fetchall()
if respond:
response['url'] = respond[0][0]
cursor.execute ("SELECT title from Wikipedia WHERE query=%s",[data])
respond = cursor.fetchall()
if respond:
response['title'] = respond[0][0]
if 'title' not in response:
response = wiki.info(data)
if response['type'] == 'wiki':
try:
cursor.execute("INSERT INTO Wikipedia VALUES (%s,%s,%s,%s)",(data,response['title'],response['content'],response['url']))
connection.commit()
            except Exception:
                pass
    cursor.close()
    connection.close()
return response
form = cgi.FieldStorage()
message = form.getvalue("message", "error")
response = {}
if message[-1] == '\n':
message = message[:-1]
if message == "welcome":
response = nlp.on_load_function()
response['content'] = response['content'] + ' ' + nlp.start()
elif message == "continue_or_not":
response = nlp.continue_or_not()
elif nlp.is_concluding(message):
response = nlp.parting()
else:
response = analyze(message)
print(json.dumps(response))
sys.exit()
|
apoorvapacharne/Darsy
|
python/main.py
|
Python
|
mit
| 1,872
|
"""
Metrics implementation.
Author: Evgeny Sergeev, 471
"""
import math
from model import get_user_vector, rate_prediction
def calculate_error(data, test_songs, f):
"""
The subsidiary function for MAE and RMSE metrics.
"""
number_of_rates = 0
sum_of_rates = 0.0
all_users = data.keys()
for user in all_users:
user_vector = get_user_vector(data, user, test_songs)
for song in user_vector:
real_rate = data[user].get(song, -1)
if real_rate == -1:
continue
prediction = rate_prediction(data, user, song)
number_of_rates += 1
diff = real_rate - prediction
sum_of_rates += f(diff)
return sum_of_rates / number_of_rates if number_of_rates != 0 else 0
def rmse(data, test_songs):
"""
    Calculates the root mean square error.
"""
error = calculate_error(data, test_songs, lambda x: x ** 2)
return math.sqrt(error)
def mae(data, test_songs):
"""
Calculates the mean absolute error.
"""
return calculate_error(data, test_songs, abs)
def dcg(data, test_songs, ideal=False):
"""
Calculates the discounted cumulative gain.
"""
number_of_rates = 0
sum_of_rates = 0.0
all_users = data.keys()
for user in all_users:
user_data = data[user]
sorted_user_data = sorted(user_data.items())
items = enumerate(sorted_user_data, 1)
for song_position, (song, real_rate) in items:
if song not in test_songs:
continue
number_of_rates += 1
log = math.log2(float(song_position))
maximum = max(1.0, log)
if ideal:
sum_of_rates += real_rate / maximum
else:
sum_of_rates += rate_prediction(data, user, song) / maximum
return sum_of_rates / number_of_rates if number_of_rates != 0 else 0
def ndcg(data, test_songs):
"""
Calculates the normalized discounted cumulative gain.
"""
ideal_dcg_value = dcg(data, test_songs, ideal=True)
dcg_value = dcg(data, test_songs)
return dcg_value / ideal_dcg_value if ideal_dcg_value != 0 else 0
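# For reference, the metrics above (averaged over rated test songs) amount to:
#   MAE  = mean(|r - r_hat|)
#   RMSE = sqrt(mean((r - r_hat)^2))
#   DCG  = mean(r_hat / max(1, log2(position)))   (the "ideal" variant uses r)
#   NDCG = DCG / IDCG, or 0 when IDCG is 0.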
|
MaKToff/SPbSU_Homeworks
|
Semester 7/Recommender system/metrics.py
|
Python
|
mit
| 2,196
|
### -*- coding: utf-8 -*- ####################################################
from django.contrib.contenttypes.models import ContentType
def default_upload_to(instance, file_name):
app_label, model = ContentType.objects.get_for_model(instance).natural_key()
return 'uploads/%s/%s/%s_%s' % (app_label, model, instance.pk or '0', file_name)
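# Illustrative result (hypothetical model): a banner.Banner instance with pk=7
# and an uploaded file "ad.png" maps to 'uploads/banner/banner/7_ad.png';
# unsaved instances fall back to '0' for the pk segment.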
|
Arpaso/django-banner-simple
|
src/banner/utils.py
|
Python
|
mit
| 350
|
import warnings
import tensorflow as tf
from keras.models import Model
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Input
from keras.layers import Reshape
from keras.layers import Lambda
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import UpSampling2D
def model(input_shape=(240, 320, 3)):
"""Instantiates the baseline architecture.
# Arguments
input_shape: optional shape tuple,
It should have exactly 3 input channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid input shape.
"""
img_input = Input(shape=input_shape)
x = Conv2D(64, (5, 5), activation='relu', padding='same', name='conv1')(img_input)
x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)
x = Conv2D(192, (3, 3), activation='relu', padding='same', name='conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)
x = Conv2D(384, (3, 3), activation='relu', padding='same', name='conv3')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv4')(x)
x = Conv2D(3, (3, 3), activation='relu', padding='same', name='conv5')(x)
# Top Layers
x = Flatten()(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(80*60*3, activation='relu', name='fc2')(x)
x = Reshape((60,80,3))(x)
x = UpSampling2D(size=4)(x)
x = Lambda(lambda x: tf.nn.l2_normalize(x, 3) )(x)
# Create model.
inputs = img_input
model = Model(inputs, x, name='baseline')
return model
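# Shape sketch for the default input (240, 320, 3): the two 2x2 poolings give
# 60x80 feature maps, fc2 produces 80*60*3 values reshaped to (60, 80, 3),
# UpSampling2D(size=4) restores (240, 320, 3), and the Lambda layer
# L2-normalizes each pixel's 3-channel vector.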
|
kaykanloo/msc-project
|
Code/Models/BaselineUpSample.py
|
Python
|
mit
| 1,743
|
# -*- coding: utf-8 -*-
# @Author: jerry
# @Date: 2018-02-01 22:57:12
# @Last Modified by: jerry
from functools import lru_cache
@lru_cache(maxsize=32)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
print([fib(n) for n in range(10)])
# Output: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
"""
from logbook import Logger, StreamHandler
import sys
StreamHandler(sys.stdout).push_application()
log = Logger("My Awesome logger")
log.warn("This is too cool for stdlibrary")
"""
import os
from time import sleep
import logbook
from logbook.more import ColorizedStderrHandler
print(os.path.abspath(''))
path = '.'
LOG_DIR = os.path.join(path, 'log')
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
def get_logger(name='Test', file_log=True, level=None):
logbook.set_datetime_format('local')
ColorizedStderrHandler(bubble=False, level=level).push_thread()
if file_log:
logbook.TimedRotatingFileHandler(os.path.join(LOG_DIR, '%s.log' % name),
date_format='%Y-%m-%d-%H', bubble=True, encoding='utf-8').push_thread()
return logbook.Logger(name)
LOG = get_logger(file_log=True, level='INFO')
if __name__ == "__main__":
for item in range(1000):
LOG.info('Log-info')
sleep(0.5)
|
tencrance/cool-config
|
python3/new_feature.py
|
Python
|
mit
| 1,286
|
def to_bool(s):
"""Convert given string into bool value. Defaults to False.
"""
return (s or '').lower() in ['1', 'y', 'yes', 't', 'true']
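# Examples: to_bool('Yes') -> True, to_bool('true') -> True,
# to_bool('0') -> False, to_bool(None) -> False.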
|
ikumen/bits
|
backend/app/helpers.py
|
Python
|
mit
| 152
|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from djblets.webapi.fields import IntFieldType
from reviewboard.diffviewer.features import dvcs_feature
from reviewboard.diffviewer.models import FileDiff
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_diff_comment import \
BaseDiffCommentResource
class ReviewDiffCommentResource(BaseDiffCommentResource):
"""Provides information on diff comments made on a review.
If the review is a draft, then comments can be added, deleted, or
changed on this list. However, if the review is already published,
then no changes can be made.
"""
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
policy_id = 'review_diff_comment'
model_parent_key = 'review'
mimetype_list_resource_name = 'review-diff-comments'
mimetype_item_resource_name = 'review-diff-comment'
def get_queryset(self, request, review_id, *args, **kwargs):
q = super(ReviewDiffCommentResource, self).get_queryset(
request, *args, **kwargs)
return q.filter(review=review_id)
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
required=dict({
'filediff_id': {
'type': IntFieldType,
'description': 'The ID of the file diff the comment is on.',
},
'first_line': {
'type': IntFieldType,
'description': 'The line number the comment starts at.',
},
'num_lines': {
'type': IntFieldType,
'description': 'The number of lines the comment spans.',
},
}, **BaseDiffCommentResource.REQUIRED_CREATE_FIELDS),
optional=dict({
'base_filediff_id': {
'type': IntFieldType,
'description': 'The ID of the base filediff for the '
':term:`cumulative diff` the comment is on.\n'
'\n'
'This is only supported for review requests '
'created with commit history support.',
},
'interfilediff_id': {
'type': IntFieldType,
'description': 'The ID of the second file diff in the '
'interdiff the comment is on.',
},
}, **BaseDiffCommentResource.OPTIONAL_CREATE_FIELDS),
allow_unknown=True,
)
def create(self, request, filediff_id, interfilediff_id=None,
base_filediff_id=None, *args, **kwargs):
"""Creates a new diff comment.
This will create a new diff comment on this review. The review
must be a draft review.
        Extra data can be stored for later lookup. See
        :ref:`webapi2.0-extra-data` for more information.
"""
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
filediff = None
interfilediff = None
invalid_fields = {}
try:
filediff = FileDiff.objects.get(
pk=filediff_id,
diffset__history__review_request=review_request)
except ObjectDoesNotExist:
invalid_fields['filediff_id'] = [
'This is not a valid filediff ID.',
]
if filediff is None or not dvcs_feature.is_enabled(request=request):
base_filediff_id = None
if base_filediff_id is not None:
if not review_request.created_with_history:
invalid_fields['base_filediff_id'] = [
'This field cannot be specified on review requests '
'created without history support.'
]
elif interfilediff_id is not None:
invalid_fields.update({
'base_filediff_id': [
'This field cannot be specified with '
'interfilediff_id.',
],
'interfilediff_id': [
'This field cannot be specified with '
'base_filediff_id.',
],
})
elif base_filediff_id == filediff_id:
invalid_fields['base_filediff_id'] = [
'This cannot be the same as filediff_id.',
]
elif base_filediff_id > filediff_id:
invalid_fields['base_filediff_id'] = [
'This is not a valid base filediff ID.',
]
else:
base_filediff_exists = (
FileDiff.objects
.filter(diffset_id=filediff.diffset_id,
pk=base_filediff_id)
.exclude(commit_id=filediff.commit_id)
.exists()
)
if not base_filediff_exists:
invalid_fields['base_filediff_id'] = [
'This is not a valid base filediff ID.',
]
else:
ancestor_ids = (
ancestor.pk
for ancestor in filediff.get_ancestors(
minimal=False)
)
if base_filediff_id not in ancestor_ids:
invalid_fields['base_filediff_id'] = [
'This is not a valid base filediff ID.',
]
if filediff and interfilediff_id:
if interfilediff_id == filediff.id:
invalid_fields.setdefault('interfilediff_id', []).append(
'This cannot be the same as filediff_id.')
else:
try:
interfilediff = FileDiff.objects.get(
pk=interfilediff_id,
diffset__history=filediff.diffset.history)
except ObjectDoesNotExist:
invalid_fields.setdefault('interfilediff_id', []).append(
'This is not a valid interfilediff ID.')
if invalid_fields:
return INVALID_FORM_DATA, {
'fields': invalid_fields,
}
return self.create_comment(
request=request,
review=review,
comments_m2m=review.comments,
filediff=filediff,
interfilediff=interfilediff,
fields=('filediff', 'interfilediff', 'first_line', 'num_lines'),
base_filediff_id=base_filediff_id,
**kwargs)
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
optional=dict({
'first_line': {
'type': IntFieldType,
'description': 'The line number the comment starts at.',
},
'num_lines': {
'type': IntFieldType,
'description': 'The number of lines the comment spans.',
},
}, **BaseDiffCommentResource.OPTIONAL_UPDATE_FIELDS),
allow_unknown=True,
)
def update(self, request, *args, **kwargs):
"""Updates a diff comment.
This can update the text or line range of an existing comment.
        Extra data can be stored for later lookup. See
        :ref:`webapi2.0-extra-data` for more information.
"""
try:
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
diff_comment = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
return self.update_comment(request=request,
review=review,
comment=diff_comment,
update_fields=('first_line', 'num_lines'),
**kwargs)
@webapi_check_local_site
@augment_method_from(BaseDiffCommentResource)
def delete(self, *args, **kwargs):
"""Deletes the comment.
This will remove the comment from the review. This cannot be undone.
Only comments on draft reviews can be deleted. Attempting to delete
a published comment will return a Permission Denied error.
Instead of a payload response, this will return :http:`204`.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseDiffCommentResource)
def get_list(self, *args, **kwargs):
"""Returns the list of comments made on a review.
This list can be filtered down by using the ``?line=`` and
``?interdiff-revision=``.
To filter for comments that start on a particular line in the file,
using ``?line=``.
To filter for comments that span revisions of diffs, you can specify
the second revision in the range using ``?interdiff-revision=``.
"""
pass
def create_comment(self, request, comments_m2m, base_filediff_id=None,
**kwargs):
"""Create a review comment.
Args:
request (django.http.HttpRequest):
The HTTP request from the client.
comments_m2m (django.db.models.ManyToManyField):
The review's comments relation, where the new comment will be
added.
base_filediff_id (int, optional):
The ID of the base filediff for the :term:`cumulative diff` the
comment is on.
**kwargs (dict):
Additional keyword arguments to pass on to the base class
method.
Returns:
tuple or djblets.webapi.errors.WebAPIError:
Either a successful payload containing the comment, or an error
payload.
"""
rsp = super(ReviewDiffCommentResource, self).create_comment(
comments_m2m=comments_m2m,
save=False,
**kwargs)
if (isinstance(rsp, tuple) and
isinstance(rsp[1], dict) and
self.item_result_key in rsp[1]):
comment = rsp[1][self.item_result_key]
if (base_filediff_id is not None and
dvcs_feature.is_enabled(request=request)):
comment.base_filediff_id = base_filediff_id
comment.save()
comments_m2m.add(comment)
return rsp
def serialize_object(self, obj, request=None, *args, **kwargs):
"""Serialize a diff comment.
Args:
obj (reviewboard.reviews.models.diff_comment.Comment):
The diff comment to serialize.
request (django.http.HttpRequest, optional):
The HTTP request from the client.
*args (tuple):
Additional positional arguments.
**kwargs (dict):
Additional keyword arguments.
Returns:
dict:
The serialized diff comment.
"""
result = super(ReviewDiffCommentResource, self).serialize_object(
obj, request=request, *args, **kwargs)
if not dvcs_feature.is_enabled(request=request):
result.pop('base_filediff_id', None)
return result
review_diff_comment_resource = ReviewDiffCommentResource()
|
chipx86/reviewboard
|
reviewboard/webapi/resources/review_diff_comment.py
|
Python
|
mit
| 12,421
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-25 06:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(max_length=255)),
('population', models.IntegerField(db_index=True)),
('altitude', models.IntegerField(db_index=True, null=True)),
('country', models.TextField(max_length=255)),
('latitude', models.FloatField(max_length=255)),
('longitude', models.FloatField(max_length=255)),
('airport_code', models.TextField(max_length=255, null=True)),
('updated', models.DateTimeField(auto_now=True, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Weather',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('weather_stats', jsonfield.fields.JSONField()),
('updated', models.DateTimeField(auto_now=True, db_index=True)),
('created', models.DateTimeField(auto_now_add=True)),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='weather_parser.City')),
],
),
]
|
livingbio/weather-parser
|
src/weather_parser/migrations/0001_initial.py
|
Python
|
mit
| 1,726
|
__author__ = 'silvia'
import difflib
from collections import Counter, defaultdict
#from nltk.tokenize import word_tokenize
import csv
def csv_from_tuplelist(csvfile, tuplelist):
"""
    Create a csv file from a list of tuples.
"""
csv_name = open(csvfile, 'w', encoding='latin1', newline='')
csv_writer = csv.writer(csv_name, dialect='excel', lineterminator='\n')
for x in tuplelist:
csv_writer.writerow([x[0], x[1]])
### Compare the less frequent cluster terms (st, tt, qt) against the most frequent one (pt)
# pt = first term (most frequent)
pt = []
# st = second term (second most frequent)
st = []
tt = []
qt = []
with open('words.csv', "r", encoding="latin1") as fin:
next(fin)
for l in fin:
#print(l)
line = l.split('"')
g = l.split('"')
pt.append(g[1])
st.append(g[3])
tt.append(g[5])
qt.append(g[7])
#print(type(st))
onevstwo = list(zip(pt, st))
#print(onevstwo)
onevsthree = list(zip(pt, tt))
onevsfour = list(zip(pt, qt))
#print(len(onevsfour))
sumof = onevstwo + onevsthree + onevsfour
adds = []
dels = []
addsdic = defaultdict(list)
deldic = defaultdict(list)
for a, b in onevstwo:
if len(b) != 0:
#print('{} => {}'.format(a, b))
for i, s in enumerate(difflib.ndiff(b, a)):
if s[0] == ' ':
continue
elif s[0] == '-':
print(i, s)
#adds = []
#print(u'Delete "{}" from position {}'.format(s[-1], i))
dels.append(s[-1])
deldic[(b,a)].append(s[-1])
print(len(adds))
elif s[0] == '+':
#print(u'Add "{}" to position {}'.format(s[-1], i))
adds.append(s[-1])
addsdic[(b,a)].append(s[-1])
delscount = Counter(dels).most_common()
addscount = Counter(adds).most_common()
#csv_from_tuplelist("dels_count.csv", delscount)
#csv_from_tuplelist("adds_count.csv", addscount)
#### Dictionary with the errors that only involve deleting a single character
diffonedic = {}
diffone = []
for termstuple, errorlist in deldic.items():
if len(errorlist) < 2:
#print(termstuple[0], termstuple[1], errorlist[0])
diffone.append(errorlist[0])
diffonedic[(termstuple[0], termstuple[1])] = errorlist[0]
#onediffdel[(termstuple[0], termstuple[1])] =
#print(len(diffone))
diffonecount = Counter(diffone).most_common()
#csv_from_tuplelist("diffone_delscount.csv", diffonecount)
def getWwithE(string, dictio):
    """
    Shows the terms in which a given error occurs, and the most common words
    among the correct and the incorrect terms.
:param string:
:param dictio:
:return:
"""
correcto = []
incorrecto = []
corr_incorr = {}
for termstuple, errorlist in dictio.items():
if string in errorlist:
correcto.append(termstuple[1])
incorrecto.append(termstuple[0])
print("correcto %s || error %s" % (termstuple[1], termstuple[0]))
corr_incorr[termstuple[1]] = termstuple[0]
correct_string = ' '.join(correcto).split()
wrong_string = ' '.join(incorrecto).split()
print("Palabras más comunes en térm. correctos: ", Counter(correct_string).most_common())
print("Palabras más comunes en térm. errados: ", Counter(wrong_string).most_common())
return corr_incorr
### Try finding the words with a single-character error (i, ",", o, a)
#ies = getWwithE("i", diffonedic)
## Conclusion: grammatical errors in Poesía, Política, Judíos, Río, María, agrícolas, Filosofía, Ingeniería, Compañía, indígenas, Sociología
#commas = getWwithE(",", diffonedic)
## Conclusion: many words ending with , / in the correct terms countries are written in parentheses; in the wrong ones they are separated by a comma
oes = getWwithE("o", diffonedic)
## Conclusion: grammatical errors in Administración, Filósofos, Discriminación, Población (ción-words!)
|
silviaegt/bdcv_metadata
|
Clustering/CountDiff.py
|
Python
|
mit
| 4,073
|
import pytest
from taichi.lang import impl
import taichi as ti
from tests import test_utils
_TI_TYPES = [ti.i8, ti.i16, ti.i32, ti.u8, ti.u16, ti.u32, ti.f32]
_TI_64_TYPES = [ti.i64, ti.u64, ti.f64]
def _test_type_assign_argument(dt):
x = ti.field(dt, shape=())
@ti.kernel
def func(value: dt):
x[None] = value
func(3)
assert x[None] == 3
@pytest.mark.parametrize('dt', _TI_TYPES)
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_type_assign_argument(dt):
_test_type_assign_argument(dt)
@pytest.mark.parametrize('dt', _TI_64_TYPES)
@test_utils.test(exclude=[ti.opengl, ti.vulkan], require=ti.extension.data64)
def test_type_assign_argument64(dt):
_test_type_assign_argument(dt)
def _test_type_operator(dt):
x = ti.field(dt, shape=())
y = ti.field(dt, shape=())
add = ti.field(dt, shape=())
mul = ti.field(dt, shape=())
@ti.kernel
def func():
add[None] = x[None] + y[None]
mul[None] = x[None] * y[None]
for i in range(0, 3):
for j in range(0, 3):
x[None] = i
y[None] = j
func()
assert add[None] == x[None] + y[None]
assert mul[None] == x[None] * y[None]
@pytest.mark.parametrize('dt', _TI_TYPES)
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_type_operator(dt):
_test_type_operator(dt)
@pytest.mark.parametrize('dt', _TI_64_TYPES)
@test_utils.test(exclude=[ti.opengl, ti.vulkan], require=ti.extension.data64)
def test_type_operator64(dt):
_test_type_operator(dt)
def _test_type_field(dt):
x = ti.field(dt, shape=(3, 2))
@ti.kernel
def func(i: ti.i32, j: ti.i32):
x[i, j] = 3
for i in range(0, 3):
for j in range(0, 2):
func(i, j)
assert x[i, j] == 3
@pytest.mark.parametrize('dt', _TI_TYPES)
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_type_field(dt):
_test_type_field(dt)
@pytest.mark.parametrize('dt', _TI_64_TYPES)
@test_utils.test(exclude=[ti.opengl, ti.vulkan], require=ti.extension.data64)
def test_type_field64(dt):
_test_type_field(dt)
def _test_overflow(dt, n):
a = ti.field(dt, shape=())
b = ti.field(dt, shape=())
c = ti.field(dt, shape=())
@ti.kernel
def func():
c[None] = a[None] + b[None]
a[None] = 2**n // 3
b[None] = 2**n // 3
func()
assert a[None] == 2**n // 3
assert b[None] == 2**n // 3
if ti.types.is_signed(dt):
assert c[None] == 2**n // 3 * 2 - (2**n) # overflows
else:
assert c[None] == 2**n // 3 * 2 # does not overflow
@pytest.mark.parametrize('dt,n', [
(ti.i8, 8),
(ti.u8, 8),
(ti.i16, 16),
(ti.u16, 16),
(ti.i32, 32),
(ti.u32, 32),
])
@test_utils.test(exclude=[ti.opengl, ti.vulkan])
def test_overflow(dt, n):
_test_overflow(dt, n)
@pytest.mark.parametrize('dt,n', [
(ti.i64, 64),
(ti.u64, 64),
])
@test_utils.test(exclude=[ti.opengl, ti.vulkan], require=ti.extension.data64)
def test_overflow64(dt, n):
_test_overflow(dt, n)
@pytest.mark.parametrize('dt,val', [
(ti.u32, 0xffffffff),
(ti.u64, 0xffffffffffffffff),
])
@test_utils.test(require=ti.extension.data64)
def test_uint_max(dt, val):
# https://github.com/taichi-dev/taichi/issues/2060
impl.get_runtime().default_ip = dt
N = 16
f = ti.field(dt, shape=N)
@ti.kernel
def run():
for i in f:
f[i] = val
run()
fs = f.to_numpy()
for f in fs:
assert f == val
|
yuanming-hu/taichi
|
tests/python/test_types.py
|
Python
|
mit
| 3,515
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
from cvxpy.atoms.elementwise.elementwise import Elementwise
from cvxpy.atoms.elementwise.qol_elemwise import qol_elemwise
import numpy as np
class inv_pos(Elementwise):
""" Elementwise 1/x, x >= 0 """
def __init__(self, x):
super(inv_pos, self).__init__(x)
# Returns the elementwise inverse of x.
@Elementwise.numpy_numeric
def numeric(self, values):
return 1.0/values[0]
# Always positive.
def sign_from_args(self):
return u.Sign.POSITIVE
# Default curvature.
def func_curvature(self):
return u.Curvature.CONVEX
def monotonicity(self):
return [u.monotonicity.DECREASING]
@staticmethod
def graph_implementation(arg_objs, size, data=None):
"""Reduces the atom to an affine expression and list of constraints.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
x = arg_objs[0]
ones = lu.create_const(np.mat(np.ones(size)), size)
obj, constraints = qol_elemwise([ones, x], size)
return (obj, constraints)
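# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows how this atom is typically exercised through cvxpy's public interface.
# It assumes `Variable`, `Minimize`, `Problem` and `inv_pos` are exposed at the
# top level of this (2013-era) cvxpy package and that a default solver is installed.
if __name__ == "__main__":
    import cvxpy
    x = cvxpy.Variable()
    # minimize 1/x subject to x <= 4; the optimum sits at x = 4 with value 0.25
    problem = cvxpy.Problem(cvxpy.Minimize(cvxpy.inv_pos(x)), [x <= 4])
    problem.solve()
    print(problem.value)
    print(x.value)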
|
riadnassiffe/Simulator
|
src/tools/ecos/cvxpy/cvxpy/atoms/elementwise/inv_pos.py
|
Python
|
mit
| 2,063
|
# -*- coding: UTF-8 -*-
# (c)2013 Mik Kocikowski, MIT License (http://opensource.org/licenses/MIT)
# https://github.com/mkocikowski/esbench
import sys
import os.path
import unittest
import logging
import estools.common.log as log
log.set_up_logging(level=logging.ERROR)
def suite():
return unittest.defaultTestLoader.discover(os.path.dirname(__file__))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(suite())
# doing sys.exit(1) on test failure will signal test failure to other
# processes (this is for when the suite is run automatically, not by hand
# from the command line)
#
if not result.wasSuccessful():
sys.exit(1)
|
mkocikowski/estools
|
estools/test/units.py
|
Python
|
mit
| 719
|
from ._abstract import AbstractScraper
class CookEatShare(AbstractScraper):
@classmethod
def host(cls):
return "cookeatshare.com"
def title(self):
return self.schema.title()
def total_time(self):
return None
def image(self):
return self.schema.image()
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
return self.schema.instructions()
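# --- Hedged usage note (added for illustration; not part of the original file) ---
# Scrapers in this package are normally obtained through the library's
# `scrape_me` helper rather than instantiated directly, roughly:
#
#     from recipe_scrapers import scrape_me
#     scraper = scrape_me("https://cookeatshare.com/recipes/some-recipe")  # placeholder URL
#     scraper.title(); scraper.ingredients(); scraper.instructions()
#
# `scrape_me` and the exact URL are assumptions about the surrounding package,
# not defined in this file.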
|
hhursev/recipe-scraper
|
recipe_scrapers/cookeatshare.py
|
Python
|
mit
| 449
|
import tensorflow as tf
import sys
# change this as you see fit
image_path = sys.argv[1]
# Read in the image_data
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("./tf_files/retrained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("./tf_files/output_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
print('%s (score = %.5f)' % (human_string, score))
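# --- Hedged usage note (added for illustration; not part of the original file) ---
# Assumes a TensorFlow 1.x retraining run has already produced
# ./tf_files/retrained_labels.txt and ./tf_files/output_graph.pb, e.g.:
#
#     python label_image.py path/to/some_image.jpg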
|
moshfiqur/shoes-classifier
|
label_image.py
|
Python
|
mit
| 1,127
|
from utils import get_sidereal_time
from process import open_fits, flatten_max, DataProcessor
import dateutil.parser
import os, shutil
dp = DataProcessor()
dp.outdir = 'test/out'
dp.verbose = 1
st = '0429'
path_end = os.path.join(st[0:2], st[2:4])
path = os.path.join('sid', path_end)
night = os.listdir(path)
dp.do_total = True
dp.indir = 'sid'
dp.do_filter = False
dp.do_output = False
dp.process_night(path, night)
from django.template import Context, Template
t = Template(open(os.path.join('clouds','templates','clouds','image.html')).read())
from catlib import parse_cat
point_list = map(lambda (i,row):row, parse_cat(os.path.join('test', 'out', 'cat', path, 'total.cat')).iterrows())
print len(point_list)
with open(os.path.join('test',st+'.html'), 'w') as out:
out.write(t.render(Context({'point_list': point_list,
'point_pk': -1,
'object': {'get_url': path+'/total' }
})))
|
Bjwebb/detecting-clouds
|
test-sid.py
|
Python
|
mit
| 988
|
#! /usr/bin/env python
#from exception import exception
import re
"""
Python class that parses a perftable file from GoldenGate
see 'help(perfTableParser.perfTableParser)' for details
"""
class localException(Exception):
pass
class perfTableParser:
"""
    Class that parses GoldenGate performance files.
    Returns a structure containing the performances ( @TODO class to be defined)
    """
    # Regular expression used to parse the variable names - cf. 'processDeclarationName'
re_varName = re.compile(r"([a-zA-Z0-9_][a-zA-Z0-9_@<>]*)(\([a-zA-Z0-9_][a-zA-Z0-9_@<>]*\))?$")
def parseFile(self, parent, fileName):
"""
        Parses the file whose path is given as argument.
        Updates the parent.
        """
        # Initialize the parser
        self.i__init()
        self.parent = parent
        try:
            fHandle = open(fileName, 'r')
        except:
            self.errorMsg = "Unable to open file '" + fileName + "'"
            raise localException(self.errorMsg)
try:
            # Internal parse function
self.i__parseFile(fHandle)
except localException ,e:
print "parsing failed at line ", self.lineNumber
print e
except ValueError, e:
print "parsing failed at line ", self.lineNumber
print e
# except Exception, e :
# print " INTERNAL[perfTableParser] : unknown exception found; please report bug"
# print e
# #raise e
else:
# Parsing seems OK
self.valid = True
fHandle.close()
return self.valid
def parseClipboard(self, parent, TextObject):
"""
        Parses the text given as argument.
        Updates the parent.
        """
        # Initialize the parser
self.i__init()
self.parent = parent
try:
            # Internal parse function
self.i__parseText(TextObject)
except localException ,e:
print "parsing failed at line ", self.lineNumber
print e
except ValueError, e:
print "parsing failed at line ", self.lineNumber
print e
# except Exception, e :
# print " INTERNAL[perfTableParser] : unknown exception found; please report bug"
# print e
# #raise e
else:
# Parsing seems OK
self.valid = True
return self.valid
def i__init(self):
"""
Initializes the parsing - called by self.parse
"""
        # Current line number
        self.lineNumber = 0
        # List of variables
        self.variablesList = None
        # Number of data values per line
        self.datasPerLine = 0
        # Flag telling whether the parsing succeeded or not
        self.valid = False
        # Error message
        self.errorMsg = "?"
        # Final array
        self.myArray = []
        # specification variable
        self.specVarName = None
        # performances
        self.performancesNames =[]
def i__parseFile(self, fHandle):
"""
        Internal parsing function called by self.parseFile; raises exceptions on error
        """
        numbersParse = False # True while we are parsing the data lines
        lastCommentLine = "%" # String of the last comment line parsed
        for line in fHandle:
            self.lineNumber = self.lineNumber+1
            # Strip the line of redundant spaces & tabs and of end-of-line characters
            line = self.cleanUpLine(line)
            if 0 <> len( line):
                if numbersParse:
                    # Data line
                    self.processDataLine(line)
                else:
                    # 'Comment' line
                    if self.processCommentLine(line):
                        lastCommentLine = line
                    else:
                        # We are starting to parse the data
                        numbersParse = True
                        # The last comment line contains the variable names
                        self.processDeclarationLine(lastCommentLine)
                        # Parse the first data line
                        self.processDataLine(line)
def i__parseText(self, TextObject):
"""
        Internal parsing function called by self.parseClipboard; raises exceptions on error
        """
        numbersParse = False # True while we are parsing the data lines
        for line in TextObject.split('\n'):
            self.lineNumber = self.lineNumber+1
            # Strip the line of redundant spaces & tabs and of end-of-line characters
            line = self.cleanUpLine(line)
            if 0 <> len( line):
                if numbersParse:
                    # Data line
                    self.processDataLine(line)
                else:
                    resultList = []
                    tokens = line.split()
                    for token in tokens:
                        myTupple = self.processDeclarationName(token)
                        resultList.append(myTupple)
                    self.variablesList = resultList
                    # Now compute the number of data values expected per line
                    finalSize = 0
                    for elt in resultList:
                        finalSize = finalSize + elt[2]
                    self.datasPerLine = finalSize
                    # We are starting to parse the data
                    numbersParse = True
##                # Rebuild the list of inputs
##                # Special case without a main sweep
##                if None != self.specVarName:
##                    self.sweepVarNames.append(self.specVarName )
def cleanUpLine(self, line):
"""
        Remove whitespaces and carriage returns at the beginning and at the end of the line
"""
line = line.expandtabs()
line = line.strip(None)
# Remove duplicate space
line = ' '.join(line.split())
return line
def processCommentLine(self, line):
"""
Parses the 'comment' line at the beginning of the file
returns True if it matches, else False
We also try to detect the sweep variables names
"""
if '%' <> line[0]:
return False
if line.startswith('% specification_variable'):
tmp = line.partition('% specification_variable')
tmpName = tmp[2].strip()
            # Special case where no main sweep is defined
            if "<notDefined>" != tmpName:
                self.specVarName = tmpName
        elif line.startswith('% sweep_variables'):
            tmp = line.partition('% sweep_variables')
            tmpName = tmp[2]
            # Special case where no secondary sweep is defined
if " <notDefined>" != tmpName:
self.sweepVarNames = tmpName.split()
else:
self.sweepVarNames = []
elif line.startswith('%performance'):
tmp = line.split()
self.performancesNames.append(tmp[1])
return True
def processDeclarationLine(self, line):
"""
        Processes the line that declares the variables, i.e.
        % PIN PAE_c(RI) ACPR_left ACPR_right Pin Pout Pdc
        % nom1 nom2 nom3(RI) nom4
        if the name is not followed by a declaration in parentheses, it is a float
        if the name is followed by a declaration in parentheses, (RI) is expected ( real / imaginary)
        Returns an array of tuples : [ (name, type, size), ...]
        """
        resultList = []
        tokens = line.split()
        if '%' <> tokens.pop(0):
            raise localException("'%' expected at the beginning of the variables declaration")
        for token in tokens:
            myTupple = self.processDeclarationName(token)
            resultList.append(myTupple)
        self.variablesList = resultList
        # Now compute the number of data values expected per line
finalSize = 0
for elt in resultList:
finalSize = finalSize + elt[2]
self.datasPerLine = finalSize
return
def processDeclarationName(self, name):
"""
        Handles a name declaration in the variables-declaration line
        Ex: "PIN" "PAE_c(RI)" ...
        Returns a tuple : (name, type, size)
        where:
        'name' is the base name
        'type' is the corresponding python type
        'size' is the number of corresponding data values in the result array
        """
        # Analyse each name and check whether it is a real or a complex
myMatch = self.re_varName.match(name)
if None == myMatch:
raise localException( "Un-recognized variable declaration : '" + str(name) + "'" )
varName = myMatch.group(1)
myExtension = myMatch.group(2)
if None == myExtension:
myType = float
mySize = 1
elif "(RI)" == myExtension:
myType = complex
mySize = 2
else:
raise localException("Sorry, type '"+myExtension+"' is not supported")
return varName, myType, mySize
def processDataLine(self, line):
"""
Processes a line of datas
Checks that there is the right number of elements and that they are all floats
Returns a list of values corresponding to the variable types
"""
tokens = line.split()
if len(tokens) < self.datasPerLine:
raise localException( str(self.datasPerLine) + " values were expected, but I found " + str( len(tokens)) )
myList = []
for myTupple in self.variablesList:
lType = myTupple[1]
lSize = myTupple[2]
lArray = []
for i in range(lSize):
tmp = tokens.pop(0)
if tmp == '?':
                    # convert '?' into 2
myFloat = float('2')
lArray.append(myFloat)
myNumber = lType(*lArray)
myList.append(myNumber)
elif tmp != 'X' and tmp != 'x':
# This will throw an error if data is not a float
myFloat = float(tmp)
lArray.append(myFloat)
                    # Convert to the declared type
myNumber = lType(*lArray)
myList.append(myNumber)
else:
myList.append(tmp.upper())
self.myArray.append(tuple(myList) )
return
def getSweepsNames(self):
return self.sweepVarNames
def getPerfsNames(self):
return self.performancesNames
################################################################################
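# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Minimal driver showing the intended call sequence; "table.perf" is a placeholder
# file name and `parent` is whatever object the caller wants the parser to remember.
if __name__ == '__main__':
    parser = perfTableParser()
    if parser.parseFile(parent=None, fileName="table.perf"):
        print "performances:", parser.getPerfsNames()
        print "rows parsed :", len(parser.myArray)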
|
trilomix/kmappy
|
perfTableparser.py
|
Python
|
mit
| 11,141
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Eduardo Frazão ( https://github.com/fr4z40 )
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
class mailextract(list):
@staticmethod
def server_dict(list_in):
rst = {}
for email in list_in:
server = email.split('@')[-1]
if server not in rst:
rst[server] = []
rst[server].append(email)
return(rst)
@staticmethod
def isvalid(email, filter_plus=None):
try:
email = (email.strip()).lower()
list_filter = list(set(('''
xxxx xxx@ 12345 fulano seuemail @mail @email usuario meuemail
@xx seublog meudominio meunome blabla .. @@ seunome user.name
noone mailto username mail@ someone somebody nobody user@
yourusername fake : teste testing
''').strip().split()))
if (filter_plus != None):
list_filter = list(set(list_filter + filter_plus))
if ((('@' in email) and ('.' in email)) and (email.count('@') == 1)):
len_test = email.split('@')
if (((len(len_test[0]) <= 36) and (len(len_test[0]) >= 3)) and
((len(len_test[1]) >= 7) and (len(len_test[1]) <= 25))):
condition = True
for flt in list_filter:
if flt in email:
condition = False
break
return(condition)
else:
return(False)
else:
return(False)
except:
return(False)
def __init__(self, string_in, filter_plus=None):
from string import punctuation
for item in list('._-@'):
punctuation = punctuation.replace(item,'')
for item in punctuation:
string_in = string_in.replace(item, ' ')
string_in = list(set(string_in.strip().split()))
for items in string_in:
for l in list('._-@'):
items = items.strip(l)
if self.isvalid(items, filter_plus):
self.append(items)
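# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The class behaves like a list of the addresses extracted from a blob of text,
# and server_dict() groups them by mail server. The sample text is made up.
if __name__ == "__main__":
    sample = "Write to alice@example.com or bob@example.org for details!"
    emails = mailextract(sample)
    print(emails)                            # e.g. ['alice@example.com', 'bob@example.org']
    print(mailextract.server_dict(emails))   # {'example.com': [...], 'example.org': [...]}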
|
Python-Devs-Brasil/py-mailgrabber
|
pymg_pkg/mailextract.py
|
Python
|
mit
| 2,330
|
if __name__ == '__main__':
input('\nPress ENTER to continue\n')
|
wairton/zephyrus-mas
|
zephyrus/pause.py
|
Python
|
mit
| 68
|
import requests
import sys
import argparse
from requests.exceptions import ConnectionError
def get_url_info(short_url):
'''Gets info about the url'''
try:
response = requests.get(short_url)
url_info = {}
url_info['status'] = response.status_code
url_info['url'] = response.url
url_info['encoding'] = response.apparent_encoding
return url_info
except ConnectionError as e:
return None
def usage():
'''Prints usage'''
print('python url_lengthener.py <short_url>')
if __name__=='__main__':
if len(sys.argv) != 2:
usage()
exit(0)
else:
url_info = get_url_info(sys.argv[1])
if url_info:
            for k, v in url_info.items():
                print('{0}: {1}'.format(k, v))
        else:
            print('Invalid url: {0}'.format(sys.argv[1]))
|
tomaslu/url_lengthener
|
url_lengthener/url_lengthener.py
|
Python
|
mit
| 867
|
import os
from setuptools import setup
setup(
name='sim2net',
version='3.1.2',
author='Michal Kalewski <mkalewski at cs.put.poznan.pl>',
description='Simple Network Simulator (sim2net) is a discrete event '\
'simulator of mobile ad hoc networks (MANETs).',
long_description=open("README.rst").read(),
url='https://github.com/mkalewski/sim2net',
license='MIT License',
packages=[
'sim2net',
'sim2net.area',
'sim2net.cli',
'sim2net.failure',
'sim2net.mobility',
'sim2net.packet_loss',
'sim2net.placement',
'sim2net.propagation',
'sim2net.speed',
'sim2net.utility'
],
entry_points = {'console_scripts': ['sim2net=sim2net.cli.cli:main'],}
)
|
mkalewski/sim2net
|
setup.py
|
Python
|
mit
| 775
|
"""Gopher protocol client interface."""
__all__ = ["send_selector","send_query"]
# Default selector, host and port
DEF_SELECTOR = '1/'
DEF_HOST = 'gopher.micro.umn.edu'
DEF_PORT = 70
# Recognized file types
A_TEXT = '0'
A_MENU = '1'
A_CSO = '2'
A_ERROR = '3'
A_MACBINHEX = '4'
A_PCBINHEX = '5'
A_UUENCODED = '6'
A_INDEX = '7'
A_TELNET = '8'
A_BINARY = '9'
A_DUPLICATE = '+'
A_SOUND = 's'
A_EVENT = 'e'
A_CALENDAR = 'c'
A_HTML = 'h'
A_TN3270 = 'T'
A_MIME = 'M'
A_IMAGE = 'I'
A_WHOIS = 'w'
A_QUERY = 'q'
A_GIF = 'g'
A_HTML = 'h' # HTML file
A_WWW = 'w' # WWW address
A_PLUS_IMAGE = ':'
A_PLUS_MOVIE = ';'
A_PLUS_SOUND = '<'
_names = dir()
_type_to_name_map = {}
def type_to_name(gtype):
"""Map all file types to strings; unknown types become TYPE='x'."""
global _type_to_name_map
if _type_to_name_map=={}:
for name in _names:
if name[:2] == 'A_':
_type_to_name_map[eval(name)] = name[2:]
if _type_to_name_map.has_key(gtype):
return _type_to_name_map[gtype]
return 'TYPE=' + `gtype`
# Names for characters and strings
CRLF = '\r\n'
TAB = '\t'
def send_selector(selector, host, port = 0):
"""Send a selector to a given host and port, return a file with the reply."""
import socket
if not port:
i = host.find(':')
if i >= 0:
host, port = host[:i], int(host[i+1:])
if not port:
port = DEF_PORT
elif type(port) == type(''):
port = int(port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(selector + CRLF)
s.shutdown(1)
return s.makefile('rb')
def send_query(selector, query, host, port = 0):
"""Send a selector and a query string."""
return send_selector(selector + '\t' + query, host, port)
def path_to_selector(path):
"""Takes a path as returned by urlparse and returns the appropriate selector."""
if path=="/":
return "/"
else:
return path[2:] # Cuts initial slash and data type identifier
def path_to_datatype_name(path):
"""Takes a path as returned by urlparse and maps it to a string.
See section 3.4 of RFC 1738 for details."""
if path=="/":
# No way to tell, although "INDEX" is likely
return "TYPE='unknown'"
else:
return type_to_name(path[1])
# The following functions interpret the data returned by the gopher
# server according to the expected type, e.g. textfile or directory
def get_directory(f):
"""Get a directory in the form of a list of entries."""
list = []
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if not line:
print '(Empty line from server)'
continue
gtype = line[0]
parts = line[1:].split(TAB)
if len(parts) < 4:
print '(Bad line from server:', `line`, ')'
continue
if len(parts) > 4:
if parts[4:] != ['+']:
print '(Extra info from server:',
print parts[4:], ')'
else:
parts.append('')
parts.insert(0, gtype)
list.append(parts)
return list
def get_textfile(f):
"""Get a text file as a list of lines, with trailing CRLF stripped."""
list = []
get_alt_textfile(f, list.append)
return list
def get_alt_textfile(f, func):
"""Get a text file and pass each line to a function, with trailing CRLF stripped."""
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if line[:2] == '..':
line = line[1:]
func(line)
def get_binary(f):
"""Get a binary file as one solid data block."""
data = f.read()
return data
def get_alt_binary(f, func, blocksize):
"""Get a binary file and pass each block to a function."""
while 1:
data = f.read(blocksize)
if not data:
break
func(data)
def test():
"""Trivial test program."""
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], '')
selector = DEF_SELECTOR
type = selector[0]
host = DEF_HOST
if args:
host = args[0]
args = args[1:]
if args:
type = args[0]
args = args[1:]
if len(type) > 1:
type, selector = type[0], type
else:
selector = ''
if args:
selector = args[0]
args = args[1:]
query = ''
if args:
query = args[0]
args = args[1:]
if type == A_INDEX:
f = send_query(selector, query, host)
else:
f = send_selector(selector, host)
if type == A_TEXT:
list = get_textfile(f)
for item in list: print item
elif type in (A_MENU, A_INDEX):
list = get_directory(f)
for item in list: print item
else:
data = get_binary(f)
print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
# Run the test when run as script
if __name__ == '__main__':
test()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.2/Lib/gopherlib.py
|
Python
|
mit
| 5,567
|
from string import maketrans
ori = 'Of zit kggd zitkt qkt ygxk ortfzoeqs wqlatzwqssl qfr zvg ortfzoeqs yggzwqssl. Fgv oy ngx vqfz zg hxz zitd of gft soft, piv dgfn lgsxzogfl qkt zitkt? Zohl: hstqlt eiqfut zit ygkd gy zit fxdwtk ngx utz. Zit Hkgukqddtkl!'
def ali_decode(letter):
t=maketrans("qwertyuiopasdfghjklzxcvbnm","abcdefghijklmnopqrstuvwxyz")
return letter.translate(t)
print(ali_decode(ori))
|
Akagi201/learning-python
|
puzzle/aliyun20151111.py
|
Python
|
mit
| 411
|
from pyndn import Interest
from pyndn import Data
from pyndn import Exclude
from pyndn import Name
from pyndn import Face
from pyndn import InterestFilter
from pyndn.security import KeyChain
import os
lastChunk_window = 0
lastChunk_sent = 0
def extractData_message(path, fileName, data):
payload = data.getContent()
dataName = data.getName()
dataName_size = dataName.size()
print "Extracting Data message name: ", dataName.toUri()
#print "Received data: ", payload.toRawStr()
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, fileName), 'ab') as temp_file:
temp_file.write(payload.toRawStr())
    # if received Data is a segment of the file, then we need to fetch the remaining segments
    # check whether a segment number is present in the Data Name
    try:
        dataSegmentNum = (dataName.get(dataName_size - 1)).toSegment()
        lastSegmentNum = (data.getMetaInfo().getFinalBlockId()).toNumber()
        print "dataSegmentNum" + str(dataSegmentNum)
        print "lastSegmentNum" + str(lastSegmentNum)
        # If a segment number is available and what we have received is not the FINAL_BLOCK, then fetch the NEXT segment
        if lastSegmentNum != dataSegmentNum:
            interestName = dataName.getSubName(0, dataName_size - 1)
            interestName = interestName.appendSegment(dataSegmentNum + 1)
            return False, interestName
            #self._sendNextInterest(interestName, self.interestLifetime, 'pull')
        # If a segment number is available and what we have received is the FINAL_BLOCK, then EXECUTE the configuration script
        ### Received all chunks of data --> Execute it here
elif lastSegmentNum == dataSegmentNum:
print "Received complete Data message: %s " %fileName
interestName = 'complete'
return True, interestName
else:
print 'Data segment failed'
except RuntimeError as e:
print "ERROR: %s" % e
#self.isDone = True
def request_SubsequenceDataChunk(path, fileName, data, window):
    # NOTE: this helper still references `self` (lastChunk_window, window,
    # lastChunk_sent, _sendNextInterest, interestLifetime, isDone); it appears to
    # have been lifted from a consumer class and will not run as a plain
    # module-level function without that object.
    payload = data.getContent()
    dataName = data.getName()
    dataName_size = dataName.size()
print "Extracting Data message name: ", dataName.toUri()
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, fileName), 'ab') as temp_file:
temp_file.write(payload.toRawStr())
try:
dataSegmentNum = (dataName.get(dataName_size - 1)).toSegment()
lastSegmentNum = (data.getMetaInfo().getFinalBlockId()).toNumber()
print "dataSegmentNum" + str(dataSegmentNum)
print "lastSegmentNum" + str(lastSegmentNum)
if dataSegmentNum == self.lastChunk_window:
print 'Send Interest of next window frame'
firstChunk_sent = self.lastChunk_window + 1
self.lastChunk_window = self.lastChunk_window + self.window
if self.lastChunk_window <= lastSegmentNum:
print 'This is NOT the last frame'
self.lastChunk_sent = self.lastChunk_window
else:
print 'This is the last frame'
self.lastChunk_sent = lastSegmentNum
for chunkID in range (firstChunk_sent, self.lastChunk_sent + 1):
interestName = dataName.getSubName(0, dataName_size - 1)
interestName = interestName.appendSegment(chunkID)
self._sendNextInterest(interestName, self.interestLifetime, 'pull')
else:
print 'Already sent window frame, Waiting for Data message'
if lastSegmentNum == dataSegmentNum:
print "Received complete image: %s, EXECUTED !!!!" % fileName
self.lastChunk_window = 0
        # If the Configuration Manager has sent a file with the 'install' keyword, but no segment number is available, that DATA packet is invalid. Then just do nothing and exit the program
except RuntimeError as e:
print "ERROR: %s" % e
self.isDone = True
|
AdL1398/PiCasso
|
source/modules/tools/ndnMessage_Helper.py
|
Python
|
mit
| 4,025
|
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
"""
French typography
"""
import re
from .base import base_replacements
__all__ = ('replacements',)
ch = r'A-Za-z\u00C0-\u02FF\u0370-\u1EFF'
replacements = base_replacements + (
# double ""
(
re.compile(r'(?<!"|\w)"(?! |")((?:[^"]+?|")+?)'
r'(?<! |")"(?!["{0}])()'.format(ch), re.U),
'\u00AB\\1\u00BB'
),
# single ''
(
re.compile(r"(?<!'|\w)'(?! |')((?:[^']+?|')+?)"
r"(?<! |')'(?!['{0}])()".format(ch), re.U),
'\u2039\\1\u203A'
),
)
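# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Each entry of `replacements` is assumed to be a (compiled pattern, substitution)
# pair, as the two entries defined above are; the entries coming from `.base` are
# assumed to follow the same shape. The sample sentence is made up.
if __name__ == '__main__':
    text = 'Il a dit "bonjour" et \'merci\'.'
    for pattern, substitution in replacements:
        text = pattern.sub(substitution, text)
    print(text)  # the plain quotes around "bonjour" and 'merci' become guillemets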
|
honzajavorek/tipi
|
tipi/langs/fr.py
|
Python
|
mit
| 603
|
# -*- coding: utf-8 -*-
#
# Before running:
#
# > pip install requests
#
#
import sys
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.auth import HTTPBasicAuth
import xml.etree.ElementTree as ET
CONNECTIONS_HOST = 'https://connections.<company>.com'
CONNECTIONS_USERNAME = '<REPLACE_HERE>'
CONNECTIONS_PASSWORD = '<REPLACE_HERE>'
# Disable warnings from untrusted TLS keys
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def doGet(url,headers,auth):
res = requests.get(url=url,headers=headers,auth=auth, verify=False)
if (res.status_code != 200):
print 'requests.get -> %s = %s\n' % (res.url, res)
return None;
#print res.content
return res.content
def parsePostDetails(content):
entry = ET.fromstring(content)
#entry = root.find('{http://www.w3.org/2005/Atom}entry')
#print '------------------------------------------------------------------------------------------------------------------------------------------------------'
#for child in entry:
# print child.tag, child.attrib
#print '------------------------------------------------------------------------------------------------------------------------------------------------------'
title = entry.find('{http://www.w3.org/2005/Atom}title').text
author = entry.find('{http://www.w3.org/2005/Atom}author')
name = author.find('{http://www.w3.org/2005/Atom}name').text
email = author.find('{http://www.w3.org/2005/Atom}email').text
blogPost = {
'title': title.encode("cp850"),
'name': name,
'email': email
}
#print profile
return blogPost
def getPostDetails(handle=None,entryId=None):
if (handle == None or entryId == None):
return None
url = '%s/blogs/%s/api/entries/%s' % (CONNECTIONS_HOST,handle,entryId)
headers = { 'Content-Type': 'application/atom+xml;charset=UTF-8'}
auth=HTTPBasicAuth(CONNECTIONS_USERNAME, CONNECTIONS_PASSWORD)
feed = doGet(url=url,headers=headers,auth=auth)
if (feed is None):
return None
blogPost = parsePostDetails(feed)
if (blogPost is None):
print 'Cannot get Blog Post Information.'
return None
print 'Post Details:\n'
print blogPost['title']
print ' |--> name: ' + blogPost['name']
print ' |--> email: ' + blogPost['email']
def parseBlogPosts(content):
posts = []
root = ET.fromstring(content)
entries = root.findall("{http://www.w3.org/2005/Atom}entry")
for entry in entries:
entryId = entry.find('{http://www.w3.org/2005/Atom}id').text
title = entry.find('{http://www.w3.org/2005/Atom}title').text
author = entry.find('{http://www.w3.org/2005/Atom}author')
name = author.find('{http://www.w3.org/2005/Atom}name').text
email = author.find('{http://www.w3.org/2005/Atom}email').text
#urn:lsid:ibm.com:blogs:entry-048667f2-400b-4b70-8c04-cc163403cba6
entryId = entryId[-36:]
post = {
'entryId': entryId,
'title': title.encode('utf-8'),
'name': name,
'email': email
}
posts.append(post)
return posts
def getBlogPosts(handle=None):
if (handle == None):
return None
url = '%s/blogs/%s/api/entries' % (CONNECTIONS_HOST,handle)
headers = { 'Content-Type': 'application/atom+xml;charset=UTF-8'}
auth=HTTPBasicAuth(CONNECTIONS_USERNAME, CONNECTIONS_PASSWORD)
feed = doGet(url=url,headers=headers,auth=auth)
if (feed is None):
return None
posts = parseBlogPosts(feed)
if (posts is None):
return None
return posts
#################### Main Module ###################
print 'Connecting to IBM Connections...\n'
handle = 'ce8716a1-3575-44fd-8b2e-4f5360fe03e1'
#entryId = '66ce5af8-d7e2-451c-9435-3f236accfc12'
#getPostDetails(handle,entryId)
posts = getBlogPosts(handle)
if (posts is None):
print 'Cannot get Blog Posts Information.'
sys.exit(1)
print 'Blog Posts:\n'
for post in posts:
print post['entryId']
print ' |--> name: ' + post['title']
print ' |--> name: ' + post['name']
print ' |--> email: ' + post['email']
print
|
ebasso/rest-client-examples
|
ibm-connections/ListBlogPosts.py
|
Python
|
mit
| 4,246
|
from proteus import Domain, Context
from proteus.mprans import SpatialTools as st
from proteus import Gauges as ga
from proteus import WaveTools as wt
from math import *
import numpy as np
from proteus.mprans import BodyDynamics as bd
opts=Context.Options([
# predefined test cases
("water_level", 0.19, "Height of free surface above bottom"),
# Geometry
("tank_dim", (11., 0.5,), "Dimensions of the tank"),
("tank_sponge", (1., 0.), "Length of relaxation zones (front/back, left/right)"),
("tank_BC", 'FreeSlip', "tank boundary conditions: NoSlip or FreeSlip"),
# waves
('wave', True, 'Enable generation'),
# paddle
("InputMotion",True,"Force paddle with motion"),
("period",1.55,"Motion period"),
("stroke",0.05,"Motion stroke"),
("rampS",1,"Ramp time start"),
("rampE",1,"Ramp time end"),
("Tpaddle",6, "Time of paddle operation"),
("paddle2D", True, "Switch on/off paddle2D"),
("center", (1., 0.2),"Coord of the paddle center"),
('dimx', 0.1, 'X-dimension of the paddle2D'),
('dimy', 0.4, 'Y-dimension of the paddle2D'),
('width', 1., 'Z-dimension of the paddle2D'),
('mass', 125., 'Mass of the paddle2D [kg]'),
('paddle_BC', 'FreeSlip', 'paddle2D boundaries: NoSlip or FreeSlip'),
("free_x", (1., 0., 0.), "Translational DOFs"),
("free_r", (0., 0., 0.), "Rotational DOFs"),
("paddle_inertia", 0.236, "Inertia of the paddle [kg m2]"),
("rotation_angle", 0., "Initial rotation angle (in degrees)"),
("Tn", 1e6, "Roll natural period"),
("overturning", False, "Switch on/off overturning module"),
('scheme', 'Forward_Euler', 'Numerical scheme applied to solve motion calculation (Runge_Kutta or Forward_Euler)'),
# numerical options
("refinement_level", 0.0,"he=walength/refinement_level"),
("he", 0.1,"he=walength/refinement_level"),
("cfl", 0.4,"Target cfl"),
("duration", 30., "Durarion of the simulation"),
("freezeLevelSet", True, "No motion to the levelset"),
("useVF", 1.0, "For density and viscosity smoothing"),
('movingDomain', True, "Moving domain and mesh option"),
('conservativeFlux', True,'Fix post-processing velocity bug for porous interface'),
])
# --- DOMAIN
domain = Domain.PlanarStraightLineGraphDomain()
rampS = rampE = opts.period
Tpaddle = 4 * opts.period
# --- Physical constants
rho_0=998.2
nu_0 =1.004e-6
rho_1=1.205
nu_1 =1.500e-5
sigma_01=0.0
g =np.array([0.,-9.8,0.])
gAbs=sqrt(sum(g**2))
waterLevel = opts.water_level
# --- WAVE input
wave = wt.SteadyCurrent(U=np.array([0.,0.,0.]),mwl=opts.water_level)
#######################################################################################################################################################################################################################################################################
# ----- SHAPES ----- #
####################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
L_leftSpo = opts.tank_sponge[0]
L_rightSpo = opts.tank_sponge[1]
# --- paddle2D
dimx = opts.dimx
dimy = opts.dimy
ctr = opts.center
xc1 = ctr[0]-0.5*dimx
yc1 = ctr[1]-0.5*dimy
xc2 = xc1+dimx
yc2 = yc1
xc3 = xc2
yc3 = yc2+dimy
xc4 = xc1
yc4 = yc3
tank_dim = opts.tank_dim
xEnd = tank_dim[0]+L_rightSpo
boundaryOrientations = {'y-': np.array([0., -1.,0.]),
'x+': np.array([+1., 0.,0.]),
'y+': np.array([0., +1.,0.]),
'x-': np.array([-1., 0.,0.]),
'sponge': None,
'porousLayer': None,
'moving_porousLayer': None,
}
boundaryTags = {'y-' : 1,
'x+' : 2,
'y+' : 3,
'x-' : 4,
'sponge' : 5,
'porousLayer' : 6,
'moving_porousLayer' : 7,
}
##############################################################################################################################################################################################################
# paddle2D
############################################################################################################################################################################################################
if opts.paddle2D:
dimx = dimx
dimy = dimy
dim = (dimx,dimy)
coords = ctr
VCG = dim[1]/2.
width = opts.width
mass = opts.mass
volume = float(dimx*dimy*width)
density = float(mass/volume)
inertia = opts.paddle_inertia/mass/width
# --- Shape properties setup
paddle = st.Rectangle(domain, dim=dim, coords=coords)
xc1, yc1 = paddle.vertices[0][0], paddle.vertices[0][1]
xc2, yc2 = paddle.vertices[1][0], paddle.vertices[1][1]
# --- Body properties setup
paddle2D = bd.PaddleBody(shape=paddle, substeps=20)
paddle2D.setConstraints(free_x=opts.free_x, free_r=opts.free_r)
paddle2D.setRecordValues(filename='paddle2D', all_values=True)
paddle2D.inputMotion(InputMotion=opts.InputMotion, At=opts.stroke*np.array(opts.free_x), Tt=opts.period*np.array(opts.free_x), rampStart=rampS, rampEnd=rampE,Tend=Tpaddle)
##############################################################################################################################################################################################################
# Tank
#########################################################################################################################################################################################################
obstacle = [
[ [9.21,0.],
[9.64,0.216],
[9.84,0.216],
[9.84,0.]]
]
tank = st.TankWithObstacles2D(domain = domain, dim = tank_dim, obstacles = obstacle, hole = True)
#############################################################################################################################################################################################################################################################################################################################################################################################
# ----- BOUNDARY CONDITIONS ----- #
#############################################################################################################################################################################################################################################################################################################################################################################################
# --- Paddle2D
for bc in paddle.BC_list:
bc.setFreeSlip()
# --- Tank
tank.BC['y+'].setAtmosphere()
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
tank.BC['x-'].setFreeSlip()
tank.BC['x-'].setFixedNodes()
tank.BC['x+'].setFixedNodes()
tank.BC['y+'].setTank() # sliding mesh nodes
tank.BC['y-'].setTank() #sliding mesh nodes
tank.BC['sponge'].setNonMaterial()
tank.BC['sponge'].setFixedNodes()
########################################################################################################################################################################################################################################################################################################################################################
# ----- ABSORPTION ZONE BEHIND PADDLE ----- #
########################################################################################################################################################################################################################################################################################################################################################
tank_sponge = opts.tank_sponge
dragAlpha = 5*(2*np.pi/opts.period)/1e-6
tank.setSponge(x_n=tank_sponge[0], x_p=tank_sponge[1])
left = True
smoothing = opts.he*3.
tank.setGenerationZones(x_n=left, waves=wave, smoothing=smoothing, dragAlpha=dragAlpha)
############################################################################################################################################################################
# ----- Output Gauges ----- #
############################################################################################################################################################################
"""
T = opts.duration
gauge_dx=0.25
tank_dim_x=int(tank_dim[0])
nprobes=int(tank_dim_x/gauge_dx)+1
probes=np.linspace(0., tank_dim_x, nprobes)
PG=[]
if opts.paddle2D:
zProbes=yc1*0.5
else:
zProbes=opts.water_level*0.5
for i in probes:
PG.append((i, zProbes, 0.),)
if opts.paddle2D:
gauge_dy=0.01
tol=np.array([1*(10**-5),1*(10**-5),0.])
i_point_f=np.array([paddle.vertices[0][0],paddle.vertices[0][1],0.])
i_point_f += -tol #to avoid floating point error
i_point_b=np.array([paddle.vertices[1][0],paddle.vertices[1][1],0.])
i_point_b += tol #to avoid floating point error
yProbes = np.linspace(i_point_f[1],i_point_f[1]+dimy, int(dimy/gauge_dy)+1)
LG1=[]
LG2=[]
for j in yProbes:
LG1.append((i_point_f[0],j,0.),)
LG2.append((i_point_b[0],j,0.),)
tank.attachPointGauges(
'ls',
gauges=((('phi',),PG),),
activeTime = (0., T),
sampleRate=0.,
fileName='levelset_gauges.csv')
"""
######################################################################################################################################################################################################################
# Numerical Options and other parameters #
######################################################################################################################################################################################################################
he = opts.he
domain.MeshOptions.he = he
from math import *
from proteus import MeshTools, AuxiliaryVariables
import numpy
import proteus.MeshTools
from proteus import Domain
from proteus.Profiling import logEvent
from proteus.default_n import *
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral
st.assembleDomain(domain)
#----------------------------------------------------
# Time stepping and velocity
#----------------------------------------------------
weak_bc_penalty_constant = 10.0/nu_0 #100
dt_fixed = 0.1
dt_init = min(0.1*dt_fixed,0.001)
T = opts.duration
nDTout= int(round(T/dt_fixed))
runCFL = opts.cfl
#----------------------------------------------------
# Discretization -- input options
#----------------------------------------------------
checkMass=False
applyCorrection=True
applyRedistancing=True
freezeLevelSet=opts.freezeLevelSet
useOnlyVF = False # if TRUE proteus uses only these modules --> twp_navier_stokes_p + twp_navier_stokes_n
# vof_p + vof_n
movingDomain=opts.movingDomain
useRANS = 0 # 0 -- None
# 1 -- K-Epsilon
# 2 -- K-Omega, 1998
# 3 -- K-Omega, 1988
genMesh=True
# By DEFAULT on the other files.py --> fullNewtonFlag = True
# multilevelNonlinearSolver & levelNonlinearSolver == NonlinearSolvers.Newton
useOldPETSc=False # if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.PETSc
# if FALSE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.KSP_petsc4py
useSuperlu = False #if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.LU
spaceOrder = 1
useHex = False # used for discretization, if 1.0 --> CubeGaussQuadrature
# ELSE --> SimplexGaussQuadrature
useRBLES = 0.0 # multiplied with subGridError
useMetrics = 1.0 # if 1.0 --> use of user's parameters as (ns_shockCapturingFactor, ns_lag_shockCapturing, ecc ...)
useVF = opts.useVF # used in the smoothing functions as (1.0-useVF)*smoothedHeaviside(eps_rho,phi) + useVF*fmin(1.0,fmax(0.0,vf))
# Input checks
import sys  # needed by the exit() calls below
if spaceOrder not in [1,2]:
    print "INVALID: spaceOrder " + str(spaceOrder)
    sys.exit()
if useRBLES not in [0.0, 1.0]:
    print "INVALID: useRBLES " + str(useRBLES)
    sys.exit()
if useMetrics not in [0.0, 1.0]:
    print "INVALID: useMetrics"
    sys.exit()
# Discretization
nd = 2
if spaceOrder == 1:
hFactor=1.0
if useHex:
basis=C0_AffineLinearOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,3)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,3)
else:
basis=C0_AffineLinearOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,3)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3)
#elementBoundaryQuadrature = SimplexLobattoQuadrature(nd-1,1)
elif spaceOrder == 2:
hFactor=0.5
if useHex:
basis=C0_AffineLagrangeOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,4)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,4)
else:
basis=C0_AffineQuadraticOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,4)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
# Numerical parameters
ns_forceStrongDirichlet = False
backgroundDiffusionFactor=0.01
if useMetrics:
ns_shockCapturingFactor = 0.5 # magnifies numerical viscosity in NS (smoothening velocity fields)
    ns_lag_shockCapturing = True # lagging numerical viscosity speeds up Newton but destabilizes the solution
ns_lag_subgridError = True # less nonlinear but less stable
ls_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening phi)
ls_lag_shockCapturing = True # less nonlinear but less stable
ls_sc_uref = 1.0 # reference gradient in numerical solution (higher=more diffusion)
ls_sc_beta = 1.5 # 1 is fully nonlinear, 2 is linear
vof_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening volume of fraction)
vof_lag_shockCapturing = True # less nonlinear but less stable
vof_sc_uref = 1.0
vof_sc_beta = 1.5
rd_shockCapturingFactor = 0.5
rd_lag_shockCapturing = False
epsFact_density = 3.0 # control width of water/air transition zone
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = ecH = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 1.0 # affects smoothing diffusion in mass conservation
redist_Newton = True
kappa_shockCapturingFactor = 0.5
kappa_lag_shockCapturing = True # False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.5
dissipation_shockCapturingFactor = 0.5
dissipation_lag_shockCapturing = True # False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.5
else:
ns_shockCapturingFactor = 0.9
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ls_shockCapturingFactor = 0.9
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = 1.0
vof_shockCapturingFactor = 0.9
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = 1.0
rd_shockCapturingFactor = 0.9
rd_lag_shockCapturing = False
epsFact_density = 1.5
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 10.0
redist_Newton = False
kappa_shockCapturingFactor = 0.9
kappa_lag_shockCapturing = True#False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.9
dissipation_lag_shockCapturing = True#False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
ns_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
vof_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
ls_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mcorr_nl_atol_res = max(1.0e-12,0.0001*domain.MeshOptions.he**2)
rd_nl_atol_res = max(1.0e-12,0.01*domain.MeshOptions.he)
kappa_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
dissipation_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mesh_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
#turbulence
ns_closure=0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
if useRANS == 1:
ns_closure = 3
elif useRANS >= 2:
    ns_closure = 4
# Initial condition
waterLine_x = 2*tank_dim[0]
waterLine_z = opts.water_level
def waveHeight(x,t):
    # use the SteadyCurrent object defined above as the wave input
    waterDepth = wave.eta(x, t) + wave.mwl
    return waterDepth
def wavePhi(x,t):
    return x[nd-1] - waveHeight(x,t)
def waveVF(x,t):
return smoothedHeaviside(epsFact_consrv_heaviside*he,wavePhi(x,t))
def signedDistance(x):
phi_x = x[0]-waterLine_x
phi_z = x[nd-1]-waterLine_z
if phi_x < 0.0:
if phi_z < 0.0:
return max(phi_x,phi_z)
else:
return phi_z
else:
if phi_z < 0.0:
return phi_x
else:
return sqrt(phi_x**2 + phi_z**2)
|
erdc-cm/air-water-vv
|
2d/waveOvertopping/OliveiraEtAl2012/OliveiraOvertopping.py
|
Python
|
mit
| 17,581
|
from scapy.all import *
import time
def record_press():
print "Pushed button"
data = {
"Timestamp": time.strftime("%Y-%m-%d %H:%M"),
}
print data
def arp_display(pkt):
if pkt[ARP].op == 1:
# ARP Probe
if pkt[ARP].psrc == '0.0.0.0':
print pkt[ARP].hwsrc
if pkt[ARP].hwsrc == 'your_mac_here...':
record_press()
else:
print "ARP Probe from unknown device: " + pkt[ARP].hwsrc
try:
print sniff(prn=arp_display, filter="arp", store=0, count=10)
except Exception as e:
print e.message
|
LocalhostAPI/dashbutton
|
app4.py
|
Python
|
mit
| 603
|
# what data to operate on
base_where_clause = "WHERE `source`='jhu-csse' AND `time_type`='day'"
### base_where_clause = "WHERE `source`='src2' AND `time_type`='day'" ###
# signal name construction
# NOTE: selecting these (unique) from the database takes 7-8 mins, so reconstructing here for efficiency
# TODO: maybe just put the damn UNIQUE query in here so you dont fat-finger it again george.
# also these hardcoded signals are unique to JHU data, or are at least not used by all sources.
signals = []
for case in ('confirmed_', 'deaths_'):
for period in ('7dav_', ''): # NOTE: that is a V as in "7 Day AVerage", not a "Y" as in "7 DAY"
for count in ('cumulative_', 'incidence_'):
for typ in ('num', 'prop'):
signals.append(case+period+count+typ)
### signals = ['sig2'] ###
# variable to split on, 'time_value' is good because its high cardinality is suitable for chunking
PARTITION_VARIABLE = 'time_value'
PARTITION_SPLITS = [20200101 + i*100 for i in range(10)] # first day of the month for jan - oct 2020 in YYYYMMDD form
### PARTITION_SPLITS = [1,2] ###
print('''
--
-- run this as:
-- python3 generate_islatest_fix_sql.py > islatest_fix.sql
-- mysql -vvv -p epidata < islatest_fix.sql
-- or:
-- date ; (python3 generate_islatest_fix_sql.py | mysql -vvv -p epidata ) ; date
--
''')
# create temp table
print("CREATE TABLE `islatest_fix` (`latest_id` INT(11) NOT NULL, PRIMARY KEY (`latest_id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8;")
# find latest issue by partition (and by signal) and save primary ids into temp table
for partition_index in range(len(PARTITION_SPLITS)+1):
ge_condition = 'TRUE' if partition_index == 0 else f'`{PARTITION_VARIABLE}` >= {PARTITION_SPLITS[partition_index - 1]}'
l_condition = 'TRUE' if partition_index == len(PARTITION_SPLITS) else f'`{PARTITION_VARIABLE}` < {PARTITION_SPLITS[partition_index]}'
partition_condition = f'({ge_condition}) AND ({l_condition})'
for sig in signals:
where_clause = base_where_clause + " AND `signal`='%s' AND %s" % (sig, partition_condition)
print('''
INSERT INTO `islatest_fix`
SELECT id FROM
( SELECT `source`, `signal`, `time_type`, `geo_type`, `geo_value`, `time_value`, MAX(`issue`) AS `issue` FROM `covidcast`
''' + where_clause + '''
GROUP BY `source`, `signal`, `time_type`, `geo_type`, `geo_value`, `time_value`
) b
LEFT JOIN `covidcast` a
USING (`source`, `signal`, `time_type`, `geo_type`, `geo_value`, `time_value`, `issue`);
''')
# clear any current (potentially erroneous) is_latest_issue flags
print("UPDATE `covidcast` SET `is_latest_issue`=0 " + base_where_clause + " AND `is_latest_issue`=1;")
# re-set proper is_latest_issue flags
print("UPDATE (SELECT `latest_id` FROM `islatest_fix`) xxx LEFT JOIN `covidcast` ON `xxx`.`latest_id`=`covidcast`.`id` SET `covidcast`.`is_latest_issue`=1;")
# clean up temp table
print("-- TODO: drop this table")
print("-- DROP TABLE `islatest_fix`;")
|
cmu-delphi/delphi-epidata
|
src/acquisition/covidcast/generate_islatest_fix_sql.py
|
Python
|
mit
| 2,959
|
# Generated by Django 2.2.1 on 2019-08-09 00:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quotations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=50, unique=True)),
],
),
migrations.AddField(
model_name='author',
name='tags',
field=models.ManyToManyField(to='quotations.Tag'),
),
]
|
jessamynsmith/underquoted
|
quotations/migrations/0002_auto_20190808_2049.py
|
Python
|
mit
| 677
|
"""
Unit tests for genome_annotation
"""
import logging
from unittest import skipUnless
from . import shared
from doekbase.data_api.annotation.genome_annotation import GenomeAnnotationAPI
from doekbase.data_api.sequence.assembly import AssemblyAPI
from doekbase.data_api.taxonomy.taxon.api import TaxonAPI
_log = logging.getLogger(__name__)
genome_new = "PrototypeReferenceGenomes/kb|g.166819"
genome_old = "OriginalReferenceGenomes/kb|g.166819"
t_new = None
t_old = None
def setup():
shared.setup()
global t_new, t_old
t_new = GenomeAnnotationAPI(shared.services, shared.token, genome_new)
t_old = GenomeAnnotationAPI(shared.services, shared.token, genome_old)
######## New Genome type tests
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_taxon_new():
_log.info("Input {}".format(genome_new))
taxon = t_new.get_taxon()
_log.info("Output {}".format(taxon))
assert isinstance(taxon, TaxonAPI)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_assembly_new():
_log.info("Input {}".format(genome_new))
assembly = t_new.get_assembly()
_log.info("Output {}".format(assembly))
assert isinstance(assembly, AssemblyAPI)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_types_new():
_log.info("Input {}".format(genome_new))
feature_types = t_new.get_feature_types()
_log.info("Output {}".format(feature_types))
assert isinstance(feature_types, list)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_type_descriptions_new():
_log.info("Input {}".format(genome_new))
feature_type_descriptions = t_new.get_feature_type_descriptions()
_log.info("Output {}".format(feature_type_descriptions))
assert isinstance(feature_type_descriptions, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_ids_new():
_log.info("Input {}".format(genome_new))
feature_ids = t_new.get_feature_ids()
_log.info("Output {}".format(type(feature_ids)))
assert isinstance(feature_ids, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_type_counts_new():
_log.info("Input {}".format(genome_new))
feature_type_counts = t_new.get_feature_type_counts()
_log.info("Output {}".format(feature_type_counts))
assert isinstance(feature_type_counts, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_locations_new():
_log.info("Input {}".format(genome_new))
feature_locations = t_new.get_feature_locations()
_log.info("Output {}".format(len(feature_locations)))
assert isinstance(feature_locations, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_dna_new():
_log.info("Input {}".format(genome_new))
feature_dna = t_new.get_feature_dna()
_log.info("Output {}".format(len(feature_dna)))
assert isinstance(feature_dna, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_functions_new():
_log.info("Input {}".format(genome_new))
feature_functions = t_new.get_feature_functions()
_log.info("Output {}".format(len(feature_functions)))
assert isinstance(feature_functions, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_aliases_new():
_log.info("Input {}".format(genome_new))
feature_aliases = t_new.get_feature_aliases()
_log.info("Output {}".format(len(feature_aliases)))
assert isinstance(feature_aliases, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_publications_new():
_log.info("Input {}".format(genome_new))
feature_publications = t_new.get_feature_publications()
_log.info("Output {}".format(len(feature_publications)))
assert isinstance(feature_publications, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_features_new():
_log.info("Input {}".format(genome_new))
features = t_new.get_features()
_log.info("Output {}".format(len(features)))
assert isinstance(features, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_proteins_new():
_log.info("Input {}".format(genome_new))
proteins = t_new.get_proteins()
_log.info("Output {}".format(len(proteins)))
assert isinstance(proteins, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_mrna_valid_new():
inputs = ["kb|g.166819.mRNA.0", "kb|g.166819.mRNA.238"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_cds_by_mrna(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 2
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_mrna_invalid_new():
inputs = ["kb|g.166819.mRNA.99999999999", "kb|g.166819.CDS.1"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_cds_by_mrna(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_cds_valid_new():
inputs = ["kb|g.166819.CDS.0", "kb|g.166819.CDS.278"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_mrna_by_cds(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 2
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_cds_invalid_new():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.CDS.9999999999"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_mrna_by_cds(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_mrna_valid_new():
inputs = ["kb|g.166819.mRNA.0", "kb|g.166819.mRNA.238"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_gene_by_mrna(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 2
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_mrna_invalid_new():
inputs = ["kb|g.166819.mRNA.99999999999", "kb|g.166819.CDS.1"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_gene_by_mrna(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_gene_valid_new():
inputs = ["kb|g.166819.locus.256", "kb|g.166819.locus.112"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_cds_by_gene(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 2
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_gene_invalid_new():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.locus.999999"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_cds_by_gene(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_gene_valid_new():
inputs = ["kb|g.166819.locus.256", "kb|g.166819.locus.112"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_mrna_by_gene(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 2
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_gene_invalid_new():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.locus.999999"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_mrna_by_gene(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_cds_valid_new():
inputs = ["kb|g.166819.CDS.0", "kb|g.166819.CDS.278"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_gene_by_cds(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 2
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_cds_invalid_new():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.CDS.999999"]
_log.info("Input {} {}".format(genome_new, inputs))
subset_features = t_new.get_gene_by_cds(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
######## Old Genome Annotation Type tests
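# The same accessor tests against the legacy Genome annotation type; the
# mRNA/CDS/gene relationship lookups are expected to return no results here,
# so both the "valid" and "invalid" cases assert empty output.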
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_taxon_old():
_log.info("Input {}".format(genome_old))
taxon = t_old.get_taxon()
_log.info("Output {}".format(taxon))
assert isinstance(taxon, TaxonAPI)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_assembly_old():
_log.info("Input {}".format(genome_old))
assembly = t_old.get_assembly()
_log.info("Output {}".format(assembly))
assert isinstance(assembly, AssemblyAPI)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_types_old():
_log.info("Input {}".format(genome_old))
feature_types = t_old.get_feature_types()
_log.info("Output {}".format(feature_types))
assert isinstance(feature_types, list)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_type_descriptions_old():
_log.info("Input {}".format(genome_old))
feature_type_descriptions = t_old.get_feature_type_descriptions()
_log.info("Output {}".format(feature_type_descriptions))
assert isinstance(feature_type_descriptions, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_ids_old():
_log.info("Input {}".format(genome_old))
feature_ids = t_old.get_feature_ids()
_log.info("Output {}".format(type(feature_ids)))
assert isinstance(feature_ids, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_type_counts_old():
_log.info("Input {}".format(genome_old))
feature_type_counts = t_old.get_feature_type_counts()
_log.info("Output {}".format(feature_type_counts))
assert isinstance(feature_type_counts, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_locations_old():
_log.info("Input {}".format(genome_old))
feature_locations = t_old.get_feature_locations()
_log.info("Output {}".format(len(feature_locations)))
assert isinstance(feature_locations, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_dna_old():
_log.info("Input {}".format(genome_old))
feature_dna = t_old.get_feature_dna()
_log.info("Output {}".format(len(feature_dna)))
assert isinstance(feature_dna, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_functions_old():
_log.info("Input {}".format(genome_old))
feature_functions = t_old.get_feature_functions()
_log.info("Output {}".format(len(feature_functions)))
assert isinstance(feature_functions, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_aliases_old():
_log.info("Input {}".format(genome_old))
feature_aliases = t_old.get_feature_aliases()
_log.info("Output {}".format(len(feature_aliases)))
assert isinstance(feature_aliases, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_feature_publications_old():
_log.info("Input {}".format(genome_old))
feature_publications = t_old.get_feature_publications()
_log.info("Output {}".format(len(feature_publications)))
assert isinstance(feature_publications, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_features_old():
_log.info("Input {}".format(genome_old))
features = t_old.get_features()
_log.info("Output {}".format(len(features)))
assert isinstance(features, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_proteins_old():
_log.info("Input {}".format(genome_old))
proteins = t_old.get_proteins()
_log.info("Output {}".format(len(proteins)))
assert isinstance(proteins, dict)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_mrna_valid_old():
inputs = ["kb|g.166819.mRNA.0", "kb|g.166819.mRNA.238"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_cds_by_mrna(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_mrna_invalid_old():
inputs = ["kb|g.166819.mRNA.99999999999", "kb|g.166819.CDS.1"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_cds_by_mrna(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_cds_valid_old():
inputs = ["kb|g.166819.CDS.0", "kb|g.166819.CDS.278"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_mrna_by_cds(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_cds_invalid_old():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.CDS.9999999999"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_mrna_by_cds(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_mrna_valid_old():
inputs = ["kb|g.166819.mRNA.0", "kb|g.166819.mRNA.238"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_gene_by_mrna(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_mrna_invalid_old():
inputs = ["kb|g.166819.mRNA.99999999999", "kb|g.166819.CDS.1"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_gene_by_mrna(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_gene_valid_old():
inputs = ["kb|g.166819.locus.256", "kb|g.166819.locus.112"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_cds_by_gene(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_cds_by_gene_invalid_old():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.locus.999999"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_cds_by_gene(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_gene_valid_old():
inputs = ["kb|g.166819.locus.256", "kb|g.166819.locus.112"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_mrna_by_gene(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_mrna_by_gene_invalid_old():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.locus.999999"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_mrna_by_gene(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_cds_valid_old():
inputs = ["kb|g.166819.CDS.0", "kb|g.166819.CDS.278"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_gene_by_cds(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_gene_by_cds_invalid_old():
inputs = ["kb|g.166819.mRNA.1", "kb|g.166819.CDS.999999"]
_log.info("Input {} {}".format(genome_old, inputs))
subset_features = t_old.get_gene_by_cds(inputs)
_log.info("Output {}".format(subset_features))
assert len(subset_features) == 0
|
realmarcin/data_api
|
lib/doekbase/data_api/tests/test_genome_annotation_api.py
|
Python
|
mit
| 17,000
|
__author__ = 'mriegel'
from django.contrib import admin
from models import *
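# Django admin registrations for the stockstore models (currencies, exchange
# rates, companies, sectors, stocks, rates, dividends, stock exchanges).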
class CurrencyAdmin(admin.ModelAdmin):
    list_display = ('name', 'sign', 'base_rate', )
class CurrencyExchangeRateAdmin(admin.ModelAdmin):
    list_display = ('date', 'rate', )
class CompanyAdmin(admin.ModelAdmin):
    list_display = ('name', 'country',)
    list_filter = ('country',)
class SectorAdmin(admin.ModelAdmin):
    list_display = ('name', )
class StockAdmin(admin.ModelAdmin):
    list_display = ('name', 'country', 'sector', 'type', 'symbols', )
    list_filter = ('type', 'sector',)
    def symbols(self, obj):
        # Computed list column combining the three identifiers of a stock.
        return "%s / %s / %s" % (obj.symbol, obj.wkn, obj.isin)
    symbols.short_description = "Symbol / WKN / ISIN"
    def country(self, obj):
        # Computed list column: the country is taken from the related company.
        return obj.company.country
class StockRateAdmin(admin.ModelAdmin):
    list_display = ('stock', 'stock_exchange', 'date', 'close', )
    list_filter = ('stock', 'stock_exchange', )
class StockDividendAdmin(admin.ModelAdmin):
    list_display = ('stock', 'date', 'value', )
    list_filter = ('stock',)
class StockExchangeAdmin(admin.ModelAdmin):
    list_display = ('name', 'symbol_yahoo', 'symbol_finanzennet')
admin.site.register(Currency, CurrencyAdmin)
admin.site.register(CurrencyExchangeRate, CurrencyExchangeRateAdmin)
admin.site.register(Sector, SectorAdmin)
admin.site.register(Company, CompanyAdmin)
admin.site.register(Stock, StockAdmin)
admin.site.register(StockRate, StockRateAdmin)
admin.site.register(StockDividend, StockDividendAdmin)
admin.site.register(StockExchange, StockExchangeAdmin)
|
mrcrgl/stockstore
|
app/admin.py
|
Python
|
mit
| 1,640
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from itertools import product
from datetime import date
import holidays
class TestDE(unittest.TestCase):
def setUp(self):
self.holidays = holidays.DE()
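        # One holiday calendar per German subdivision, keyed by subdivision code.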
self.prov_hols = {
prov: holidays.DE(subdiv=prov) for prov in holidays.DE.subdivisions
}
def test_no_data_before_1990(self):
de_1989 = sum(
holidays.DE(years=[1989], subdiv=p)
for p in holidays.DE.subdivisions
)
self.assertEqual(len(de_1989), 0)
def test_all_holidays_present(self):
de_2015 = sum(
holidays.DE(years=[2015], subdiv=p)
for p in holidays.DE.subdivisions
)
in_2015 = sum((de_2015.get_list(key) for key in de_2015), [])
all_de = [
"Neujahr",
"Heilige Drei Könige",
"Karfreitag",
"Ostersonntag",
"Ostermontag",
"Erster Mai",
"Christi Himmelfahrt",
"Pfingstsonntag",
"Pfingstmontag",
"Fronleichnam",
"Mariä Himmelfahrt",
"Tag der Deutschen Einheit",
"Reformationstag",
"Allerheiligen",
"Buß- und Bettag",
"Erster Weihnachtstag",
"Zweiter Weihnachtstag",
]
for holiday in all_de:
self.assertIn(holiday, in_2015, "missing: {}".format(holiday))
for holiday in in_2015:
self.assertIn(holiday, all_de, "extra: {}".format(holiday))
def test_fixed_holidays(self):
fixed_days_whole_country = (
(1, 1), # Neujahr
(5, 1), # Maifeiertag
(10, 3), # Tag der Deutschen Einheit
(12, 25), # Erster Weihnachtstag
(12, 26), # Zweiter Weihnachtstag
)
for y, (m, d) in product(range(1991, 2050), fixed_days_whole_country):
self.assertIn(date(y, m, d), self.holidays)
def test_tag_der_deutschen_einheit_in_1990(self):
self.assertIn(date(1990, 10, 3), self.holidays)
def test_heilige_drei_koenige(self):
provinces_that_have = {"BW", "BY", "BYP", "ST"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, year in product(provinces_that_have, range(1991, 2050)):
self.assertIn(date(year, 1, 6), self.prov_hols[province])
for province, year in product(provinces_that_dont, range(1991, 2050)):
self.assertNotIn(date(year, 1, 6), self.prov_hols[province])
def test_karfreitag(self):
known_good = [
(2014, 4, 18),
(2015, 4, 3),
(2016, 3, 25),
(2017, 4, 14),
(2018, 3, 30),
(2019, 4, 19),
(2020, 4, 10),
(2021, 4, 2),
(2022, 4, 15),
(2023, 4, 7),
(2024, 3, 29),
]
for province, (y, m, d) in product(
holidays.DE.subdivisions, known_good
):
self.assertIn(date(y, m, d), self.prov_hols[province])
def test_ostersonntag(self):
known_good = [
(2014, 4, 20),
(2015, 4, 5),
(2016, 3, 27),
(2017, 4, 16),
(2018, 4, 1),
(2019, 4, 21),
(2020, 4, 12),
(2021, 4, 4),
(2022, 4, 17),
(2023, 4, 9),
(2024, 3, 31),
]
provinces_that_have = {"BB"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, (y, m, d) in product(provinces_that_have, known_good):
self.assertIn(date(y, m, d), self.prov_hols[province])
for province, (y, m, d) in product(provinces_that_dont, known_good):
self.assertNotIn(date(y, m, d), self.prov_hols[province])
def test_ostermontag(self):
known_good = [
(2014, 4, 21),
(2015, 4, 6),
(2016, 3, 28),
(2017, 4, 17),
(2018, 4, 2),
(2019, 4, 22),
(2020, 4, 13),
(2021, 4, 5),
(2022, 4, 18),
(2023, 4, 10),
(2024, 4, 1),
]
for province, (y, m, d) in product(
holidays.DE.subdivisions, known_good
):
self.assertIn(date(y, m, d), self.prov_hols[province])
def test_75_jahrestag_beendigung_zweiter_weltkrieg(self):
known_good = [(2020, 5, 8)]
provinces_that_have = {"BE"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, (y, m, d) in product(provinces_that_have, known_good):
self.assertIn(date(y, m, d), self.prov_hols[province])
for province, (y, m, d) in product(provinces_that_dont, known_good):
self.assertNotIn(date(y, m, d), self.prov_hols[province])
def test_christi_himmelfahrt(self):
known_good = [
(2014, 5, 29),
(2015, 5, 14),
(2016, 5, 5),
(2017, 5, 25),
(2018, 5, 10),
(2019, 5, 30),
(2020, 5, 21),
(2021, 5, 13),
(2022, 5, 26),
(2023, 5, 18),
(2024, 5, 9),
]
for province, (y, m, d) in product(
holidays.DE.subdivisions, known_good
):
self.assertIn(date(y, m, d), self.prov_hols[province])
def test_weltkindertag(self):
known_good = [(2019, 9, 20), (2021, 9, 20)]
provinces_that_have = {"TH"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, (y, m, d) in product(provinces_that_have, known_good):
self.assertIn(date(y, m, d), self.prov_hols[province])
for province, (y, m, d) in product(provinces_that_dont, known_good):
self.assertNotIn(date(y, m, d), self.prov_hols[province])
def test_frauentag(self):
known_good = [
(2019, 3, 8),
]
provinces_that_have = {"BE"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, (y, m, d) in product(provinces_that_have, known_good):
self.assertIn(date(y, m, d), self.prov_hols[province])
for province, (y, m, d) in product(provinces_that_dont, known_good):
self.assertNotIn(date(y, m, d), self.prov_hols[province])
def test_pfingstsonntag(self):
known_good = [
(2014, 6, 8),
(2015, 5, 24),
(2016, 5, 15),
(2017, 6, 4),
(2018, 5, 20),
(2019, 6, 9),
(2020, 5, 31),
(2021, 5, 23),
(2022, 6, 5),
(2023, 5, 28),
(2024, 5, 19),
]
provinces_that_have = {"BB"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, (y, m, d) in product(provinces_that_have, known_good):
self.assertIn(date(y, m, d), self.prov_hols[province])
for province, (y, m, d) in product(provinces_that_dont, known_good):
self.assertNotIn(date(y, m, d), self.prov_hols[province])
def test_pfingstmontag(self):
known_good = [
(2014, 6, 9),
(2015, 5, 25),
(2016, 5, 16),
(2017, 6, 5),
(2018, 5, 21),
(2019, 6, 10),
(2020, 6, 1),
(2021, 5, 24),
(2022, 6, 6),
(2023, 5, 29),
(2024, 5, 20),
]
for province, (y, m, d) in product(
holidays.DE.subdivisions, known_good
):
self.assertIn(date(y, m, d), self.prov_hols[province])
def test_fronleichnam(self):
known_good = [
(2014, 6, 19),
(2015, 6, 4),
(2016, 5, 26),
(2017, 6, 15),
(2018, 5, 31),
(2019, 6, 20),
(2020, 6, 11),
(2021, 6, 3),
(2022, 6, 16),
(2023, 6, 8),
(2024, 5, 30),
]
provinces_that_have = {"BW", "BY", "BYP", "HE", "NW", "RP", "SL"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, (y, m, d) in product(provinces_that_have, known_good):
self.assertIn(date(y, m, d), self.prov_hols[province])
for province, (y, m, d) in product(provinces_that_dont, known_good):
self.assertNotIn(date(y, m, d), self.prov_hols[province])
def test_mariae_himmelfahrt(self):
provinces_that_have = {"BY", "SL"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, year in product(provinces_that_have, range(1991, 2050)):
self.assertIn(date(year, 8, 15), self.prov_hols[province])
for province, year in product(provinces_that_dont, range(1991, 2050)):
self.assertNotIn(date(year, 8, 15), self.prov_hols[province])
def test_reformationstag(self):
prov_that_have = {"BB", "MV", "SN", "ST", "TH"}
prov_yes_since_2018 = prov_that_have.union({"HB", "HH", "NI", "SH"})
prov_that_dont = set(holidays.DE.subdivisions) - prov_that_have
prov_not_since_2018 = (
set(holidays.DE.subdivisions) - prov_yes_since_2018
)
for province, year in product(prov_that_have, range(1991, 2050)):
# in 2017 all states got the reformationstag for that year
if year == 2017:
continue
self.assertIn(date(year, 10, 31), self.prov_hols[province])
# additional provinces got this holiday 2018
for province, year in product(prov_yes_since_2018, range(2018, 2050)):
self.assertIn(date(year, 10, 31), self.prov_hols[province])
for province, year in product(prov_that_dont, range(1991, 2017)):
self.assertNotIn(date(year, 10, 31), self.prov_hols[province])
for province, year in product(prov_not_since_2018, range(2018, 2050)):
self.assertNotIn(date(year, 10, 31), self.prov_hols[province])
# check the 2017 case where all states have the reformationstag
for province in holidays.DE.subdivisions:
self.assertIn(date(2017, 10, 31), self.prov_hols[province])
def test_allerheiligen(self):
provinces_that_have = {"BW", "BY", "BYP", "NW", "RP", "SL"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, year in product(provinces_that_have, range(1991, 2050)):
self.assertIn(date(year, 11, 1), self.prov_hols[province])
for province, year in product(provinces_that_dont, range(1991, 2050)):
self.assertNotIn(date(year, 11, 1), self.prov_hols[province])
def test_buss_und_bettag(self):
known_good = [
(2014, 11, 19),
(2015, 11, 18),
(2016, 11, 16),
(2017, 11, 22),
(2018, 11, 21),
(2019, 11, 20),
(2020, 11, 18),
(2021, 11, 17),
(2022, 11, 16),
(2023, 11, 22),
(2024, 11, 20),
]
provinces_that_have = {"SN"}
provinces_that_dont = (
set(holidays.DE.subdivisions) - provinces_that_have
)
for province, (y, m, d) in product(provinces_that_have, known_good):
self.assertIn(date(y, m, d), self.prov_hols[province])
for province, (y, m, d) in product(provinces_that_dont, known_good):
self.assertNotIn(date(y, m, d), self.prov_hols[province])
def test_internationaler_frauentag(self):
prov_that_have = {"BE"}
prov_that_dont = set(holidays.DE.subdivisions) - prov_that_have
for province, year in product(
holidays.DE.subdivisions, range(1991, 2018)
):
self.assertNotIn(date(year, 3, 8), self.prov_hols[province])
for province, year in product(prov_that_have, range(2019, 2050)):
self.assertIn(date(year, 3, 8), self.prov_hols[province])
for province, year in product(prov_that_dont, range(2019, 2050)):
self.assertNotIn(date(year, 3, 8), self.prov_hols[province])
|
dr-prodigy/python-holidays
|
test/countries/test_germany.py
|
Python
|
mit
| 12,969
|
class BaseModel(object):
def __eq__(self, other):
return type(self) == type(other) and self.deep_eq_dict(self.__dict__, other.__dict__)
    def deep_eq_dict(self, a, b):
        # Both dicts must have the same keys and every value must compare
        # equal; nested dict values are compared recursively.
        if set(a.keys()) != set(b.keys()):
            return False
        for k in a.keys():
            if isinstance(a[k], dict) and isinstance(b[k], dict):
                if not self.deep_eq_dict(a[k], b[k]):
                    return False
            elif a[k] != b[k]:
                return False
        return True
|
maveron58/indiana
|
models/base/base_model.py
|
Python
|
mit
| 481
|