text stringlengths 4 1.02M | meta dict |
|---|---|
"""Base class for flat config players."""
from typing import List
import abc
from mpf.core.config_player import ConfigPlayer
class FlatConfigPlayer(ConfigPlayer, metaclass=abc.ABCMeta):

    """Flat show players.

    Base class for config players whose config section is a flat mapping:
    each entry is validated up front and expanded through the parent
    ConfigPlayer machinery.
    """

    __slots__ = []  # type: List[str]

    def validate_config_entry(self, settings, name):
        """Validate one entry of this player.

        Args:
        ----
            settings: The raw settings for this entry from the config file.
            name: The name of this entry (used by the parser for errors).

        Returns the parsed and validated config for this entry.
        """
        config = self._parse_config(settings, name)
        return config

    def get_full_config(self, value):
        """Return full config.

        Expands every element of *value* in place via the parent class'
        get_full_config and returns the (mutated) mapping.
        """
        for element in value:
            value[element] = super().get_full_config(value[element])
        return value

    @abc.abstractmethod
    def play(self, settings, context, calling_context, priority=0, **kwargs):
        """Directly play player.

        Must be implemented by the concrete config player subclass.
        """
        # **kwargs since this is an event callback
        raise NotImplementedError

    @abc.abstractmethod
    def get_express_config(self, value) -> dict:
        """Parse short config version.

        Implements "express" settings for this config_player which is what
        happens when a config is passed as a string instead of a full config
        dict. (This is detected automatically and this method is only called
        when the config is not a dict.)

        For example, the led_player uses the express config to parse a string
        like 'ff0000-f.5s' and translate it into:

        color: ff0000
        fade: 500

        Since every config_player is different, this method raises a
        NotImplementedError and must be configured in the child class.

        Args:
        ----
            value: The single line string value from a config file.

        Returns a dictionary (which will then be passed through the config
        validator)
        """
        raise NotImplementedError(self.config_file_section)
| {
"content_hash": "d73f273c513aa028655af30ba6e5ee3d",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 31.75438596491228,
"alnum_prop": 0.6497237569060773,
"repo_name": "missionpinball/mpf",
"id": "e665ace6fcb404fc5f6167e715da220ae3e8a303",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mpf/config_players/flat_config_player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "C++",
"bytes": "4019"
},
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "4532953"
}
],
"symlink_target": ""
} |
import copy
from cms.test_utils.project.sampleapp.cms_apps import NamespacedApp, SampleApp, SampleApp2
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Permission, Group
from django.contrib.sites.models import Site
from django.template import Template, TemplateSyntaxError
from django.template.context import Context
from django.test.utils import override_settings
from django.utils.translation import activate, override as force_language
from cms.apphook_pool import apphook_pool
from menus.base import NavigationNode
from menus.menu_pool import menu_pool, _build_nodes_inner_for_one_menu
from menus.models import CacheKey
from menus.utils import mark_descendants, find_selected, cut_levels
from cms.api import create_page, create_title
from cms.cms_menus import get_visible_nodes
from cms.models import Page, ACCESS_PAGE_AND_DESCENDANTS, Title
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.test_utils.project.sampleapp.cms_menus import SampleAppMenu, StaticMenu, StaticMenu2
from cms.test_utils.fixtures.menus import (MenusFixture, SubMenusFixture,
SoftrootFixture, ExtendedMenusFixture)
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE_ADD, URL_CMS_PAGE
from cms.test_utils.util.context_managers import apphooks, LanguageOverride
from cms.test_utils.util.mock import AttributeObject
from cms.utils import get_current_site
from cms.utils.conf import get_cms_setting
class BaseMenuTest(CMSTestCase):
    """Common setup for menu tests: restricts menu_pool to CMSMenu only."""

    def _get_nodes(self, path='/'):
        # Build a small hand-rolled NavigationNode tree (ids 1-5, with 2
        # under 1 and 3/4 under 2) and run it through the menu modifiers.
        node1 = NavigationNode('1', '/1/', 1)
        node2 = NavigationNode('2', '/2/', 2, 1)
        node3 = NavigationNode('3', '/3/', 3, 2)
        node4 = NavigationNode('4', '/4/', 4, 2)
        node5 = NavigationNode('5', '/5/', 5)
        nodes = [node1, node2, node3, node4, node5]
        tree = _build_nodes_inner_for_one_menu([n for n in nodes], "test")
        request = self.get_request(path)
        renderer = menu_pool.get_renderer(request)
        renderer.apply_modifiers(tree, request)
        return tree, nodes

    def setUp(self):
        super(BaseMenuTest, self).setUp()
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        # Save the globally-registered menus and replace them with only the
        # CMSMenu so the tests are isolated from other registered menus;
        # restored in tearDown.
        self.old_menu = menu_pool.menus
        menu_pool.menus = {'CMSMenu': self.old_menu['CMSMenu']}
        menu_pool.clear(settings.SITE_ID)
        activate("en")

    def tearDown(self):
        # Restore the global menu registry mutated in setUp.
        menu_pool.menus = self.old_menu
        super(BaseMenuTest, self).tearDown()

    def get_page(self, num):
        """Return the public page titled 'P<num>' from the fixture tree."""
        return Page.objects.public().get(title_set__title='P%s' % num)
class MenuDiscoveryTest(ExtendedMenusFixture, CMSTestCase):
    """Tests for menu discovery and registration in the global menu_pool."""

    def setUp(self):
        super(MenuDiscoveryTest, self).setUp()
        # Reset the global pool and register exactly three menus:
        # one plain menu and two attached (navigation-extender) menus.
        # The original registry is restored in tearDown.
        menu_pool.discovered = False
        self.old_menu = menu_pool.menus
        menu_pool.menus = {}
        menu_pool.discover_menus()
        menu_pool.register_menu(SampleAppMenu)
        menu_pool.register_menu(StaticMenu)
        menu_pool.register_menu(StaticMenu2)

    def tearDown(self):
        # Restore the global menu registry mutated in setUp.
        menu_pool.menus = self.old_menu
        super(MenuDiscoveryTest, self).tearDown()

    def test_menu_registered(self):
        menu_pool.discovered = False
        menu_pool.discover_menus()
        # The following tests that get_registered_menus()
        # returns all menus registered based on the for_rendering flag
        # A list of menu classes registered regardless of whether they
        # have instances attached or not
        registered = menu_pool.get_registered_menus(for_rendering=False)
        # A list of menu classes registered and filter out any attached menu
        # if it does not have instances.
        registered_for_rendering = menu_pool.get_registered_menus(for_rendering=True)
        # We've registered three menus
        self.assertEqual(len(registered), 3)
        # But two of those are attached menus and shouldn't be rendered.
        self.assertEqual(len(registered_for_rendering), 1)
        # Attached both menus to separate pages
        create_page("apphooked-page", "nav_playground.html", "en",
                    published=True,
                    navigation_extenders='StaticMenu')
        create_page("apphooked-page", "nav_playground.html", "en",
                    published=True,
                    navigation_extenders='StaticMenu2')
        registered = menu_pool.get_registered_menus(for_rendering=False)
        registered_for_rendering = menu_pool.get_registered_menus(for_rendering=True)
        # The count should be 3 but grows to 5 because of the two published instances.
        # Even though we've registered three menus, the total is five because two
        # are attached menus and each attached menu has two instances.
        self.assertEqual(len(registered), 5)
        self.assertEqual(len(registered_for_rendering), 5)

    def test_menu_registered_in_renderer(self):
        menu_pool.discovered = False
        menu_pool.discover_menus()
        # The following tests that a menu renderer calculates the registered
        # menus on a request basis.
        request_1 = self.get_request('/en/')
        request_1_renderer = menu_pool.get_renderer(request_1)
        registered = menu_pool.get_registered_menus(for_rendering=False)
        self.assertEqual(len(registered), 3)
        self.assertEqual(len(request_1_renderer.menus), 1)
        create_page("apphooked-page", "nav_playground.html", "en",
                    published=True,
                    navigation_extenders='StaticMenu')
        create_page("apphooked-page", "nav_playground.html", "en",
                    published=True,
                    navigation_extenders='StaticMenu2')
        request_2 = self.get_request('/en/')
        request_2_renderer = menu_pool.get_renderer(request_2)
        # The count should be 3 but grows to 5 because of the two published instances.
        self.assertEqual(len(request_2_renderer.menus), 5)

    def test_menu_expanded(self):
        menu_pool.discovered = False
        menu_pool.discover_menus()
        with self.settings(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            with apphooks(SampleApp):
                page = create_page("apphooked-page", "nav_playground.html", "en",
                                   published=True, apphook="SampleApp",
                                   navigation_extenders='StaticMenu')
                self.assertTrue(menu_pool.discovered)
                menus = menu_pool.get_registered_menus()
                self.assertTrue(menu_pool.discovered)
                # Counts the number of StaticMenu (which is expanded) and StaticMenu2
                # (which is not) and checks the key name for the StaticMenu instances
                static_menus = 2
                static_menus_2 = 1
                for key, menu in menus.items():
                    if key.startswith('StaticMenu:'):
                        static_menus -= 1
                        # Expanded keys end with the pk of either the public
                        # or the draft version of the attached page.
                        self.assertTrue(key.endswith(str(page.get_public_object().pk)) or key.endswith(str(page.get_draft_object().pk)))
                    if key == 'StaticMenu2':
                        static_menus_2 -= 1
                self.assertEqual(static_menus, 0)
                self.assertEqual(static_menus_2, 0)

    def test_multiple_menus(self):
        with self.settings(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            with apphooks(NamespacedApp, SampleApp2):
                apphook_pool.discovered = False
                apphook_pool.discover_apps()
                create_page("apphooked-page", "nav_playground.html", "en",
                            published=True, apphook="SampleApp2")
                create_page("apphooked-page", "nav_playground.html", "en",
                            published=True,
                            navigation_extenders='StaticMenu')
                create_page("apphooked-page", "nav_playground.html", "en",
                            published=True, apphook="NamespacedApp", apphook_namespace='whatever',
                            navigation_extenders='StaticMenu')
                # Two of the three pages have a cms_enabled apphook attached.
                self.assertEqual(len(menu_pool.get_menus_by_attribute("cms_enabled", True)), 2)
class ExtendedFixturesMenuTests(ExtendedMenusFixture, BaseMenuTest):
    """
    Tree from fixture:
        + P1
        | + P2
        |   + P3
        | + P9
        |   + P10
        |     + P11
        + P4
        | + P5
        + P6 (not in menu)
          + P7
          + P8
    """

    def get_page(self, num):
        """Return the public page titled 'P<num>' from the fixture tree."""
        return Page.objects.public().get(title_set__title='P%s' % num)

    def get_all_pages(self):
        """Return the queryset of all public pages."""
        return Page.objects.public()

    def test_menu_failfast_on_invalid_usage(self):
        context = self.get_context()
        context['child'] = self.get_page(1)
        # test standard show_menu
        with self.settings(DEBUG=True, TEMPLATE_DEBUG=True):
            tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
            self.assertRaises(TemplateSyntaxError, tpl.render, context)

    def test_show_submenu_nephews(self):
        page_2 = self.get_page(2)
        context = self.get_context(path=page_2.get_absolute_url(), page=page_2)
        # show_sub_menu with an explicit nephew limit of 1
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # P2 is the selected node
        self.assertTrue(nodes[0].selected)
        # Should include P10 but not P11
        self.assertEqual(len(nodes[1].children), 1)
        self.assertFalse(nodes[1].children[0].children)
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # should now include both P10 and P11
        self.assertEqual(len(nodes[1].children), 1)
        self.assertEqual(len(nodes[1].children[0].children), 1)

    def test_show_submenu_template_root_level_none_no_nephew_limit(self):
        root = self.get_page(1)
        context = self.get_context(path=root.get_absolute_url(), page=root)
        # root_level of None with an effectively unlimited nephew limit
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 None 100 %}")
        tpl.render(context)
        nodes = context["children"]
        # default nephew limit, P2 and P9 in the nodes list
        self.assertEqual(len(nodes), 2)
class FixturesMenuTests(MenusFixture, BaseMenuTest):
"""
Tree from fixture:
+ P1
| + P2
| + P3
+ P4
| + P5
+ P6 (not in menu)
+ P7
+ P8
"""
def get_page(self, num):
return Page.objects.public().get(title_set__title='P%s' % num)
def get_all_pages(self):
return Page.objects.public()
def test_menu_failfast_on_invalid_usage(self):
context = self.get_context()
context['child'] = self.get_page(1)
# test standard show_menu
with self.settings(DEBUG=True, TEMPLATE_DEBUG=True):
tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
self.assertRaises(TemplateSyntaxError, tpl.render, context)
    def test_basic_cms_menu(self):
        """Only CMSMenu is registered and it renders one node per public page."""
        menus = menu_pool.get_registered_menus()
        self.assertEqual(len(menus), 1)
        with force_language("en"):
            response = self.client.get(self.get_pages_root())  # path = '/'
            self.assertEqual(response.status_code, 200)
        request = self.get_request()
        renderer = menu_pool.get_renderer(request)
        # test the cms menu class
        menu = renderer.get_menu('CMSMenu')
        nodes = menu.get_nodes(request)
        # Nodes should come back in tree order, i.e. pages ordered by node path.
        pages = self.get_all_pages().order_by('node__path')
        self.assertEqual(len(nodes), len(pages))
        self.assertSequenceEqual(
            [node.get_absolute_url() for node in nodes],
            [page.get_absolute_url() for page in pages],
        )
    def test_show_new_draft_page_in_menu(self):
        """
        Test checks if the menu cache is cleaned after creating a new draft page.
        """
        with self.login_user_context(self.get_superuser()):
            page_data_1 = self.get_new_page_data()
            response = self.client.post(URL_CMS_PAGE_ADD, page_data_1)
            self.assertRedirects(response, URL_CMS_PAGE)
        # Prime the draft menu cache; exactly one cache key must exist.
        request = self.get_request('/')
        renderer = menu_pool.get_renderer(request)
        renderer.draft_mode_active = True
        renderer.get_nodes()
        self.assertEqual(CacheKey.objects.count(), 1)
        with self.login_user_context(self.get_superuser()):
            page_data_2 = self.get_new_page_data()
            self.assertNotEqual(page_data_1['slug'], page_data_2['slug'])
            response = self.client.post(URL_CMS_PAGE_ADD, page_data_2)
            self.assertRedirects(response, URL_CMS_PAGE)
        page = Title.objects.drafts().get(slug=page_data_2['slug']).page
        # Re-render: the cache was invalidated by the page creation, so the
        # new draft page must show up as the last node.
        request = self.get_request('/')
        renderer = menu_pool.get_renderer(request)
        renderer.draft_mode_active = True
        nodes = renderer.get_nodes()
        self.assertEqual(CacheKey.objects.count(), 1)
        self.assertEqual(page.get_title(), nodes[-1].title)
    def test_show_page_in_menu_after_move_page(self):
        """
        Test checks if the menu cache is cleaned after moving a page.
        """
        page = create_page('page to move', 'nav_playground.html', 'en', published=True)
        # Prime the draft menu cache and remember where the page sits.
        request = self.get_request('/')
        renderer = menu_pool.get_renderer(request)
        renderer.draft_mode_active = True
        nodes_before = renderer.get_nodes()
        index_before = [i for i, s in enumerate(nodes_before) if s.title == page.get_title()]
        self.assertEqual(CacheKey.objects.count(), 1)
        with self.login_user_context(self.get_superuser()):
            # Moves the page to the second position in the tree
            data = {'id': page.pk, 'position': 1}
            endpoint = self.get_admin_url(Page, 'move_page', page.pk)
            response = self.client.post(endpoint, data)
            self.assertEqual(response.status_code, 200)
            # The move must have invalidated the menu cache.
            self.assertEqual(CacheKey.objects.count(), 0)
        request = self.get_request('/')
        renderer = menu_pool.get_renderer(request)
        renderer.draft_mode_active = True
        nodes_after = renderer.get_nodes()
        index_after = [i for i, s in enumerate(nodes_after) if s.title == page.get_title()]
        self.assertEqual(CacheKey.objects.count(), 1)
        self.assertNotEqual(
            index_before,
            index_after,
            'Index should not be the same after move page in navigation'
        )
    def test_show_page_in_menu_after_copy_page(self):
        """
        Test checks if the menu cache is cleaned after copying a page.
        """
        page = create_page('page to copy', 'nav_playground.html', 'en', published=True)
        # Prime the draft menu cache.
        request = self.get_request('/')
        renderer = menu_pool.get_renderer(request)
        renderer.draft_mode_active = True
        nodes_before = renderer.get_nodes()
        self.assertEqual(CacheKey.objects.count(), 1)
        with self.login_user_context(self.get_superuser()):
            # Copy the page
            data = {
                'position': 1,
                'source_site': 1,
                'copy_permissions': 'on',
                'copy_moderation': 'on',
            }
            endpoint = self.get_admin_url(Page, 'copy_page', page.pk)
            response = self.client.post(endpoint, data)
            self.assertEqual(response.status_code, 200)
            # The copy must have invalidated the menu cache.
            self.assertEqual(CacheKey.objects.count(), 0)
        request = self.get_request('/')
        renderer = menu_pool.get_renderer(request)
        renderer.draft_mode_active = True
        nodes_after = renderer.get_nodes()
        self.assertEqual(CacheKey.objects.count(), 1)
        # The copied page appears as a new, last node in the menu.
        self.assertGreater(len(nodes_after), len(nodes_before))
        self.assertEqual(page.get_title(), nodes_after[-1].title)
    def test_cms_menu_public_with_multiple_languages(self):
        """Menu nodes respect language fallbacks and hide_untranslated."""
        # Add an (unpublished) "de" translation to every draft page.
        for page in Page.objects.drafts():
            create_title(
                language='de',
                title=page.get_title('en'),
                page=page,
                slug='{}-de'.format(page.get_slug('en'))
            )
        pages = self.get_all_pages().order_by('node__path')
        # Fallbacks on
        request = self.get_request(path='/de/', language='de')
        renderer = menu_pool.get_renderer(request)
        menu = renderer.get_menu('CMSMenu')
        nodes = menu.get_nodes(request)
        self.assertEqual(len(nodes), len(pages))
        with force_language('de'):
            # Current language is "de" but urls should still point
            # to "en" because of fallbacks.
            self.assertSequenceEqual(
                [node.get_absolute_url() for node in nodes],
                [page.get_absolute_url('en', fallback=False) for page in pages],
            )
        # Fallbacks off
        request = self.get_request(path='/de/', language='de')
        lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
        lang_settings[1][1]['hide_untranslated'] = True
        with self.settings(CMS_LANGUAGES=lang_settings):
            renderer = menu_pool.get_renderer(request)
            menu = renderer.get_menu('CMSMenu')
            nodes = menu.get_nodes(request)
            # No "de" translation is published, so nothing renders.
            self.assertEqual(len(nodes), 0)
        for page in Page.objects.drafts().order_by('node__path'):
            page.publish('de')
        # Fallbacks on
        # This time however, the "de" translations are published.
        request = self.get_request(path='/de/', language='de')
        renderer = menu_pool.get_renderer(request)
        menu = renderer.get_menu('CMSMenu')
        nodes = menu.get_nodes(request)
        self.assertEqual(len(nodes), len(pages))
        with force_language('de'):
            self.assertSequenceEqual(
                [node.get_absolute_url() for node in nodes],
                [page.get_absolute_url('de', fallback=False) for page in pages],
            )
        # Fallbacks off
        request = self.get_request(path='/de/', language='de')
        with self.settings(CMS_LANGUAGES=lang_settings):
            renderer = menu_pool.get_renderer(request)
            menu = renderer.get_menu('CMSMenu')
            nodes = menu.get_nodes(request)
            with force_language('de'):
                self.assertSequenceEqual(
                    [node.get_absolute_url() for node in nodes],
                    [page.get_absolute_url('de', fallback=False) for page in pages],
                )
    def test_show_menu(self):
        """Default show_menu marks selected/sibling/descendant flags correctly."""
        root = self.get_page(1)
        context = self.get_context(page=root)
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        tpl.render(context)
        nodes = context['children']
        # Two root nodes: P1 (selected, current page) and P4 (its sibling).
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].selected, True)
        self.assertEqual(nodes[0].sibling, False)
        self.assertEqual(nodes[0].descendant, False)
        self.assertEqual(nodes[0].children[0].descendant, True)
        self.assertEqual(nodes[0].children[0].children[0].descendant, True)
        self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
        self.assertEqual(nodes[1].get_absolute_url(), self.get_page(4).get_absolute_url())
        self.assertEqual(nodes[1].sibling, True)
        self.assertEqual(nodes[1].selected, False)
    def test_show_menu_num_queries(self):
        """Rendering show_menu with a cold cache costs exactly five queries."""
        context = self.get_context()
        # test standard show_menu
        with self.assertNumQueries(5):
            """
            The queries should be:
                get all page nodes
                get all page permissions
                get all titles
                get the menu cache key
                set the menu cache key
            """
            tpl = Template("{% load menu_tags %}{% show_menu %}")
            tpl.render(context)
    def test_show_menu_cache_key_leak(self):
        """Repeated renders reuse one CacheKey instead of accumulating them."""
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        self.assertEqual(CacheKey.objects.count(), 0)
        tpl.render(context)
        self.assertEqual(CacheKey.objects.count(), 1)
        # A second render must hit the cache, not create another key.
        tpl.render(context)
        self.assertEqual(CacheKey.objects.count(), 1)
    def test_menu_cache_draft_only(self):
        # Tests that the cms uses a separate cache for draft & live
        public_page = self.get_page(1)
        draft_page = public_page.publisher_public
        edit_on_path = draft_page.get_absolute_url() + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
        edit_off_path = public_page.get_absolute_url() + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
        superuser = self.get_superuser()
        # Prime the public menu cache
        with self.login_user_context(superuser):
            context = self.get_context(path=edit_off_path, page=public_page)
            context['request'].session['cms_edit'] = False
            Template("{% load menu_tags %}{% show_menu %}").render(context)
        # This should prime the draft menu cache
        with self.login_user_context(superuser):
            context = self.get_context(path=edit_on_path, page=draft_page)
            context['request'].session['cms_edit'] = True
            Template("{% load menu_tags %}{% show_menu %}").render(context)
        # All nodes should be draft nodes
        node_ids = [node.id for node in context['children']]
        page_count = Page.objects.drafts().filter(pk__in=node_ids).count()
        # If the draft render leaked nodes from the public cache, some ids
        # would not resolve to draft pages and the counts would differ.
        self.assertEqual(len(node_ids), page_count, msg='Not all pages in the draft menu are draft')
    def test_menu_cache_live_only(self):
        # Tests that the cms uses a separate cache for draft & live
        public_page = self.get_page(1)
        draft_page = public_page.publisher_public
        edit_on_path = draft_page.get_absolute_url() + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
        edit_off_path = public_page.get_absolute_url() + '?preview&%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
        superuser = self.get_superuser()
        # Prime the draft menu cache
        with self.login_user_context(superuser):
            context = self.get_context(path=edit_on_path, page=draft_page)
            context['request'].session['cms_edit'] = True
            Template("{% load menu_tags %}{% show_menu %}").render(context)
        # This should prime the public menu cache
        with self.login_user_context(superuser):
            context = self.get_context(path=edit_off_path, page=public_page)
            context['request'].session['cms_edit'] = False
            context['request'].session['cms_preview'] = True
            Template("{% load menu_tags %}{% show_menu %}").render(context)
        # All nodes should be public nodes
        node_ids = [node.id for node in context['children']]
        page_count = Page.objects.public().filter(pk__in=node_ids).count()
        # If the public render leaked nodes from the draft cache, some ids
        # would not resolve to public pages and the counts would differ.
        self.assertEqual(len(node_ids), page_count, msg='Not all pages in the public menu are public')
    def test_menu_cache_respects_database_keys(self):
        """Deleting the CacheKey rows forces the menu to be recalculated."""
        public_page = self.get_page(1)
        # Prime the public menu cache
        context = self.get_context(path=public_page.get_absolute_url(), page=public_page)
        context['request'].session['cms_edit'] = False
        # Prime the cache
        with self.assertNumQueries(5):
            # The queries should be:
            #     get all page nodes
            #     get all page permissions
            #     get all titles
            #     get the menu cache key
            #     set the menu cache key
            Template("{% load menu_tags %}{% show_menu %}").render(context)
        # One new CacheKey should have been created
        self.assertEqual(CacheKey.objects.count(), 1)
        # Because its cached, only one query is made to the db
        with self.assertNumQueries(1):
            # The queries should be:
            #     get the menu cache key
            Template("{% load menu_tags %}{% show_menu %}").render(context)
        # Delete the current cache key but don't touch the cache
        CacheKey.objects.all().delete()
        # The menu should be recalculated
        with self.assertNumQueries(5):
            # The queries should be:
            #     check if cache key exists
            #     get all page nodes
            #     get all page permissions
            #     get all title objects
            #     set the menu cache key
            Template("{% load menu_tags %}{% show_menu %}").render(context)
def test_menu_keys_duplicate_clear(self):
"""
Tests that the menu clears all keys, including duplicates.
"""
CacheKey.objects.create(language="fr", site=1, key="a")
CacheKey.objects.create(language="fr", site=1, key="a")
self.assertEqual(CacheKey.objects.count(), 2)
menu_pool.clear(site_id=1, language='fr')
self.assertEqual(CacheKey.objects.count(), 0)
    def test_only_active_tree(self):
        """With extra_inactive=0, only the active branch expands."""
        context = self.get_context(page=self.get_page(1))
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        nodes = context['children']
        # P1 is active so its branch expands; P4's branch stays collapsed.
        self.assertEqual(len(nodes[1].children), 0)
        self.assertEqual(len(nodes[0].children), 1)
        self.assertEqual(len(nodes[0].children[0].children), 1)
        page_4 = self.get_page(4)
        context = self.get_context(path=page_4.get_absolute_url(), page=page_4)
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        nodes = context['children']
        # Now P4 is active, so the expansion flips to the other branch.
        self.assertEqual(len(nodes[1].children), 1)
        self.assertEqual(len(nodes[0].children), 0)
def test_only_one_active_level(self):
context = self.get_context(page=self.get_page(1))
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 1 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes[1].children), 0)
self.assertEqual(len(nodes[0].children), 1)
self.assertEqual(len(nodes[0].children[0].children), 0)
def test_only_level_zero(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 %}")
tpl.render(context)
nodes = context['children']
for node in nodes:
self.assertEqual(len(node.children), 0)
    def test_only_level_one(self):
        """show_menu 1 1 renders exactly the depth-2 pages of the current site."""
        site = get_current_site()
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu 1 1 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # One node per public page at tree depth 2, each rendered childless.
        level_2_public_pages = Page.objects.public().filter(node__depth=2, node__site=site)
        self.assertEqual(len(nodes), level_2_public_pages.count())
        for node in nodes:
            self.assertEqual(len(node.children), 0)
def test_only_level_one_active(self):
context = self.get_context(page=self.get_page(1))
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 1 1 0 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].descendant, True)
self.assertEqual(len(nodes[0].children), 0)
def test_level_zero_and_one(self):
context = self.get_context()
# test standard show_menu
tpl = Template("{% load menu_tags %}{% show_menu 0 1 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
for node in nodes:
self.assertEqual(len(node.children), 1)
    def test_show_submenu(self):
        """show_sub_menu renders the children of the current/active page."""
        context = self.get_context(page=self.get_page(1))
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(nodes[0].descendant, True)
        self.assertEqual(len(nodes), 1)
        self.assertEqual(len(nodes[0].children), 1)
        # Depth limit of 1: children of the submenu root are cut off.
        tpl = Template("{% load menu_tags %}{% show_sub_menu 1 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(len(nodes[0].children), 0)
        page_3 = self.get_page(3)
        context = self.get_context(path=page_3.get_absolute_url(), page=page_3)
        # root_level of 1: the submenu is rooted one level above the leaf.
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # P3 is the selected node
        self.assertFalse(nodes[0].selected)
        self.assertTrue(nodes[0].children[0].selected)
        # top level node should be P2
        self.assertEqual(nodes[0].get_absolute_url(), self.get_page(2).get_absolute_url())
        # should include P3 as well
        self.assertEqual(len(nodes[0].children), 1)
        page_2 = self.get_page(2)
        context = self.get_context(path=page_2.get_absolute_url(), page=page_2)
        # root_level of 0: the submenu is rooted at the top of the tree.
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 0 %}")
        tpl.render(context)
        nodes = context["children"]
        # P1 should be in the nav
        self.assertEqual(nodes[0].get_absolute_url(), self.get_page(1).get_absolute_url())
        # P2 is selected
        self.assertTrue(nodes[0].children[0].selected)
    def test_show_submenu_template_root_level_none(self):
        """show_sub_menu with root_level None applies the nephew limit."""
        root = self.get_page(1)
        context = self.get_context(path=root.get_absolute_url(), page=root)
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 None 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # First node is P2 (P1 children) thus not selected
        self.assertFalse(nodes[0].selected)
        # nephew limit of 1, so only P2 is the nodes list
        self.assertEqual(len(nodes), 1)
        # P3 is a child of P2, but not in nodes list
        self.assertTrue(nodes[0].children)
    def test_show_breadcrumb(self):
        """show_breadcrumb renders ancestors and bridges not-in-menu pages."""
        page_3 = self.get_page(3)
        context = self.get_context(path=self.get_page(3).get_absolute_url(), page=page_3)
        tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
        tpl.render(context)
        nodes = context['ancestors']
        # P3 breadcrumb: P1 > P2 > P3.
        self.assertEqual(len(nodes), 3)
        # start_level of 1 drops the root crumb.
        tpl = Template("{% load menu_tags %}{% show_breadcrumb 1 %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 2)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 1)
        tpl = Template("{% load menu_tags %}{% show_breadcrumb 1 %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 0)
        # Taking P1 out of navigation must not break P2's breadcrumb: the
        # home crumb is synthesized as a plain NavigationNode instead.
        page1 = self.get_page(1)
        page1.in_navigation = False
        page1.save()
        page2 = self.get_page(2)
        context = self.get_context(path=page2.get_absolute_url(), page=page2)
        tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
        self.assertEqual(isinstance(nodes[0], NavigationNode), True)
        self.assertEqual(nodes[1].get_absolute_url(), page2.get_absolute_url())
    def test_language_chooser(self):
        # test simple language chooser with default args
        lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
        # Hide the first configured language from the public.
        lang_settings[1][0]['public'] = False
        with self.settings(CMS_LANGUAGES=lang_settings):
            context = self.get_context(path=self.get_page(3).get_absolute_url())
            tpl = Template("{% load menu_tags %}{% language_chooser %}")
            tpl.render(context)
            self.assertEqual(len(context['languages']), 3)
            # try a different template and some different args
            tpl = Template("{% load menu_tags %}{% language_chooser 'menu/test_language_chooser.html' %}")
            tpl.render(context)
            self.assertEqual(context['template'], 'menu/test_language_chooser.html')
            tpl = Template("{% load menu_tags %}{% language_chooser 'short' 'menu/test_language_chooser.html' %}")
            tpl.render(context)
            self.assertEqual(context['template'], 'menu/test_language_chooser.html')
            # With the 'short' flag, code and display name are identical.
            for lang in context['languages']:
                self.assertEqual(*lang)
    def test_language_chooser_all_for_staff(self):
        """
        Language chooser should show all configured languages
        on the current site if the user is staff.
        """
        superuser = self.get_superuser()
        lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
        # DE is marked as public False
        lang_settings[1][1]['public'] = False
        # FR is marked as public False
        lang_settings[1][2]['public'] = False
        with self.settings(CMS_LANGUAGES=lang_settings):
            with self.login_user_context(superuser):
                context = self.get_context(path=self.get_page(3).get_absolute_url())
                Template("{% load menu_tags %}{% language_chooser %}").render(context)
                # Staff see every configured language, public or not.
                self.assertEqual(len(context['languages']), 5)
                self.assertSequenceEqual(
                    sorted(lang[0] for lang in context['languages']),
                    ['de', 'en', 'es-mx', 'fr', 'pt-br']
                )
    def test_language_chooser_public_for_anon(self):
        """
        Language chooser should only show public configured languages
        on the current site if the user is anon.
        """
        # PT-BR is already set to public False
        lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
        # DE is marked as public False
        lang_settings[1][1]['public'] = False
        # FR is marked as public False
        lang_settings[1][2]['public'] = False
        with self.settings(CMS_LANGUAGES=lang_settings):
            context = self.get_context(path=self.get_page(3).get_absolute_url())
            Template("{% load menu_tags %}{% language_chooser %}").render(context)
            # Anonymous users only see the remaining public languages.
            self.assertEqual(len(context['languages']), 2)
            self.assertSequenceEqual(
                sorted(lang[0] for lang in context['languages']),
                ['en', 'es-mx']
            )
def test_page_language_url(self):
path = self.get_page(3).get_absolute_url()
context = self.get_context(path=path)
tpl = Template("{%% load menu_tags %%}{%% page_language_url '%s' %%}" % 'en')
url = tpl.render(context)
self.assertEqual(url, "%s" % path)
    def test_show_menu_below_id(self):
        """show_menu_below_id renders the subtree of the page with a reverse_id."""
        page2 = self.get_page(2)
        page2.reverse_id = "hello"
        page2.save()
        page2 = self.reload(page2)
        self.assertEqual(page2.reverse_id, "hello")
        page5 = self.get_page(5)
        context = self.get_context(path=page5.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'hello' %}")
        tpl.render(context)
        nodes = context['children']
        # Only P3 (the child of P2) is rendered.
        self.assertEqual(len(nodes), 1)
        page3_url = self.get_page(3).get_absolute_url()
        self.assertEqual(nodes[0].get_absolute_url(), page3_url)
        # Even with P2 taken out of navigation, its subtree still renders.
        page2.in_navigation = False
        page2.save()
        context = self.get_context(path=page5.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'hello' %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].get_absolute_url(), page3_url)
def test_unpublished(self):
page2 = self.get_page(2)
page2.title_set.update(published=False)
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
self.assertEqual(len(nodes[0].children), 0)
def test_home_not_in_menu(self):
page1 = self.get_page(1)
page1.in_navigation = False
page1.save()
page4 = self.get_page(4)
page4.in_navigation = False
page4.save()
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].get_absolute_url(), self.get_page(2).get_absolute_url())
self.assertEqual(nodes[0].children[0].get_absolute_url(), self.get_page(3).get_absolute_url())
page4 = self.get_page(4)
page4.in_navigation = True
page4.save()
menu_pool.clear(settings.SITE_ID)
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(len(nodes), 2)
    def test_show_submenu_from_non_menu_page(self):
        """
        Here's the structure bit we're interested in:

        + P6 (not in menu)
          + P7
          + P8

        When we render P6, there should be a menu entry for P7 and P8 if the
        tag parameters are "1 XXX XXX XXX"
        """
        page6 = self.get_page(6)
        context = self.get_context(page6.get_absolute_url(), page=page6)
        # Start level 1 relative to P6: expect P6's in-navigation children.
        tpl = Template("{% load menu_tags %}{% show_menu 1 100 0 1 %}")
        tpl.render(context)
        nodes = context['children']
        number_of_p6_children = page6.get_child_pages().filter(in_navigation=True).count()
        self.assertEqual(len(nodes), number_of_p6_children)
        # The same tag rendered while on P7 (a child) still starts at level 1,
        # so the node count is unchanged.
        page7 = self.get_page(7)
        context = self.get_context(page7.get_absolute_url(), page=page7)
        tpl = Template("{% load menu_tags %}{% show_menu 1 100 0 1 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), number_of_p6_children)
        # Start level 2 (same context reused): now P7's children are rendered.
        tpl = Template("{% load menu_tags %}{% show_menu 2 100 0 1 %}")
        tpl.render(context)
        nodes = context['children']
        number_of_p7_children = page7.get_child_pages().filter(in_navigation=True).count()
        self.assertEqual(len(nodes), number_of_p7_children)
def test_show_breadcrumb_invisible(self):
# Must use the drafts to find the parent when calling create_page
parent = Page.objects.drafts().get(title_set__title='P3')
invisible_page = create_page("invisible", "nav_playground.html", "en",
parent=parent, published=True, in_navigation=False)
context = self.get_context(
path=invisible_page.get_absolute_url(),
page=invisible_page.publisher_public,
)
tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 3)
tpl = Template("{% load menu_tags %}{% show_breadcrumb 0 'menu/breadcrumb.html' 1 %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 3)
tpl = Template("{% load menu_tags %}{% show_breadcrumb 0 'menu/breadcrumb.html' 0 %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(len(nodes), 4)
class MenuTests(BaseMenuTest):
    """Tests for the low-level node-tree builder, the menu helper utils and
    ``show_menu`` rendering under missing/invalid request languages."""

    def test_build_nodes_inner_for_worst_case_menu(self):
        '''
        Tests the worst case scenario
            node5
             node4
              node3
               node2
                node1
        '''
        # Each node's parent_id points at a node that appears *later* in the
        # input list, forcing the builder to resolve forward references.
        node1 = NavigationNode('Test1', '/test1/', 1, 2)
        node2 = NavigationNode('Test2', '/test2/', 2, 3)
        node3 = NavigationNode('Test3', '/test3/', 3, 4)
        node4 = NavigationNode('Test4', '/test4/', 4, 5)
        node5 = NavigationNode('Test5', '/test5/', 5, None)
        menu_class_name = 'Test'
        nodes = [node1, node2, node3, node4, node5, ]
        len_nodes = len(nodes)
        final_list = _build_nodes_inner_for_one_menu(nodes, menu_class_name)
        self.assertEqual(len(final_list), len_nodes)
        # Every node must be linked to its parent...
        self.assertEqual(node1.parent, node2)
        self.assertEqual(node2.parent, node3)
        self.assertEqual(node3.parent, node4)
        self.assertEqual(node4.parent, node5)
        self.assertEqual(node5.parent, None)
        # ...and each parent's children list must contain exactly its child.
        self.assertEqual(node1.children, [])
        self.assertEqual(node2.children, [node1])
        self.assertEqual(node3.children, [node2])
        self.assertEqual(node4.children, [node3])
        self.assertEqual(node5.children, [node4])

    def test_build_nodes_inner_for_circular_menu(self):
        '''
        TODO:
        To properly handle this test we need to have a circular dependency
        detection system.
        Go nuts implementing it :)
        '''
        pass

    def test_build_nodes_inner_for_broken_menu(self):
        '''
        Tests a broken menu tree (non-existing parent)
            node5
             node4
              node3

        <non-existant>
         node2
          node1
        '''
        # node2 references parent_id 12 which does not exist, so node2 and
        # its child node1 must be dropped from the final list.
        node1 = NavigationNode('Test1', '/test1/', 1, 2)
        node2 = NavigationNode('Test2', '/test2/', 2, 12)
        node3 = NavigationNode('Test3', '/test3/', 3, 4)
        node4 = NavigationNode('Test4', '/test4/', 4, 5)
        node5 = NavigationNode('Test5', '/test5/', 5, None)
        menu_class_name = 'Test'
        nodes = [node1, node2, node3, node4, node5, ]
        final_list = _build_nodes_inner_for_one_menu(nodes, menu_class_name)
        self.assertEqual(len(final_list), 3)
        self.assertFalse(node1 in final_list)
        self.assertFalse(node2 in final_list)
        # Orphaned nodes keep no parent/children links.
        self.assertEqual(node1.parent, None)
        self.assertEqual(node2.parent, None)
        self.assertEqual(node3.parent, node4)
        self.assertEqual(node4.parent, node5)
        self.assertEqual(node5.parent, None)
        self.assertEqual(node1.children, [])
        self.assertEqual(node2.children, [])
        self.assertEqual(node3.children, [])
        self.assertEqual(node4.children, [node3])
        self.assertEqual(node5.children, [node4])

    def test_utils_mark_descendants(self):
        """mark_descendants() flags every node below the root nodes."""
        tree_nodes, flat_nodes = self._get_nodes()
        mark_descendants(tree_nodes)
        for node in flat_nodes:
            self.assertTrue(node.descendant, node)

    def test_utils_find_selected(self):
        """find_selected() returns the selected node; None for empty input."""
        tree_nodes, flat_nodes = self._get_nodes()
        node = flat_nodes[0]
        selected = find_selected(tree_nodes)
        self.assertEqual(selected, node)
        selected = find_selected([])
        self.assertEqual(selected, None)

    def test_utils_cut_levels(self):
        """cut_levels() keeps only the nodes at the requested depth."""
        tree_nodes, flat_nodes = self._get_nodes()
        self.assertEqual(cut_levels(tree_nodes, 1), [flat_nodes[1]])

    def test_empty_menu(self):
        """Rendering show_menu with no pages yields an empty node list."""
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 0)

    def test_render_menu_with_invalid_language(self):
        """
        When rendering the menu, always fallback to a configured
        language on the current site.
        """
        # Refs - https://github.com/divio/django-cms/issues/6179
        site_2 = Site.objects.create(id=2, name='example-2.com', domain='example-2.com')
        de_defaults = {
            'site': site_2,
            'template': 'nav_playground.html',
            'language': 'de',
        }
        fr_defaults = {
            'site': site_2,
            'template': 'nav_playground.html',
            'language': 'fr',
        }
        create_page('DE-P1', published=True, in_navigation=True, **de_defaults)
        create_page('DE-P2', published=True, in_navigation=True, **de_defaults)
        create_page('DE-P3', published=True, in_navigation=True, **de_defaults)
        create_page('FR-P1', published=True, in_navigation=True, **fr_defaults)
        create_page('FR-P2', published=True, in_navigation=True, **fr_defaults)
        with self.settings(SITE_ID=2):
            # 'en' is not configured for site 2: the menu should fall back
            # to the configured languages (de pages first, then fr).
            request = self.get_request('/en/')
            context = Context()
            context['request'] = request
            tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
            tpl.render(context)
            nodes = context['children']
            self.assertEqual(len(nodes), 5)
            self.assertEqual(nodes[0].title, 'DE-P1')
            self.assertEqual(nodes[0].get_absolute_url(), '/de/de-p1/')
            self.assertEqual(nodes[1].title, 'DE-P2')
            self.assertEqual(nodes[1].get_absolute_url(), '/de/de-p2/')
            self.assertEqual(nodes[2].title, 'DE-P3')
            self.assertEqual(nodes[2].get_absolute_url(), '/de/de-p3/')
            self.assertEqual(nodes[3].title, 'FR-P1')
            self.assertEqual(nodes[3].get_absolute_url(), '/fr/fr-p1/')
            self.assertEqual(nodes[4].title, 'FR-P2')
            self.assertEqual(nodes[4].get_absolute_url(), '/fr/fr-p2/')
        # Clear the cached menu so the second request rebuilds it.
        menu_pool.clear(site_id=2)
        with self.settings(SITE_ID=2):
            # Same fallback behaviour when requesting a concrete page path.
            request = self.get_request('/en/de-p2/')
            context = Context()
            context['request'] = request
            tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
            tpl.render(context)
            nodes = context['children']
            self.assertEqual(len(nodes), 5)
            self.assertEqual(nodes[0].title, 'DE-P1')
            self.assertEqual(nodes[0].get_absolute_url(), '/de/de-p1/')
            self.assertEqual(nodes[1].title, 'DE-P2')
            self.assertEqual(nodes[1].get_absolute_url(), '/de/de-p2/')
            self.assertEqual(nodes[2].title, 'DE-P3')
            self.assertEqual(nodes[2].get_absolute_url(), '/de/de-p3/')
            self.assertEqual(nodes[3].title, 'FR-P1')
            self.assertEqual(nodes[3].get_absolute_url(), '/fr/fr-p1/')
            self.assertEqual(nodes[4].title, 'FR-P2')
            self.assertEqual(nodes[4].get_absolute_url(), '/fr/fr-p2/')

    def test_render_menu_with_invalid_language_and_page(self):
        """
        This tests an edge-case where the user requests a
        language not configure for the current site
        while having pages on the current site with unconfigured
        translations.
        """
        # Refs - https://github.com/divio/django-cms/issues/6179
        site_2 = Site.objects.create(id=2, name='example-2.com', domain='example-2.com')
        de_defaults = {
            'site': site_2,
            'template': 'nav_playground.html',
            'language': 'de',
            'in_navigation': True,
        }
        nl_defaults = {
            'template': 'nav_playground.html',
            'in_navigation': True,
        }
        create_page('DE-P1', published=True, **de_defaults)
        create_page('DE-P2', published=True, **de_defaults)
        create_page('DE-P3', published=True, **de_defaults)
        # The nl language is not configured for the current site
        # as a result, we have to create the pages manually.
        nl_page_1 = Page(**nl_defaults)
        nl_page_1.set_tree_node(site=site_2, target=None)
        nl_page_1.save()
        nl_page_1.title_set.create(
            language='nl',
            title='NL-P1',
            slug='nl-p1',
        )
        nl_page_1.publish('nl')
        nl_page_2 = Page(**nl_defaults)
        nl_page_2.set_tree_node(site=site_2, target=None)
        nl_page_2.save()
        nl_page_2.title_set.create(
            language='nl',
            title='NL-P2',
            slug='nl-p2',
        )
        nl_page_2.publish('nl')
        # nl_page_2 also gets an fr translation, so it is reachable via
        # the configured fallback; nl_page_1 is not.
        create_title('fr', 'FR-P2', nl_page_2)
        nl_page_2.publish('fr')
        with self.settings(SITE_ID=2):
            request = self.get_request('/en/')
            context = Context()
            context['request'] = request
            tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
            tpl.render(context)
            nodes = context['children']
            # Only the de pages plus the fr translation appear; the
            # nl-only page is excluded (nl is not configured).
            self.assertEqual(len(nodes), 4)
            self.assertEqual(nodes[0].title, 'DE-P1')
            self.assertEqual(nodes[0].get_absolute_url(), '/de/de-p1/')
            self.assertEqual(nodes[1].title, 'DE-P2')
            self.assertEqual(nodes[1].get_absolute_url(), '/de/de-p2/')
            self.assertEqual(nodes[2].title, 'DE-P3')
            self.assertEqual(nodes[2].get_absolute_url(), '/de/de-p3/')
            self.assertEqual(nodes[3].title, 'FR-P2')
            self.assertEqual(nodes[3].get_absolute_url(), '/fr/fr-p2/')
        menu_pool.clear(site_id=2)
        with self.settings(SITE_ID=2):
            request = self.get_request('/en/de-p2/')
            context = Context()
            context['request'] = request
            tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
            tpl.render(context)
            nodes = context['children']
            self.assertEqual(len(nodes), 4)
            self.assertEqual(nodes[0].title, 'DE-P1')
            self.assertEqual(nodes[0].get_absolute_url(), '/de/de-p1/')
            self.assertEqual(nodes[1].title, 'DE-P2')
            self.assertEqual(nodes[1].get_absolute_url(), '/de/de-p2/')
            self.assertEqual(nodes[2].title, 'DE-P3')
            self.assertEqual(nodes[2].get_absolute_url(), '/de/de-p3/')
            self.assertEqual(nodes[3].title, 'FR-P2')
            self.assertEqual(nodes[3].get_absolute_url(), '/fr/fr-p2/')

    def test_render_menu_with_invalid_language_and_no_fallbacks(self):
        """
        The requested language is valid but there's no page
        with it and the user has disabled all fallbacks.

        The cms should render only nodes for the requested language.
        """
        defaults = {
            'template': 'nav_playground.html',
            'language': 'de',
        }
        create_page('DE-P1', published=True, in_navigation=True, **defaults)
        create_page('DE-P2', published=True, in_navigation=True, **defaults)
        create_page('DE-P3', published=True, in_navigation=True, **defaults)
        # Disable fallbacks for the first two configured languages.
        lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
        lang_settings[1][0]['fallbacks'] = []
        lang_settings[1][1]['fallbacks'] = []
        with self.settings(CMS_LANGUAGES=lang_settings):
            request = self.get_request('/en/')
            context = Context()
            context['request'] = request
            tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
            tpl.render(context)
            nodes = context['children']
            # No en pages exist and no fallback is allowed -> empty menu.
            self.assertEqual(len(nodes), 0)
        menu_pool.clear(site_id=1)
        with self.settings(CMS_LANGUAGES=lang_settings):
            request = self.get_request('/en/de-p2/')
            context = Context()
            context['request'] = request
            tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
            tpl.render(context)
            nodes = context['children']
            self.assertEqual(len(nodes), 0)
@override_settings(CMS_PERMISSION=False)
class AdvancedSoftrootTests(SoftrootFixture, CMSTestCase):
    """
    Tree in fixture (as taken from issue 662):

        top
            root
                aaa
                    111
                        ccc
                            ddd
                    222
                bbb
                    333
                    444

    In the fixture, all pages are "in_navigation", "published" and
    NOT-"soft_root".

    What is a soft root?

        If a page is a soft root, it becomes the root page in the menu if
        we are currently on or under that page.

        If we are above that page, the children of this page are not shown.
    """

    def tearDown(self):
        # Fully reset the page tree and every cached menu between tests.
        Page.objects.all().delete()
        menu_pool.clear(all=True)

    def get_page(self, name):
        """Return the *public* page whose slug is ``name``."""
        return Page.objects.public().get(title_set__slug=name)

    def assertTreeQuality(self, a, b, *attrs):
        """
        Checks that the node-lists a and b are the same for attrs.

        This is recursive over the tree.
        """
        msg = '%r != %r with %r, %r' % (len(a), len(b), a, b)
        self.assertEqual(len(a), len(b), msg)
        for n1, n2 in zip(a, b):
            for attr in attrs:
                a1 = getattr(n1, attr)
                a2 = getattr(n2, attr)
                msg = '%r != %r with %r, %r (%s)' % (a1, a2, n1, n2, attr)
                self.assertEqual(a1, a2, msg)
            # FIX: propagate *attrs into the recursion. Previously the
            # recursive call dropped the attrs, so levels below the first
            # were only compared by child-list length, contradicting the
            # documented contract ("same for attrs ... recursive").
            self.assertTreeQuality(n1.children, n2.children, *attrs)

    def test_top_not_in_nav(self):
        """
        top: not in navigation

        tag: show_menu 0 100 0 100

        context shared: current page is aaa
        context 1: root is NOT a softroot
        context 2: root IS a softroot

        expected result: the two node-trees should be equal
        """
        top = self.get_page('top')
        top.in_navigation = False
        top.save()
        aaa = self.get_page('aaa')
        # root is NOT a soft root
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        hard_root = context['children']
        # root IS a soft root
        root = self.get_page('root')
        root.soft_root = True
        root.save()
        aaa = self.get_page('aaa')
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        soft_root = context['children']
        # assert the two trees are equal in terms of 'level' and 'title'
        self.assertTreeQuality(hard_root, soft_root, 'level', 'title')

    def test_menu_tree_without_soft_root(self):
        """
        tag: show_menu 0 100 0 100

        expected result 1:
            0:top
               1:root
                  2:aaa
                     3:111
                        4:ccc
                           5:ddd
                     3:222
                  2:bbb
        """
        aaa = self.get_page('aaa')
        # root is NOT a soft root
        context = self.get_context(aaa.get_absolute_url(), page=aaa)
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        hard_root = context['children']
        mock_tree = [
            AttributeObject(title='top', level=0, children=[
                AttributeObject(title='root', level=1, children=[
                    AttributeObject(title='aaa', level=2, children=[
                        AttributeObject(title='111', level=3, children=[
                            AttributeObject(title='ccc', level=4, children=[
                                AttributeObject(title='ddd', level=5, children=[])
                            ])
                        ]),
                        AttributeObject(title='222', level=3, children=[])
                    ]),
                    AttributeObject(title='bbb', level=2, children=[])
                ])
            ])
        ]
        # NOTE: no attrs are passed here, so only the tree *structure*
        # (child counts per level) is compared against the mock.
        self.assertTreeQuality(hard_root, mock_tree)

    def test_menu_tree_with_soft_root(self):
        """
        tag: show_menu 0 100 0 100

        expected result 2:
            0:root
               1:aaa
                  2:111
                     3:ccc
                        4:ddd
                  2:222
               1:bbb
        """
        # root IS a soft root
        root = self.get_page('root')
        root.soft_root = True
        root.save()
        aaa = self.get_page('aaa')
        context = self.get_context(aaa.get_absolute_url(), page=aaa)
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        soft_root = context['children']
        mock_tree = [
            AttributeObject(title='root', level=0, children=[
                AttributeObject(title='aaa', level=1, children=[
                    AttributeObject(title='111', level=2, children=[
                        AttributeObject(title='ccc', level=3, children=[
                            AttributeObject(title='ddd', level=4, children=[])
                        ])
                    ]),
                    AttributeObject(title='222', level=2, children=[])
                ]),
                AttributeObject(title='bbb', level=1, children=[])
            ])
        ]
        self.assertTreeQuality(soft_root, mock_tree, 'title', 'level')
class ShowSubMenuCheck(SubMenusFixture, BaseMenuTest):
    """
    Tree from fixture:

        + P1
        | + P2
        |   + P3
        + P4
        | + P5
        + P6
          + P7 (not in menu)
          + P8
    """

    def test_show_submenu(self):
        """show_sub_menu skips P7 (not in navigation) and shows only P8."""
        page = self.get_page(6)
        subpage = self.get_page(8)
        context = self.get_context(page.get_absolute_url(), page=page)
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].id, subpage.pk)

    def test_show_submenu_num_queries(self):
        """Rendering the sub-menu must execute exactly 5 queries."""
        page = self.get_page(6)
        subpage = self.get_page(8)
        context = self.get_context(page.get_absolute_url(), page=page)
        # test standard show_menu
        with self.assertNumQueries(5):
            """
            The queries should be:
                get all page nodes
                get all page permissions
                get all titles
                get the menu cache key
                set the menu cache key
            """
            tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
            tpl.render(context)
        # Output is still correct, not just cheap.
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].id, subpage.pk)
class ShowMenuBelowIdTests(BaseMenuTest):
    """
    Test for issue 521

    Build the following tree:

        A
        |-B
          |-C
        \\-D (not in nav)
    """

    def test_not_in_navigation(self):
        """Pages excluded from navigation are omitted below the anchor."""
        a = create_page('A', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
                        published=True, in_navigation=True)
        c = create_page('C', 'nav_playground.html', 'en', parent=b,
                        published=True, in_navigation=True)
        create_page('D', 'nav_playground.html', 'en', parent=self.reload(b),
                    published=True, in_navigation=False)
        context = self.get_context(a.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # Only B is a root node below 'a'; D is hidden.
        self.assertEqual(len(nodes), 1, nodes)
        node = nodes[0]
        self.assertEqual(node.id, b.publisher_public.id)
        children = node.children
        self.assertEqual(len(children), 1, repr(children))
        child = children[0]
        self.assertEqual(child.id, c.publisher_public.id)

    def test_menu_beyond_soft_root(self):
        """
        Test for issue 4107

        Build the following tree:

            A
            |-B (soft_root)
              |-C
        """
        stdkwargs = {
            'template': 'nav_playground.html',
            'language': 'en',
            'published': True,
            'in_navigation': True,
        }
        a = create_page('A', reverse_id='a', **stdkwargs)
        a_public = a.publisher_public
        b = create_page('B', parent=a, soft_root=True, **stdkwargs)
        b_public = b.publisher_public
        c = create_page('C', parent=b, **stdkwargs)
        c_public = c.publisher_public
        context = self.get_context(a.get_absolute_url(), page=a_public)
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        a_node = nodes[0]
        self.assertEqual(a_node.id, a.publisher_public.pk)  # On A, show from A
        self.assertEqual(len(a_node.children), 1)
        b_node = a_node.children[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        context = self.get_context(b.get_absolute_url(), page=b_public)
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On B, show from B
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        context = self.get_context(c.get_absolute_url(), page=c_public)
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On C, show from B
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        # Same three positions, but anchored below reverse_id 'a': the
        # soft root must not hide B/C even when viewed from above it.
        context = self.get_context(a.get_absolute_url(), page=a_public)
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On A, show from B (since below A)
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        context = self.get_context(b.get_absolute_url(), page=b_public)
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On B, show from B (since below A)
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)
        context = self.get_context(c.get_absolute_url(), page=c_public)
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # check whole menu
        self.assertEqual(len(nodes), 1)
        b_node = nodes[0]
        self.assertEqual(b_node.id, b.publisher_public.pk)  # On C, show from B (since below A)
        self.assertEqual(len(b_node.children), 1)
        c_node = b_node.children[0]
        self.assertEqual(c_node.id, c.publisher_public.pk)
        self.assertEqual(len(c_node.children), 0)

    def test_not_in_navigation_num_queries(self):
        """
        Test for issue 521

        Build the following tree:

            A
            |-B
              |-C
            \\-D (not in nav)
        """
        a = create_page('A', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
                        published=True, in_navigation=True)
        c = create_page('C', 'nav_playground.html', 'en', parent=b,
                        published=True, in_navigation=True)
        create_page('D', 'nav_playground.html', 'en', parent=self.reload(b),
                    published=True, in_navigation=False)
        with LanguageOverride('en'):
            context = self.get_context(a.get_absolute_url())
            with self.assertNumQueries(5):
                """
                The queries should be:
                    get all page nodes
                    get all page permissions
                    get all titles
                    get the menu cache key
                    set the menu cache key
                """
                # Actually seems to run:
                tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
                tpl.render(context)
            nodes = context['children']
            self.assertEqual(len(nodes), 1, nodes)
            node = nodes[0]
            self.assertEqual(node.id, b.publisher_public.id)
            children = node.children
            self.assertEqual(len(children), 1, repr(children))
            child = children[0]
            self.assertEqual(child.id, c.publisher_public.id)

    def test_menu_in_soft_root(self):
        """
        Test for issue 3504

        Build the following tree:

            A
            |-B
            C (soft_root)
        """
        a = create_page('A', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
                        published=True, in_navigation=True)
        c = create_page('C', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, soft_root=True)
        context = self.get_context(a.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        node = nodes[0]
        self.assertEqual(node.id, b.publisher_public.id)
        # Rendering from inside the *sibling* soft root must still show
        # the menu below 'a'.
        context = self.get_context(c.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        node = nodes[0]
        self.assertEqual(node.id, b.publisher_public.id)
@override_settings(
    CMS_PERMISSION=True,
    CMS_PUBLIC_FOR='staff',
)
class ViewPermissionMenuTests(CMSTestCase):
    """Results and exact query counts of get_visible_nodes() under the
    permission system (CMS_PUBLIC_FOR='staff' unless overridden)."""

    def setUp(self):
        self.page = create_page('page', 'nav_playground.html', 'en')
        self.pages = [self.page]
        self.user = self.get_standard_user()
        self.site = get_current_site()

    def get_request(self, user=None):
        # NOTE: this deliberately returns a *class* (not an instance)
        # whose class attributes mimic the request API that
        # get_visible_nodes() reads; attribute access works the same.
        attrs = {
            'user': user or AnonymousUser(),
            'REQUEST': {},
            'POST': {},
            'GET': {},
            'session': {},
        }
        return type('Request', (object,), attrs)

    def test_public_for_all_staff(self):
        """Staff users see the page when pages are public for staff."""
        request = self.get_request(self.user)
        request.user.is_staff = True
        with self.assertNumQueries(4):
            """
            The queries are:
            User permissions
            Content type
            GlobalPagePermission query
            PagePermission count query
            """
            pages = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(pages, self.pages)

    @override_settings(CMS_PUBLIC_FOR='all')
    def test_public_for_all(self):
        """Authenticated non-staff users see public-for-all pages."""
        request = self.get_request(self.user)
        with self.assertNumQueries(4):
            """
            The queries are:
            User permissions
            Content type
            GlobalPagePermission query
            PagePermission query for affected pages
            """
            pages = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(pages, self.pages)

    @override_settings(CMS_PUBLIC_FOR='all')
    def test_unauthed(self):
        """Anonymous users see public-for-all pages with a single query."""
        request = self.get_request()
        with self.assertNumQueries(1):
            """
            The query is:
            PagePermission query for affected pages
            """
            pages = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(pages, self.pages)

    def test_authed_basic_perm(self):
        """The global 'view_page' permission grants access cheaply."""
        self.user.user_permissions.add(Permission.objects.get(codename='view_page'))
        request = self.get_request(self.user)
        with self.assertNumQueries(2):
            """
            The queries are:
            User permissions
            Content type
            """
            pages = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(pages, self.pages)

    def test_authed_no_access(self):
        """Authenticated users without any permission see nothing."""
        request = self.get_request(self.user)
        with self.assertNumQueries(4):
            """
            The queries are:
            View Permission Calculation Query
            GlobalpagePermission query for user
            User permissions
            Content type
            """
            pages = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(pages, [])

    def test_unauthed_no_access(self):
        """Anonymous users are rejected without touching the database."""
        request = self.get_request()
        with self.assertNumQueries(0):
            nodes = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(nodes, [])

    def test_page_permissions(self):
        """A per-user PagePermission grants access to that page."""
        request = self.get_request(self.user)
        PagePermission.objects.create(can_view=True, user=self.user, page=self.page)
        with self.assertNumQueries(4):
            """
            The queries are:
            PagePermission query for affected pages
            User permissions
            Content type
            GlobalpagePermission query for user
            """
            pages = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(pages, self.pages)

    def test_page_permissions_view_groups(self):
        """A group-based PagePermission grants access (one extra query)."""
        group = Group.objects.create(name='testgroup')
        self.user.groups.add(group)
        request = self.get_request(self.user)
        PagePermission.objects.create(can_view=True, group=group, page=self.page)
        with self.assertNumQueries(5):
            """
            The queries are:
            PagePermission query for affected pages
            User permissions
            Content type
            GlobalpagePermission query for user
            Group query via PagePermission
            """
            pages = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(pages, self.pages)

    def test_global_permission(self):
        """A GlobalPagePermission short-circuits per-page checks."""
        GlobalPagePermission.objects.create(can_view=True, user=self.user)
        request = self.get_request(self.user)
        group = Group.objects.create(name='testgroup')
        PagePermission.objects.create(can_view=True, group=group, page=self.page)
        with self.assertNumQueries(3):
            """
            The queries are:
            User permissions
            Content type
            GlobalpagePermission query for user
            """
            pages = get_visible_nodes(request, self.pages, self.site)
        self.assertEqual(pages, self.pages)
@override_settings(
    CMS_PERMISSION=True,
    CMS_PUBLIC_FOR='all',
)
class PublicViewPermissionMenuTests(CMSTestCase):
    """get_visible_nodes() honours per-user view restrictions granted on a
    page and its descendants, across draft and public querysets."""

    def setUp(self):
        """
        Create this published hierarchy:
             A
          B1   B2
         C1 C2  C3 C4
        """
        l = 'nav_playground.html'  # noqa: E741 -- template name shorthand
        kw = dict(published=True, in_navigation=True)
        a = create_page('a', l, 'en', **kw)
        b1 = create_page('b1', l, 'en', parent=a, **kw)
        b2 = create_page('b2', l, 'en', parent=a, **kw)
        c1 = create_page('c1', l, 'en', parent=b1, **kw)
        c2 = create_page('c2', l, 'en', parent=b1, **kw)
        c3 = create_page('c3', l, 'en', parent=b2, **kw)
        c4 = create_page('c4', l, 'en', parent=b2, **kw)
        self.pages = [a, b1, c1, c2, b2, c3, c4]  # tree order
        self.site = get_current_site()
        self.user = self._create_user("standard", is_staff=False, is_superuser=False)
        self.other = self._create_user("other", is_staff=False, is_superuser=False)
        # self.user may see the b1 subtree; self.other the b2 subtree.
        PagePermission.objects.create(page=b1, user=self.user, can_view=True,
                                      grant_on=ACCESS_PAGE_AND_DESCENDANTS)
        PagePermission.objects.create(page=b2, user=self.other, can_view=True,
                                      grant_on=ACCESS_PAGE_AND_DESCENDANTS)
        # Fake request: a class whose class attributes stand in for the
        # request API read by get_visible_nodes(); authenticated as self.user.
        attrs = {
            'user': self.user,
            'REQUEST': {},
            'POST': {},
            'GET': {},
            'session': {},
        }
        self.request = type('Request', (object,), attrs)

    def test_draft_list_access(self):
        """A plain list of draft pages is filtered to self.user's subtree."""
        pages = get_visible_nodes(self.request, self.pages, self.site)
        pages = (
            Page
            .objects
            .drafts()
            .filter(pk__in=(page.pk for page in pages))
            .values_list('title_set__title', flat=True)
        )
        self.assertSequenceEqual(sorted(pages), ['a', 'b1', 'c1', 'c2'])

    def test_draft_qs_access(self):
        """A draft queryset input is filtered the same way as a list."""
        pages = get_visible_nodes(self.request, Page.objects.drafts(), self.site)
        pages = (
            Page
            .objects
            .drafts()
            .filter(pk__in=(page.pk for page in pages))
            .values_list('title_set__title', flat=True)
        )
        self.assertSequenceEqual(sorted(pages), ['a', 'b1', 'c1', 'c2'])

    def test_public_qs_access(self):
        """A public queryset input yields the same visible titles."""
        pages = get_visible_nodes(self.request, Page.objects.public(), self.site)
        pages = (
            Page
            .objects
            .public()
            .filter(id__in=(page.pk for page in pages))
            .values_list('title_set__title', flat=True)
        )
        pages = sorted(pages)
        self.assertEqual(pages, ['a', 'b1', 'c1', 'c2'])
@override_settings(CMS_PERMISSION=False)
class SoftrootTests(CMSTestCase):
"""
Ask evildmp/superdmp if you don't understand softroots!
Softroot description from the docs:
A soft root is a page that acts as the root for a menu navigation tree.
Typically, this will be a page that is the root of a significant new
section on your site.
When the soft root feature is enabled, the navigation menu for any page
will start at the nearest soft root, rather than at the real root of
the site’s page hierarchy.
This feature is useful when your site has deep page hierarchies (and
therefore multiple levels in its navigation trees). In such a case, you
usually don’t want to present site visitors with deep menus of nested
items.
For example, you’re on the page “Introduction to Bleeding”, so the menu
might look like this:
School of Medicine
Medical Education
Departments
Department of Lorem Ipsum
Department of Donec Imperdiet
Department of Cras Eros
Department of Mediaeval Surgery
Theory
Cures
Bleeding
Introduction to Bleeding <this is the current page>
Bleeding - the scientific evidence
Cleaning up the mess
Cupping
Leaches
Maggots
Techniques
Instruments
Department of Curabitur a Purus
Department of Sed Accumsan
Department of Etiam
Research
Administration
Contact us
Impressum
which is frankly overwhelming.
By making “Department of Mediaeval Surgery” a soft root, the menu
becomes much more manageable:
Department of Mediaeval Surgery
Theory
Cures
Bleeding
Introduction to Bleeding <current page>
Bleeding - the scientific evidence
Cleaning up the mess
Cupping
Leaches
Maggots
Techniques
Instruments
"""
def test_basic_home(self):
"""
Given the tree:
|- Home
| |- Projects (SOFTROOT)
| | |- django CMS
| | |- django Shop
| |- People
Expected menu when on "Home" (0 100 100 100):
|- Home
| |- Projects (SOFTROOT)
| | |- django CMS
| | |- django Shop
| |- People
"""
stdkwargs = {
'template': 'nav_playground.html',
'language': 'en',
'published': True,
'in_navigation': True,
}
home = create_page("Home", **stdkwargs)
projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
djangocms = create_page("django CMS", parent=projects, **stdkwargs)
djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
people = create_page("People", parent=home, **stdkwargs)
# On Home
context = self.get_context(home.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
# check everything
self.assertEqual(len(nodes), 1)
homenode = nodes[0]
self.assertEqual(homenode.id, home.publisher_public.pk)
self.assertEqual(len(homenode.children), 2)
projectsnode, peoplenode = homenode.children
self.assertEqual(projectsnode.id, projects.publisher_public.pk)
self.assertEqual(peoplenode.id, people.publisher_public.pk)
self.assertEqual(len(projectsnode.children), 2)
cmsnode, shopnode = projectsnode.children
self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
self.assertEqual(len(cmsnode.children), 0)
self.assertEqual(len(shopnode.children), 0)
self.assertEqual(len(peoplenode.children), 0)
def test_basic_projects(self):
    """
    Given the tree:
    |- Home
    |  |- Projects (SOFTROOT)
    |  |  |- django CMS
    |  |  |- django Shop
    |  |- People
    Expected menu when on "Projects" (0 100 100 100):
    |- Projects (SOFTROOT)
    |  |- django CMS
    |  |- django Shop
    """
    page_kwargs = dict(
        template='nav_playground.html',
        language='en',
        published=True,
        in_navigation=True,
    )
    home = create_page("Home", **page_kwargs)
    projects = create_page("Projects", parent=home, soft_root=True, **page_kwargs)
    djangocms = create_page("django CMS", parent=projects, **page_kwargs)
    djangoshop = create_page("django Shop", parent=projects, **page_kwargs)
    create_page("People", parent=home, **page_kwargs)
    # Render the menu while standing on the soft-rooted "Projects" page.
    context = self.get_context(projects.get_absolute_url(), page=projects.publisher_public)
    Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}").render(context)
    nodes = context['children']
    # The soft root cuts off everything above "Projects".
    self.assertEqual(len(nodes), 1)
    projects_node = nodes[0]
    self.assertEqual(projects_node.id, projects.publisher_public.pk)
    self.assertEqual(len(projects_node.children), 2)
    cms_node, shop_node = projects_node.children
    self.assertEqual(cms_node.id, djangocms.publisher_public.pk)
    self.assertEqual(shop_node.id, djangoshop.publisher_public.pk)
    self.assertEqual(len(cms_node.children), 0)
    self.assertEqual(len(shop_node.children), 0)
def test_basic_djangocms(self):
    """
    Given the tree:
    |- Home
    |  |- Projects (SOFTROOT)
    |  |  |- django CMS
    |  |  |- django Shop
    |  |- People
    Expected menu when on "django CMS" (0 100 100 100):
    |- Projects (SOFTROOT)
    |  |- django CMS
    |  |- django Shop
    """
    page_kwargs = dict(
        template='nav_playground.html',
        language='en',
        published=True,
        in_navigation=True,
    )
    home = create_page("Home", **page_kwargs)
    projects = create_page("Projects", parent=home, soft_root=True, **page_kwargs)
    djangocms = create_page("django CMS", parent=projects, **page_kwargs)
    djangoshop = create_page("django Shop", parent=projects, **page_kwargs)
    create_page("People", parent=home, **page_kwargs)
    # Render the menu from below the soft root ("django CMS").
    context = self.get_context(djangocms.get_absolute_url(), page=djangocms.publisher_public)
    Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}").render(context)
    nodes = context['children']
    # The nearest soft root above the current page becomes the menu root.
    self.assertEqual(len(nodes), 1)
    projects_node = nodes[0]
    self.assertEqual(projects_node.id, projects.publisher_public.pk)
    self.assertEqual(len(projects_node.children), 2)
    cms_node, shop_node = projects_node.children
    self.assertEqual(cms_node.id, djangocms.publisher_public.pk)
    self.assertEqual(shop_node.id, djangoshop.publisher_public.pk)
    self.assertEqual(len(cms_node.children), 0)
    self.assertEqual(len(shop_node.children), 0)
def test_basic_people(self):
    """
    Given the tree:
    |- Home
    |  |- Projects (SOFTROOT)
    |  |  |- django CMS
    |  |  |- django Shop
    |  |- People
    Expected menu when on "People" (0 100 100 100):
    |- Home
    |  |- Projects (SOFTROOT)
    |  |  |- django CMS
    |  |  |- django Shop
    |  |- People
    """
    stdkwargs = {
        'template': 'nav_playground.html',
        'language': 'en',
        'published': True,
        'in_navigation': True,
    }
    home = create_page("Home", **stdkwargs)
    projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
    djangocms = create_page("django CMS", parent=projects, **stdkwargs)
    djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
    people = create_page("People", parent=home, **stdkwargs)
    # On People.
    # BUGFIX: the context was previously built from home.get_absolute_url()
    # with no page kwarg, so this test merely duplicated test_basic_home
    # instead of exercising the menu as seen from the "People" page.  Use
    # the People page's URL/public page, as the sibling tests do.
    context = self.get_context(people.get_absolute_url(), page=people.publisher_public)
    tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
    tpl.render(context)
    nodes = context['children']
    # "People" is not inside the "Projects" soft root, so the full tree
    # rooted at "Home" is still expected.
    self.assertEqual(len(nodes), 1)
    homenode = nodes[0]
    self.assertEqual(homenode.id, home.publisher_public.pk)
    self.assertEqual(len(homenode.children), 2)
    projectsnode, peoplenode = homenode.children
    self.assertEqual(projectsnode.id, projects.publisher_public.pk)
    self.assertEqual(peoplenode.id, people.publisher_public.pk)
    self.assertEqual(len(projectsnode.children), 2)
    cmsnode, shopnode = projectsnode.children
    self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
    self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
    self.assertEqual(len(cmsnode.children), 0)
    self.assertEqual(len(shopnode.children), 0)
    self.assertEqual(len(peoplenode.children), 0)
| {
"content_hash": "3a40386a41cf6c9e35736319e352fc77",
"timestamp": "",
"source": "github",
"line_count": 2128,
"max_line_length": 136,
"avg_line_length": 39.55169172932331,
"alnum_prop": 0.5738421690468836,
"repo_name": "benzkji/django-cms",
"id": "7f2a89d4eb0d63e5a8e6b88ea6eef036bbe4bdcb",
"size": "84204",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/tests/test_menu.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132972"
},
{
"name": "HTML",
"bytes": "201324"
},
{
"name": "JavaScript",
"bytes": "1238070"
},
{
"name": "Python",
"bytes": "2356866"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
} |
from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload
from TASSELpy.java.lang.Object import Object
from TASSELpy.javaObj import javaObj
from TASSELpy.utils.helper import make_sig
from abc import ABCMeta
import javabridge
# Fully-qualified JVM class paths used when building JNI method signatures
# for the wrappers below.
java_imports = {'Enum':'java/lang/Enum',
                'Object':'java/lang/Object',
                'String':'java/lang/String'}
## Abstract stand-in for the Enum wrapper type, so that code (e.g. the
## @javaOverload signature below) can refer to "an Enum" before the
## concrete class is defined.
class metaEnum:
    __metaclass__ = ABCMeta

    @classmethod
    def __subclasshook__(cls, C):
        """Treat any subclass of the concrete Enum wrapper as a metaEnum."""
        return issubclass(C, Enum)
class Enum(Object):
    """
    This is the common base class of all Java language enumeration types.

    Note: the method bodies below are intentionally empty (``pass``); the
    actual JVM calls are generated by the @javaOverload /
    @javaConstructorOverload decorators from the given signatures.
    """
    # JVM class path of the wrapped Java type.
    _java_name = java_imports['Enum']

    def __repr__(self):
        # e.g. "Enum(FIRST: 0)"
        return "Enum(%s: %d)" % (self.name(), self.ordinal())

    @javaConstructorOverload(java_imports['Enum'])
    def __init__(self, *args, **kwargs):
        """
        Instantiates the common base class of all Java language
        enumeration types.
        There is no explicit constructor. Only an existing Java object
        can be passed in (as the ``obj`` keyword).
        """
        pass

    ## Compares this enum with the specified object for order
    @javaOverload("compareTo",
                  (make_sig([java_imports['Enum']],'int'),(metaEnum,),None))
    def compareTo(self, *args):
        """
        Compares this enum with the specified object for order
        Signatures:
        int compareTo(Enum o)
        Arguments:
        The object to compare to
        """
        pass

    ## Returns true if the specified object is equal to this enum constant
    # @param other The object you want to test for equality
    # @return Whether object is equal to enum constant
    @javaOverload("equals",
                  (make_sig([java_imports['Object']],'boolean'),(javaObj,),None))
    def equals(self, *args):
        """
        Returns true if the specified object is equal to this enum constant
        Signatures:
        boolean equals(Object other)
        Arguments:
        other -- the object you want to test for equality
        Returns:
        Whether object is equal to enum constant
        """
        pass

    ## Returns the name of this enum constant, exactly as declared in
    # its enum declaration
    # @return The name of the enum constant
    @javaOverload("name",
                  (make_sig([],java_imports['String']),(),None))
    def name(self, *args):
        """
        Returns the name of this enum constant, exactly as declared
        in its enum declaration
        Signatures:
        String name()
        Returns:
        The name of this enum constant
        """
        pass

    ## Returns the ordinal of this enumeration constant (its position in
    # its enum declaration, where the initial constant is assigned an
    # ordinal of zero)
    # @return Ordinal of this enumeration constant
    @javaOverload("ordinal",
                  (make_sig([],'int'),(),None))
    def ordinal(self, *args):
        """
        Returns the ordinal of this enumeration constant (its position in
        its enum declaration, where the initial constant is assigned an
        ordinal of zero
        Signatures:
        int ordinal()
        Returns:
        Ordinal of this enumeration constant
        """
        pass

    ## Returns the name of this enum constant, as contained in the
    # declaration
    # @return The name of this enum constant
    @javaOverload("toString",
                  (make_sig([],java_imports['String']),(),None))
    def toString(self, *args):
        """
        Returns the name of this enum constant, as contained in the
        declaration
        Signatures:
        String toString()
        Returns:
        The name of this enum constant
        """
        pass
class enum(object):
    """
    Declares a wrapper around a Java enum, like in Java.

    Each constant name becomes an attribute of the instance, holding an
    Enum wrapper around the corresponding Java static field.

    Example:
        my_enum = enum("path/to/MY_ENUM", "FIRST", "SECOND", "THIRD")
    """

    def __init__(self, enum_name, *args, **kwargs):
        """
        Instantiates an enum.  Each constant becomes a class attribute
        that is an instance of the Enum class (or of a dedicated subclass).

        Arguments:
        enum_name -- The path to the enum in Java (e.g. "path/to/class$MY_ENUM")
        args -- The constant names
        subclass -- Optional name of an Enum subclass to instantiate for
                    each constant
        """
        # Default to the plain Enum wrapper; create a one-off subclass
        # when a 'subclass' name was requested.
        self.subclass = Enum
        if 'subclass' in kwargs:
            self.subclass = type(kwargs['subclass'], (Enum,), {})
        # Either way the constants are wrapped with self.subclass, which
        # is Enum itself when no subclass was requested.
        for const_name in args:
            java_field = javabridge.get_static_field(enum_name, const_name,
                                                     "L%s;" % enum_name)
            setattr(self, const_name, self.subclass(obj=java_field))

    def __repr__(self):
        members = [(k, v) for k, v in self.__dict__.iteritems()
                   if isinstance(v, self.subclass)]
        members.sort(key=lambda kv: kv[1].ordinal())
        parts = ["%s: %d" % (v.toString(), v.ordinal()) for _, v in members]
        return "<%s>" % ', '.join(parts)
| {
"content_hash": "fffcc1a14347415ccdef7d7a45f0c60d",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 92,
"avg_line_length": 30.872093023255815,
"alnum_prop": 0.5790960451977402,
"repo_name": "er432/TASSELpy",
"id": "e10abc933f58ea3219081206ea9816e1f51cfa88",
"size": "5310",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "TASSELpy/java/lang/Enum.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "947691"
},
{
"name": "Shell",
"bytes": "6705"
}
],
"symlink_target": ""
} |
import time
from threading import Thread
from cassandra import ConsistencyLevel
from ccmlib.node import ToolError
from dtest import Tester, debug, create_ks, create_cf
from tools.data import insert_c1c2, query_c1c2
from tools.decorators import since, no_vnodes
class TestRebuild(Tester):
    """Integration tests for ``nodetool rebuild``: rebuilding a new
    datacenter from an existing one, resuming failed rebuilds, and the
    range/source restrictions added in later Cassandra versions."""

    ignore_log_patterns = (
        # This one occurs when trying to send the migration to a
        # node that hasn't started yet, and when it does, it gets
        # replayed and everything is fine.
        r'Can\'t send migration request: node.*is down',
        # ignore streaming error during bootstrap
        r'Exception encountered during startup',
        r'Streaming error occurred'
    )

    def simple_rebuild_test(self):
        """
        @jira_ticket CASSANDRA-9119
        Test rebuild from other dc works as expected.
        """
        keys = 1000

        cluster = self.cluster
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        node1 = cluster.create_node('node1', False,
                                    None,
                                    ('127.0.0.1', 7000),
                                    '7100', '2000', None,
                                    binary_interface=('127.0.0.1', 9042))
        cluster.add(node1, True, data_center='dc1')

        # start node in dc1
        node1.start(wait_for_binary_proto=True)

        # populate data in dc1
        session = self.patient_exclusive_cql_connection(node1)
        create_ks(session, 'ks', {'dc1': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=keys, consistency=ConsistencyLevel.LOCAL_ONE)

        # check data
        for i in xrange(0, keys):
            query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
        session.shutdown()

        # Bootstrapping a new node in dc2 with auto_bootstrap: false
        node2 = cluster.create_node('node2', False,
                                    ('127.0.0.2', 9160),
                                    ('127.0.0.2', 7000),
                                    '7200', '2001', None,
                                    binary_interface=('127.0.0.2', 9042))
        cluster.add(node2, False, data_center='dc2')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)

        # wait for snitch to reload
        time.sleep(60)
        # alter keyspace to replicate to dc2
        session = self.patient_exclusive_cql_connection(node2)
        session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")

        if self.cluster.version() >= '2.2':
            # alter system_auth -- rebuilding it no longer possible after
            # CASSANDRA-11848 prevented local node from being considered a source
            # Only do this on 2.2+, because on 2.1, this keyspace only
            # exists if auth is enabled, which it isn't in this test
            session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
        session.execute('USE ks')

        # Counts 'Node is still rebuilding' failures seen by rebuild() below.
        self.rebuild_errors = 0

        # rebuild dc2 from dc1
        def rebuild():
            try:
                node2.nodetool('rebuild dc1')
            except ToolError as e:
                if 'Node is still rebuilding' in e.stdout:
                    self.rebuild_errors += 1
                else:
                    raise e

        class Runner(Thread):
            def __init__(self, func):
                Thread.__init__(self)
                self.func = func
                self.thread_exc_info = None

            def run(self):
                """
                Closes over self to catch any exceptions raised by func and
                register them at self.thread_exc_info
                Based on http://stackoverflow.com/a/1854263
                """
                try:
                    self.func()
                except Exception:
                    import sys
                    self.thread_exc_info = sys.exc_info()

        cmd1 = Runner(rebuild)
        cmd1.start()

        # concurrent rebuild should not be allowed (CASSANDRA-9119)
        # (following sleep is needed to avoid conflict in 'nodetool()' method setting up env.)
        time.sleep(.1)
        # we don't need to manually raise exceptions here -- already handled
        rebuild()

        cmd1.join()

        # manually raise exception from cmd1 thread
        # (Python 2 three-argument raise, preserving the original traceback)
        # see http://stackoverflow.com/a/1854263
        if cmd1.thread_exc_info is not None:
            raise cmd1.thread_exc_info[1], None, cmd1.thread_exc_info[2]

        # exactly 1 of the two nodetool calls should fail
        # usually it will be the one in the main thread,
        # but occasionally it wins the race with the one in the secondary thread,
        # so we check that one succeeded and the other failed
        self.assertEqual(self.rebuild_errors, 1,
                         msg='rebuild errors should be 1, but found {}. Concurrent rebuild should not be allowed, but one rebuild command should have succeeded.'.format(self.rebuild_errors))

        # check data
        for i in xrange(0, keys):
            query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)

    @since('2.2')
    def resumable_rebuild_test(self):
        """
        @jira_ticket CASSANDRA-10810
        Test rebuild operation is resumable
        """
        self.ignore_log_patterns = list(self.ignore_log_patterns) + [r'Error while rebuilding node',
                                                                     r'Streaming error occurred on session with peer 127.0.0.3',
                                                                     r'Remote peer 127.0.0.3 failed stream session']
        cluster = self.cluster
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})

        # Create 2 nodes on dc1
        node1 = cluster.create_node('node1', False,
                                    ('127.0.0.1', 9160),
                                    ('127.0.0.1', 7000),
                                    '7100', '2000', None,
                                    binary_interface=('127.0.0.1', 9042))
        node2 = cluster.create_node('node2', False,
                                    ('127.0.0.2', 9160),
                                    ('127.0.0.2', 7000),
                                    '7200', '2001', None,
                                    binary_interface=('127.0.0.2', 9042))

        cluster.add(node1, True, data_center='dc1')
        cluster.add(node2, True, data_center='dc1')

        node1.start(wait_for_binary_proto=True)
        node2.start(wait_for_binary_proto=True)

        # Insert data into node1 and node2
        session = self.patient_exclusive_cql_connection(node1)
        create_ks(session, 'ks', {'dc1': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)
        key = list(range(10000, 20000))
        session = self.patient_exclusive_cql_connection(node2)
        session.execute('USE ks')
        insert_c1c2(session, keys=key, consistency=ConsistencyLevel.ALL)
        session.shutdown()

        # Create a new node3 on dc2 (with a byteman port so failures can be
        # injected into its streaming below)
        node3 = cluster.create_node('node3', False,
                                    ('127.0.0.3', 9160),
                                    ('127.0.0.3', 7000),
                                    '7300', '2002', None,
                                    binary_interface=('127.0.0.3', 9042),
                                    byteman_port='8300')

        cluster.add(node3, False, data_center='dc2')

        node3.start(wait_other_notice=False, wait_for_binary_proto=True)

        # Wait for snitch to be refreshed
        time.sleep(5)

        # Alter necessary keyspace for rebuild operation
        session = self.patient_exclusive_cql_connection(node3)
        session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
        session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")

        # Path to byteman script which makes the streaming to node2 throw an exception, making rebuild fail
        script = ['./byteman/inject_failure_streaming_to_node2.btm']
        node3.byteman_submit(script)

        # First rebuild must fail and data must be incomplete
        with self.assertRaises(ToolError, msg='Unexpected: SUCCEED'):
            debug('Executing first rebuild -> '),
            node3.nodetool('rebuild dc1')
        debug('Expected: FAILED')

        session.execute('USE ks')
        with self.assertRaises(AssertionError, msg='Unexpected: COMPLETE'):
            debug('Checking data is complete -> '),
            for i in xrange(0, 20000):
                query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
        debug('Expected: INCOMPLETE')

        debug('Executing second rebuild -> '),
        node3.nodetool('rebuild dc1')
        debug('Expected: SUCCEED')

        # Check all streaming sessions completed, streamed ranges are skipped and verify streamed data
        node3.watch_log_for('All sessions completed')
        node3.watch_log_for('Skipping streaming those ranges.')
        debug('Checking data is complete -> '),
        for i in xrange(0, 20000):
            query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
        debug('Expected: COMPLETE')

    @since('3.6')
    def rebuild_ranges_test(self):
        """
        @jira_ticket CASSANDRA-10406
        """
        keys = 1000

        cluster = self.cluster
        tokens = cluster.balanced_tokens_across_dcs(['dc1', 'dc2'])
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        cluster.set_configuration_options(values={'num_tokens': 1})
        node1 = cluster.create_node('node1', False,
                                    ('127.0.0.1', 9160),
                                    ('127.0.0.1', 7000),
                                    '7100', '2000', tokens[0],
                                    binary_interface=('127.0.0.1', 9042))
        node1.set_configuration_options(values={'initial_token': tokens[0]})
        cluster.add(node1, True, data_center='dc1')
        node1 = cluster.nodelist()[0]

        # start node in dc1
        node1.start(wait_for_binary_proto=True)

        # populate data in dc1
        session = self.patient_exclusive_cql_connection(node1)
        # ks1 will be rebuilt in node2
        create_ks(session, 'ks1', {'dc1': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
        # ks2 will not be rebuilt in node2
        create_ks(session, 'ks2', {'dc1': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
        session.shutdown()

        # Bootstraping a new node in dc2 with auto_bootstrap: false
        node2 = cluster.create_node('node2', False,
                                    ('127.0.0.2', 9160),
                                    ('127.0.0.2', 7000),
                                    '7200', '2001', tokens[1],
                                    binary_interface=('127.0.0.2', 9042))
        node2.set_configuration_options(values={'initial_token': tokens[1]})
        cluster.add(node2, False, data_center='dc2')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)

        # wait for snitch to reload
        time.sleep(60)
        # alter keyspace to replicate to dc2
        session = self.patient_exclusive_cql_connection(node2)
        session.execute("ALTER KEYSPACE ks1 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
        session.execute("ALTER KEYSPACE ks2 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
        session.execute('USE ks1')

        # rebuild only ks1 with range that is node1's replica
        node2.nodetool('rebuild -ks ks1 -ts (%s,%s] dc1' % (tokens[1], str(pow(2, 63) - 1)))

        # check data is sent by stopping node1
        node1.stop()
        for i in xrange(0, keys):
            query_c1c2(session, i, ConsistencyLevel.ONE)
        # ks2 should not be streamed
        session.execute('USE ks2')
        for i in xrange(0, keys):
            query_c1c2(session, i, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)

    @since('3.10')
    @no_vnodes()
    def disallow_rebuild_nonlocal_range_test(self):
        """
        @jira_ticket CASSANDRA-9875
        Verifies that nodetool rebuild throws an error when an operator
        attempts to rebuild a range that does not actually belong to the
        current node

        1. Set up a 3 node cluster
        2. Create a new keyspace with replication factor 2
        3. Run rebuild on node1 with a range that it does not own and assert that an error is raised
        """
        cluster = self.cluster
        tokens = cluster.balanced_tokens(3)
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        cluster.set_configuration_options(values={'num_tokens': 1})

        cluster.populate(3)
        node1, node2, node3 = cluster.nodelist()

        node1_token, node2_token, node3_token = tokens[:3]
        node1.set_configuration_options(values={'initial_token': node1_token})
        node2.set_configuration_options(values={'initial_token': node2_token})
        node3.set_configuration_options(values={'initial_token': node3_token})

        cluster.start(wait_for_binary_proto=True)
        session = self.patient_exclusive_cql_connection(node1)

        session.execute("CREATE KEYSPACE ks1 WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")

        # (node1_token, node2_token] is owned by node2, not node1
        with self.assertRaisesRegexp(ToolError, 'is not a range that is owned by this node'):
            node1.nodetool('rebuild -ks ks1 -ts (%s,%s]' % (node1_token, node2_token))

    @since('3.10')
    @no_vnodes()
    def disallow_rebuild_from_nonreplica_test(self):
        """
        @jira_ticket CASSANDRA-9875
        Verifies that nodetool rebuild throws an error when an operator
        attempts to rebuild a range and specifies sources that are not
        replicas of that range.

        1. Set up a 3 node cluster
        2. Create a new keyspace with replication factor 2
        3. Run rebuild on node1 with a specific range using a source that
           does not own the range and assert that an error is raised
        """
        cluster = self.cluster
        tokens = cluster.balanced_tokens(3)
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        cluster.set_configuration_options(values={'num_tokens': 1})

        cluster.populate(3)
        node1, node2, node3 = cluster.nodelist()

        node1_token, node2_token, node3_token = tokens[:3]
        node1.set_configuration_options(values={'initial_token': node1_token})
        node2.set_configuration_options(values={'initial_token': node2_token})
        node3.set_configuration_options(values={'initial_token': node3_token})

        cluster.start(wait_for_binary_proto=True)

        node3_address = node3.network_interfaces['binary'][0]

        session = self.patient_exclusive_cql_connection(node1)

        session.execute("CREATE KEYSPACE ks1 WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")

        with self.assertRaisesRegexp(ToolError, 'Unable to find sufficient sources for streaming range'):
            node1.nodetool('rebuild -ks ks1 -ts (%s,%s] -s %s' % (node3_token, node1_token, node3_address))

    @since('3.10')
    @no_vnodes()
    def rebuild_with_specific_sources_test(self):
        """
        @jira_ticket CASSANDRA-9875
        Verifies that an operator can specify specific sources to use
        when rebuilding.

        1. Set up a 2 node cluster across dc1 and dc2
        2. Create new keyspaces with replication factor 2 (one replica in each datacenter)
        4. Populate nodes with data
        5. Create a new node in dc3 and update the keyspace replication
        6. Run rebuild on the new node with a specific source in dc2
        7. Assert that streaming only occurred between the new node and the specified source
        8. Assert that the rebuild was successful by checking the data
        """
        keys = 1000

        cluster = self.cluster
        tokens = cluster.balanced_tokens_across_dcs(['dc1', 'dc2', 'dc3'])
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        cluster.set_configuration_options(values={'num_tokens': 1})

        cluster.populate([1, 1], tokens=tokens[:2])
        node1, node2 = cluster.nodelist()

        cluster.start(wait_for_binary_proto=True)

        # populate data in dc1, dc2
        session = self.patient_exclusive_cql_connection(node1)
        # ks1 will be rebuilt in node3
        create_ks(session, 'ks1', {'dc1': 1, 'dc2': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
        # ks2 will not be rebuilt in node3
        create_ks(session, 'ks2', {'dc1': 1, 'dc2': 1})
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
        session.shutdown()

        # bootstrap a new node in dc3 with auto_bootstrap: false
        node3 = cluster.create_node('node3', False,
                                    ('127.0.0.3', 9160),
                                    ('127.0.0.3', 7000),
                                    '7300', '2002', tokens[2],
                                    binary_interface=('127.0.0.3', 9042))
        cluster.add(node3, False, data_center='dc3')
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)

        # wait for snitch to reload
        time.sleep(60)
        # alter keyspace to replicate to dc3
        session = self.patient_exclusive_cql_connection(node3)
        session.execute("ALTER KEYSPACE ks1 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1, 'dc3':1};")
        session.execute("ALTER KEYSPACE ks2 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1, 'dc3':1};")
        session.execute('USE ks1')

        node2_address = node2.network_interfaces['binary'][0]
        node3_address = node3.network_interfaces['binary'][0]

        # rebuild only ks1, restricting the source to node2
        node3.nodetool('rebuild -ks ks1 -ts (%s,%s] -s %s' % (tokens[2], str(pow(2, 63) - 1), node2_address))

        # verify that node2 streamed to node3
        log_matches = node2.grep_log('Session with /%s is complete' % node3_address)
        self.assertTrue(len(log_matches) > 0)

        # verify that node1 did not participate
        log_matches = node1.grep_log('streaming plan for Rebuild')
        self.assertEqual(len(log_matches), 0)

        # check data is sent by stopping node1, node2
        node1.stop()
        node2.stop()
        for i in xrange(0, keys):
            query_c1c2(session, i, ConsistencyLevel.ONE)
        # ks2 should not be streamed
        session.execute('USE ks2')
        for i in xrange(0, keys):
            query_c1c2(session, i, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
| {
"content_hash": "f7173ef9cce1bfa8a940df9448d660a0",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 190,
"avg_line_length": 45.05936073059361,
"alnum_prop": 0.5863903526550466,
"repo_name": "krummas/cassandra-dtest",
"id": "13d96bad05e1a18a9a51c03101ef63cb26f0936f",
"size": "19736",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rebuild_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2355499"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
} |
import argparse
import logging
import time
from glob import glob
from os.path import normpath
from .. import cli
from .base import raise_parse_exception, suppress_exit, Command
def register_commands(commands):
    """Register this module's commands in the supplied command map."""
    for name, command_cls in (('run-batch', RunBatchCommand), ('sleep', SleepCommand)):
        commands[name] = command_cls()
def register_command_info(aliases, command_info):
    """Register the 'run' alias and the help text for this module's commands."""
    aliases['run'] = 'run-batch'
    for parser in (run_batch_parser, sleep_parser):
        command_info[parser.prog] = parser.description
# Argument parser for the 'run-batch' command.
run_batch_parser = argparse.ArgumentParser(prog='run-batch', description='Run batch of Commander commands from a file')
run_batch_parser.add_argument(
    '-d', '--delay', dest='delay', type=int, action='store',
    help='Delay (in seconds) between commands to prevent throttling'
)
run_batch_parser.add_argument(
    '-q', '--quiet', dest='quiet', action='store_true', help="Don't display batch file info"
)
run_batch_parser.add_argument(
    '-n', '--dry-run', dest='dry_run', action='store_true', help='Preview the commands that will be run'
)
run_batch_parser.add_argument(
    'batch-file-patterns', nargs='*', type=str, action='store', help='One or more batch files of Commander commands'
)
# Override argparse's default error/exit hooks, presumably so a parse
# failure raises into the interactive session instead of terminating the
# process -- see raise_parse_exception / suppress_exit in .base.
run_batch_parser.error = raise_parse_exception
run_batch_parser.exit = suppress_exit

# Argument parser for the 'sleep' command.
sleep_parser = argparse.ArgumentParser(
    prog='sleep', description='Sleep (in seconds) for adding delay between batch commands'
)
sleep_parser.add_argument(
    'sleep-duration', nargs='?', type=int, action='store', help='Sleep duration in seconds'
)
sleep_parser.error = raise_parse_exception
sleep_parser.exit = suppress_exit
class RunBatchCommand(Command):
    """Runs one or more batch files of Commander commands."""

    def get_parser(self):
        """Return the argparse parser for the 'run-batch' command."""
        return run_batch_parser

    def execute(self, params, **kwargs):
        """Execute every command in each batch file matched by the given glob patterns.

        Recognized kwargs (from run_batch_parser):
          dry_run -- preview the files and commands without running them
          delay -- seconds to wait between commands (throttling)
          quiet -- suppress per-file info output
          batch-file-patterns -- glob patterns selecting the batch files
        """
        dry_run = kwargs.get('dry_run', False)
        command_delay = kwargs.get('delay') or 0
        quiet = kwargs.get('quiet', False)
        pattern_list = kwargs.get('batch-file-patterns', [])
        # Idiomatic emptiness check (was `len(pattern_list) == 0`); also
        # dropped the spurious f-prefix on this placeholder-free message.
        if not pattern_list:
            logging.warning('Please specify one or more batch files to run')
            return
        if dry_run:
            print('The following files and commands would be run:')

        for pattern in pattern_list:
            for filepath in glob(pattern):
                filepath = normpath(filepath)
                if dry_run:
                    print(f'{filepath}:')
                elif not quiet:
                    logging.info(f'Running Keeper Commander batch file {filepath}...')
                with open(filepath) as f:
                    lines = f.readlines()
                # Strip '#' comment lines.
                # NOTE(review): blank lines survive as '' entries here --
                # presumably cli.runcommands ignores them; confirm.
                commands = [c.strip() for c in lines if not c.startswith('#')]
                if commands:
                    if dry_run:
                        print('    ' + '\n    '.join(commands))
                    else:
                        cli.runcommands(params, commands=commands, command_delay=command_delay, quiet=quiet)
                else:
                    if dry_run:
                        print('No commands')
                    else:
                        logging.warning(f'No commands to execute in batch file {filepath}')
class SleepCommand(Command):
    """Pauses for a given number of seconds, for adding delay between batch commands."""

    def get_parser(self):
        """Return the argparse parser for the 'sleep' command."""
        return sleep_parser

    def execute(self, params, **kwargs):
        """Sleep for the 'sleep-duration' kwarg (seconds); warn and return if it is missing."""
        sleep_duration = kwargs.get('sleep-duration')
        if sleep_duration is None:
            # Dropped the spurious f-prefix: the message has no placeholders.
            logging.warning('Please specify the sleep duration in seconds')
            return
        time.sleep(sleep_duration)
| {
"content_hash": "ba0c89bf53d107e6f2589b402a94aff4",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 119,
"avg_line_length": 36.54639175257732,
"alnum_prop": 0.6118476727785613,
"repo_name": "Keeper-Security/Commander",
"id": "63ec3e35cea3da4d8000d218c592bb8a5a64edf3",
"size": "3761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keepercommander/commands/scripting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2274231"
},
{
"name": "Shell",
"bytes": "3388"
}
],
"symlink_target": ""
} |
import tests.missing_data.test_missing_data_ozone_generic as gen

# Run the generic ozone missing-data test with the
# ('DiscardRow', 'Mean') imputation-strategy pair.
gen.test_ozone_missing_data('DiscardRow', 'Mean')
| {
"content_hash": "d78efe69011fc88537701b26dd7feaff",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 64,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.7844827586206896,
"repo_name": "antoinecarme/pyaf",
"id": "e6a6b8ee09fd7a27e808f9159296ff7c3346f083",
"size": "116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/missing_data/test_missing_data_ozone_DiscardRow_Mean.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'User'
db.create_table(u'presentationsapp_user', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75)),
('password_hash', self.gf('django.db.models.fields.CharField')(default='', max_length=128, null=True, blank=True)),
('password_salt', self.gf('django.db.models.fields.CharField')(default='', max_length=32, null=True, blank=True)),
('reset_code', self.gf('django.db.models.fields.CharField')(default='', max_length=32, null=True, blank=True)),
('admin', self.gf('django.db.models.fields.BooleanField')(default=False)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
('name', self.gf('django.db.models.fields.CharField')(default='', max_length=64, null=True, blank=True)),
))
db.send_create_signal(u'presentationsapp', ['User'])
    def backwards(self, orm):
        """Drop the presentationsapp_user table (reverse of forwards)."""
        # Deleting model 'User'
        db.delete_table(u'presentationsapp_user')
models = {
u'presentationsapp.user': {
'Meta': {'object_name': 'User'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'password_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'password_salt': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['presentationsapp'] | {
"content_hash": "5e3878883e97f89def856b755dbab0f1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 144,
"avg_line_length": 56.27906976744186,
"alnum_prop": 0.5925619834710744,
"repo_name": "masonsbro/presentations",
"id": "3cfcb1eab024a3dff207e8789515184b04e795d7",
"size": "2444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presentationsapp/migrations/0003_auto__add_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33890"
}
],
"symlink_target": ""
} |
import particleDataStructures as pds
from BrickPi import * #import BrickPi.py file to use BrickPi operations
from numpy import *
import random
LEFT_MOTOR = PORT_A
RIGHT_MOTOR = PORT_C
LEFT_TOUCHSENSOR = PORT_3
RIGHT_TOUCHSENSOR = PORT_1
SONAR_SENSOR = PORT_2
stdDev = 0.05
likeliOffset = 0.2
likeliThreshold = 0.2
angleThreshold = 0.558505361 # 32 degree
numOfParticles = 100
angleChecks = 0 # counter for angle check reported from all particles
hit = False
def initialise():
    """Configure the BrickPi: enable both drive motors, register the two
    touch (bumper) sensors and the continuous ultrasonic sensor."""
    BrickPiSetup()  # setup the serial port for communication
    BrickPi.MotorEnable[LEFT_MOTOR] = 1  # enable the left drive motor (port A)
    BrickPi.MotorEnable[RIGHT_MOTOR] = 1  # enable the right drive motor (port C)
    BrickPi.SensorType[LEFT_TOUCHSENSOR] = TYPE_SENSOR_TOUCH  # left bumper
    BrickPi.SensorType[RIGHT_TOUCHSENSOR] = TYPE_SENSOR_TOUCH  # right bumper
    BrickPi.SensorType[SONAR_SENSOR] = TYPE_SENSOR_ULTRASONIC_CONT  # ultrasonic, continuous mode
    BrickPiSetupSensors()  # push the sensor configuration to the firmware
def bumper_left_hit():
    """Return the raw left bumper reading (truthy when pressed).
    """
    return BrickPi.Sensor[LEFT_TOUCHSENSOR]
def bumper_right_hit():
    """Return the raw right bumper reading (truthy when pressed).
    """
    return BrickPi.Sensor[RIGHT_TOUCHSENSOR]
def get_distance_measurement():
    """Return the sonar range reading.

    The constant 2 is a fixed calibration offset subtracted from the raw
    sensor value -- presumably sensor-to-chassis distance in cm; TODO confirm.
    """
    return BrickPi.Sensor[SONAR_SENSOR] - 2
def calculate_depth(wallCoord, location, theta):
    """Expected sonar depth from a pose to a wall's infinite line.

    Casts a ray from (location.x, location.y) along heading ``theta`` and
    returns the signed distance to the line through the wall endpoints
    (ax, ay)-(bx, by).
    """
    wall_dx = wallCoord.bx - wallCoord.ax
    wall_dy = wallCoord.by - wallCoord.ay
    num = wall_dy * (wallCoord.ax - location.x) - wall_dx * (wallCoord.ay - location.y)
    den = wall_dy * math.cos(theta) - wall_dx * math.sin(theta)
    return num / den
def check_angle(wallCoord, theta):
    """Return True when the sonar beam meets the wall too obliquely.

    Computes the incidence angle ``beta`` between the heading ``theta``
    and the wall, and compares it to the module-level ``angleThreshold``.
    """
    ny = wallCoord.ay - wallCoord.by
    nx = wallCoord.bx - wallCoord.ax
    projection = math.cos(theta) * ny + math.sin(theta) * nx
    wall_length = math.sqrt(pow(ny, 2) + pow(nx, 2))
    beta = math.acos(projection / wall_length)
    # angle larger than the threshold means the sonar echo is unreliable
    return beta > angleThreshold
class Coordinate:
    """Lightweight attribute bag: every keyword argument becomes an
    instance attribute, e.g. Coordinate(x=1.0, y=2.0)."""
    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
def calculate_likelihood(px, py, theta, z):
    """Likelihood of sonar reading ``z`` for particle pose (px, py, theta).

    Casts a ray along ``theta``, finds the nearest wall segment the ray
    actually hits, then scores ``z`` with a Gaussian around the expected
    depth plus ``likeliOffset`` (a floor to keep weights non-zero).
    Also increments the module-level ``angleChecks`` counter when the ray
    meets that wall too obliquely for the sonar to be trusted.
    """
    wallIndex = 0  # index of the wall with the shortest valid expected depth
    # BUGFIX: start at +inf so the first (and each closer) positive depth is
    # accepted; the original started at 0.0, which no positive depth could
    # ever undercut, so no wall was ever selected.
    currentShortestDepth = float('inf')
    for wall in pds.mymap.walls:
        wallCoord = Coordinate(ax = wall[0], ay = wall[1], bx = wall[2], by = wall[3])
        # BUGFIX: y was mistakenly initialised from px in the original.
        loc = Coordinate(x = px, y = py)
        depth = calculate_depth(wallCoord, loc, theta)
        # point where the ray meets the wall's infinite line
        hitPoint = Coordinate(x = px + depth*math.cos(theta), y = py + depth*math.sin(theta))
        # BUGFIX: use inclusive comparisons -- for an axis-aligned wall the
        # hit point lies exactly on the segment's bounding-box edge, so the
        # original strict '<'/'>' tests rejected every axis-aligned wall.
        inrange = hitPoint.x <= max(wallCoord.ax, wallCoord.bx) and\
                  hitPoint.x >= min(wallCoord.ax, wallCoord.bx) and\
                  hitPoint.y <= max(wallCoord.ay, wallCoord.by) and\
                  hitPoint.y >= min(wallCoord.ay, wallCoord.by)
        # only walls in front of the robot (depth >= 0) can produce an echo
        if inrange and 0 <= depth < currentShortestDepth:
            currentShortestDepth = depth
            wallIndex = pds.mymap.walls.index(wall)
    # Gaussian measurement model around the expected depth, plus offset.
    # If no wall was hit, currentShortestDepth stays +inf and the Gaussian
    # term collapses to 0.0, leaving just likeliOffset.
    likelihood = math.exp(-(pow((currentShortestDepth - z),2))/(2*pow(stdDev,2)))/(stdDev*math.sqrt(2*math.pi)) + likeliOffset
    # check the incidence angle against the selected wall
    global angleChecks
    front_wall = pds.mymap.walls[wallIndex]
    frontWallCoord = Coordinate(ax = front_wall[0], ay = front_wall[1], bx = front_wall[2], by = front_wall[3])
    if check_angle(frontWallCoord, theta):
        angleChecks = angleChecks + 1
    return likelihood
def noise_e():
    """Gaussian noise for forward translation, sigma = module-level stdDev."""
    return random.gauss(0, stdDev)
def noise_f():
    """Gaussian heading drift applied during straight-line motion."""
    return random.gauss(0, stdDev)
def noise_g():
    """Gaussian heading noise applied during pure rotation."""
    return random.gauss(0, stdDev)
def update_theta(update, old_theta):
    """Return ``old_theta + update`` wrapped back below 2*pi.

    Only a single wrap above 2*pi is applied (matching the original
    behaviour); values below -pi are deliberately passed through.
    """
    theta = update + old_theta
    if theta > 2 * math.pi:
        theta -= 2 * math.pi
    return theta
def forward(distance):
    """Drive straight ahead for ``distance`` (cm).

    Per-wheel powers (100/115) and degree-per-cm factors (18.75/18.25)
    look empirically calibrated for this chassis -- TODO confirm.
    """
    motorRotateDegree([100,115],[18.75*distance,18.25*distance],[LEFT_MOTOR,RIGHT_MOTOR])
def turn(degrees):
    """Rotate in place by ``degrees`` (positive = wheels counter-rotate,
    1.950 motor-degrees per robot-degree; calibration constant -- TODO confirm)."""
    motorRotateDegree([120,120],[-1.950*degrees,1.950*degrees],[LEFT_MOTOR,RIGHT_MOTOR])
def forward_renew_particle(D, particle):
    '''Motion-model update for one particle after driving ``D`` cm.

    Mutates and returns ``particle`` ([x, y, theta, weight]): the pose is
    advanced along the particle's heading with Gaussian noise, and the
    weight is multiplied by the sonar measurement likelihood.
    '''
    # sonar_measure
    z = get_distance_measurement()
    #z = 100
    e = noise_e()
    # new_x
    particle[0] = particle[0]+(D+e)*math.cos(particle[2])
    # new_y
    particle[1] = particle[1]+(D+e)*math.sin(particle[2])
    # new_theta: heading drift only (commanded rotation is zero)
    particle[2] = update_theta(noise_f(), particle[2])
    #if z <= 100:
    likelihood = calculate_likelihood(particle[0], particle[1], particle[2], z)
    #print "likelihood " + str(likelihood)
    # new_weight
    particle[3] = likelihood*particle[3]
    #print "particle " + str(particle)
    return particle
def turn_renew_particle(alpha, particle):
    '''Motion-model update for one particle after rotating ``alpha`` rad.

    Mutates and returns ``particle`` ([x, y, theta, weight]): the heading
    is advanced by alpha plus Gaussian noise, and the weight is
    multiplied by the sonar measurement likelihood.
    '''
    z = get_distance_measurement()
    # new_theta
    particle[2] = update_theta(alpha + noise_g(), particle[2])
    #if z <= 100:
    likelihood = calculate_likelihood(particle[0], particle[1], particle[2], z)
    #print "likelihood " + str(likelihood)
    # new_weight
    particle[3] = likelihood*particle[3]
    #print "particle " + str(particle)
    return particle
def renew_particles(motion, operation):
    '''Run one full particle-filter step for a motion command.

    operation is "forward" (uses motion.forwardDis) or "turn" (uses
    motion.rotateAngle). Updates every particle's pose and weight, then
    normalises the weights and resamples.

    NOTE(review): relies on a module-level ``particles`` object, which in
    this file is only created inside the commented-out __main__ block --
    verify it is bound before this is called.
    '''
    global angleChecks
    #print "before particles " + str(particles.data)
    # skip-resampling logic is currently disabled (see the quoted blocks)
    skip = False
    if operation == "forward":
        temp = [forward_renew_particle(motion.forwardDis, list(particle)) for particle in particles.data]
        # If the number of angle checks reported is too big
        '''if angleChecks > particles.n*0.6:
            skip = True'''
    elif operation == "turn":
        temp = [turn_renew_particle(motion.rotateAngle, list(particle)) for particle in particles.data]
        '''if angleChecks > particles.n*0.6:
            skip = True'''
    # print "angleChecks " + str(angleChecks)
    # angleChecks = 0
    #print "temp " + str(temp)
    if skip:
        # keep the old weights, only adopt the new poses
        particles.data = [(temp[i][0],temp[i][1],temp[i][2],particles.data[i][3]) for i in range(len(temp))]
    if not skip:
        particles.data = temp
    normalise_weight(particles)
    resampling_particles(particles)
    #print "after particles " + str(particles.data)
    particles.update(particles.data)
    return particles
def update_weight(particle, sum):
    """Divide the particle's weight (index 3) by ``sum`` in place and
    return the particle as a tuple."""
    particle[3] /= sum
    return tuple(particle)
def normalise_weight(particles):
    '''Rescale all particle weights so that they sum to one.'''
    total = sum(p[3] for p in particles.data)
    particles.data = [update_weight(list(p), total) for p in particles.data]
def update_particle(particle, new_particle, n):
    """Copy the pose (x, y, theta) from ``new_particle`` into ``particle``,
    reset its weight to 1/n, and return the result as a tuple."""
    particle[:3] = new_particle[:3]
    particle[3] = 1.0 / float(n)
    return tuple(particle)
def resampling_particles(particles):
    '''Resample all particles proportionally to their (normalised) weights.

    Builds the cumulative weight array, draws particles.n uniform samples,
    and replaces particles.data with equal-weight (1/n) copies.

    NOTE(review): if a drawn random number exceeds the final cumulative
    value (possible with floating-point underflow in the weight sum),
    ``idx`` keeps its previous value -- and is unbound on the very first
    draw. Verify weights always sum to >= 1 here.
    '''
    # cumulative distribution over particle weights
    array = [0]*particles.n
    array[0] = particles.data[0][3]
    for i in range(1,particles.n):
        array[i] = array[i - 1]+particles.data[i][3]
    # sample particles.n new particles by inverse-CDF lookup
    counter = 0
    new_particles = [None]*particles.n
    while counter < particles.n:
        random_number = random.random()
        for i in range(len(array)):
            if random_number< array[i]:
                idx = i
                break
        new_particles[counter] = list(particles.data[idx])
        counter = counter + 1
    particles.data = [update_particle(list(particles.data[i]),new_particles[i],particles.n) for i in range(particles.n)]
def estCurrentlocation():
    '''Estimate the robot pose as the weighted mean over all particles.

    Prints the estimate (theta converted to degrees for display) and
    returns [x, y, theta] with theta in radians.
    '''
    current_x = 0
    current_y = 0
    current_theta = 0
    for i in range(particles.n):
        current_x = current_x + particles.data[i][0]*particles.data[i][3]
        current_y = current_y + particles.data[i][1]*particles.data[i][3]
        current_theta = current_theta + particles.data[i][2]*particles.data[i][3]
    print "Current position " + str([current_x,current_y,current_theta*180/math.pi])
    return [current_x, current_y, current_theta]
def calibration(x,y):
    '''Return [degree, distance]: the rotation (radians, in (-pi, pi])
    and straight-line distance needed to reach waypoint (x, y) from the
    current estimated pose.
    '''
    [current_x, current_y, current_theta] = estCurrentlocation()
    print "Navigate to " + str([x,y])
    distance = pow(pow(x-current_x,2)+pow(y-current_y,2),0.5)
    # rounding to 3 decimals avoids jitter-driven sign flips near zero
    dif_y = float(round(y,3)-round(current_y,3))
    dif_x = float(round(x,3)-round(current_x,3))
    # vertical target line: atan would divide by zero, handle separately
    # NOTE(review): if dif_x == 0 AND y == current_y, ``degree`` is never
    # bound and the function raises NameError -- verify unreachable.
    if dif_x == 0:
        if y > current_y:
            degree = math.pi/2 - current_theta
        elif y < current_y:
            degree = -math.pi/2 - current_theta
    else:
        degree = math.atan(dif_y/dif_x)
        # shift atan's (-pi/2, pi/2) result into the correct quadrant
        if dif_y >= 0 and dif_x >0:
            degree = degree - current_theta
        elif dif_y >= 0 and dif_x< 0:
            degree = degree - current_theta + math.pi
        elif dif_y <=0 and dif_x > 0:
            degree = degree - current_theta
        elif dif_y <= 0 and dif_x < 0:
            degree = degree - current_theta - math.pi
    # wrap the final rotation into (-pi, pi]
    if degree > math.pi:
        degree -= math.pi*2
    elif degree < -math.pi:
        degree += math.pi*2
    return [degree, distance]
def navigateToWaypoint(x, y):
    '''Drive to waypoint (x, y).

    Rotates toward the target, then advances in steps of at most 20 cm,
    re-running the particle-filter update and re-calibrating after every
    motion. Aborts (setting the global ``hit``) when a bumper fires.
    '''
    [degree, distance] = calibration(x, y)
    # turn to the right direction
    # when beginning navigating to a new waypoint
    print "turn " + str(degree/math.pi*180)
    turn(degree/math.pi*180)
    particles.draw()
    # calibrate to see if it needs to further rotate
    motion = Coordinate(rotateAngle = degree)
    renew_particles(motion, "turn")
    [degree, distance] = calibration(x, y)
    global hit
    # within 2 cm of the waypoint counts as arrived
    while distance > 2:
        if (bumper_left_hit() or bumper_right_hit()):
            hit = True
            break
        print "distance " + str(distance)
        # while the degree it think it needs to rotate not
        # too far away from 0 i.e not trivial
        while abs(degree) > 0.05:
            print "turn " + str(degree/math.pi*180)
            # turn
            turn(degree/math.pi*180)
            particles.draw()
            # calibrate to see if it needs to further rotate
            motion = Coordinate(rotateAngle = degree)
            renew_particles(motion, "turn")
            [degree, distance] = calibration(x, y)
            time.sleep(0.5)
        print "forward " + str(distance)
        # cap each forward step at 20 cm so the filter stays on track
        if distance > 20:
            forward(20)
            particles.draw()
            motion = Coordinate(forwardDis = 20)
        else:
            forward(distance)
            particles.draw()
            motion = Coordinate(forwardDis = distance)
        renew_particles(motion, "forward")
        [degree, distance] = calibration(x, y)
        time.sleep(0.5)
    print "\n"
'''if __name__ == "__main__":
initialise()
particles = pds.Particles()
particles.draw()
for way in pds.waypoint.walls:
if hit:
break
print [way[2], way[3]]
navigateToWaypoint(way[2], way[3])
print "\n"
print "stop"'''
| {
"content_hash": "ce12bcf9030c84cabe6dd1c7b226bb34",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 133,
"avg_line_length": 31.724867724867725,
"alnum_prop": 0.608155436957972,
"repo_name": "Phoenix1708/Robotics_Imperial",
"id": "5854ab77085268fe062cb6a99d8ad455f5253e79",
"size": "11992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prac-files/Exercise5.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "309660"
},
{
"name": "C++",
"bytes": "207882"
},
{
"name": "D",
"bytes": "12324"
},
{
"name": "Objective-C",
"bytes": "5200"
},
{
"name": "Perl",
"bytes": "3048"
},
{
"name": "Python",
"bytes": "489630"
},
{
"name": "Shell",
"bytes": "3456"
}
],
"symlink_target": ""
} |
import sys, os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import pyflapjackevents
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools `test` command that delegates to pytest."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import pytest
        pytest.main(self.test_args)
setup(name='pyflapjackevents',
      version=pyflapjackevents.__version__,
      description="Send status events to flapjack from python.",
      long_description=open('README.rst').read(),
      classifiers=[],
      keywords='',
      author='Jose Plana',
      author_email='jplana@tuenti.com',
      url='http://github.com/tuenti/pyflapjackevents',
      license='Apache License, Version 2.0',
      packages=find_packages(exclude=['tests']),
      include_package_data=True,
      zip_safe=True,
      install_requires=[
          # requirements
      ],
      # BUGFIX: the setuptools keyword is 'tests_require'; the original
      # 'test_requires' was silently ignored, so pytest was never pulled
      # in for `python setup.py test`.
      tests_require=[
          'pytest',
      ],
      entry_points="""
      # -*- Entry points: -*-
      """,
      cmdclass = {'test': PyTest},
      )
| {
"content_hash": "62f45b29a5ff1043f5521aad691f7321",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 64,
"avg_line_length": 27.177777777777777,
"alnum_prop": 0.6287816843826656,
"repo_name": "tuenti/pyflapjackevents",
"id": "89a83d0da3904dc10805c5c716a7bba19255cefd",
"size": "1865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9985"
}
],
"symlink_target": ""
} |
from fabric.api import *
from fabric.contrib.files import upload_template, append
import time
import config
@task
def uname():
    """Print kernel/OS details of the remote host (`uname -a`)."""
    run("uname -a")
@task
def upgrade():
    """Fully upgrade a server's packages non-interactively
    (apt-get update, upgrade, then dist-upgrade)."""
    sudo("DEBIAN_FRONTEND=noninteractive apt-get update -y")
    sudo("DEBIAN_FRONTEND=noninteractive apt-get upgrade -y")
    sudo("DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade -y")
@task
def install_sudo():
    """Install the sudo program. Must be run as root (uses `run`, not
    `sudo`, since sudo is not available yet)."""
    run("apt-get update")
    run("apt-get install -y sudo")
@task
def reboot():
    """Reboot a machine after a locally printed 5-second countdown."""
    x = 5
    while x > 0:
        print "Rebooting", env.host, "in", x, "seconds..."
        time.sleep(1)
        x -= 1
    sudo("reboot")
@task
def shutdown():
    """Halt a machine after a locally printed 5-second countdown."""
    x = 5
    while x > 0:
        print "Shutdowning", env.host, "in", x, "seconds..."
        time.sleep(1)
        x -= 1
    sudo("halt")
@task
def copy_key_manager():
    """Render and upload the SSH key-management script [$AG:NeedKM].

    Requires ``env.keymanagerName`` (and ``env.keyManagerUsers``) to be
    set; bails out with a message otherwise.
    """
    if not hasattr(env, 'keymanagerName') or env.keymanagerName == '':
        print "No keymanager name !"
        return
    upload_template('files/updateKeys.sh', '/root/updateKeys.sh', {
        'server': env.keymanagerName,
        'users': env.keyManagerUsers,
        'gestion_adresse': config.GESTION_ADDRESS,
    }, use_sudo=True)
    sudo("chmod +x /root/updateKeys.sh")
@task
def cron_key_manager():
    """Install an hourly (minute 42) crontab entry for the key script."""
    sudo('touch /tmp/crondump')
    # warn_only: `crontab -l` exits non-zero when no crontab exists yet
    with settings(warn_only=True):
        sudo('crontab -l > /tmp/crondump')
    append('/tmp/crondump', '42 * * * * /root/updateKeys.sh', use_sudo=True)
    sudo('crontab /tmp/crondump')
@task
def setup_key_manager():
    """Full key-manager setup: upload the script, cron it, run it once
    [$AG:NeedKM]."""
    run('mkdir -p ~/.ssh/')
    sudo('apt-get install -y ca-certificates')
    copy_key_manager()
    cron_key_manager()
    execute_key_manger()
@task
def execute_key_manger():
    """Run the key-update script once on the remote host.

    NOTE(review): the task name has a typo ('manger'); kept as-is because
    callers and fabric CLI invocations reference the existing name.
    """
    sudo("/root/updateKeys.sh")
@task
def copy_config():
    """Push the shared vim/screen/zsh dotfiles to the remote user's home."""
    put(config.AZIMUT_CONFIG + '/.vim*', '~')
    put(config.AZIMUT_CONFIG + '/.screenrc', '~')
    put(config.AZIMUT_CONFIG + '/.zsh*', '~')
@task
def copy_user_config():
    """Push the dotfiles into a specific user's home [$AG:NeedUser].

    No-op unless ``env.fab_user`` is set; the user variant of .zshrc is
    installed as that user's .zshrc.
    """
    if not hasattr(env, 'fab_user') or env.fab_user == '':
        return
    put(config.AZIMUT_CONFIG + '/.vim*', '/home/' + env.fab_user + '/')
    put(config.AZIMUT_CONFIG + '/.screenrc', '/home/' + env.fab_user + '/')
    put(config.AZIMUT_CONFIG + '/.zsh*', '/home/' + env.fab_user + '/')
    put(config.AZIMUT_CONFIG + '/.zshrc-user', '/home/' + env.fab_user + '/.zshrc')
@task
def install_base_progs():
    """Install the base toolset (zsh, screen, vim)."""
    sudo('apt-get install -y zsh screen vim')
@task
def switch_shell_to_zsh():
    """Make zsh the login shell of the connecting user."""
    run('chsh -s /bin/zsh')
@task
def install_rsync():
    """Install rsync (note: no -y flag, so apt may prompt)."""
    sudo("apt-get install rsync")
@task
def add_gestion_for_self_vms():
    """Pin the gestion host in /etc/hosts so VMs co-hosted on this server
    can reach it [$AG:NeedGestion]. No-op unless ``env.gestion_ip`` is set."""
    if not hasattr(env, 'gestion_ip') or env.gestion_ip == '':
        return
    sudo('echo "' + env.gestion_ip + ' ' + env.gestion_name + '" >> /etc/hosts')
@task
def setup():
    """Provision a new server end to end [$AG:NeedKM][$AG:NeedGestion].

    Runs the individual tasks in dependency order, prompting for the key
    manager settings if they are not already in ``env``.
    """
    execute(install_sudo)
    execute(upgrade)
    execute(install_base_progs)
    execute(add_gestion_for_self_vms)
    execute(copy_config)
    execute(switch_shell_to_zsh)
    execute(install_rsync)
    if not hasattr(env, 'keymanagerName') or env.keymanagerName == '':
        prompt("Key manager name ?", 'keymanagerName')
        prompt("Key manager users ?", 'keyManagerUsers', 'root')
    execute(setup_key_manager)
| {
"content_hash": "db1d2bb92f0fe0e1c19cbea5b7b584fc",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 110,
"avg_line_length": 23.906832298136646,
"alnum_prop": 0.606131462717589,
"repo_name": "Azimut-Prod/azimut-deploy",
"id": "116c56b7b8d744231cdf5250501c94194e96096c",
"size": "3849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29504"
},
{
"name": "Shell",
"bytes": "472"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
def update_slug(apps, schema_editor):
    """Backfill Post.slug with the string form of each post's primary key."""
    post_model = apps.get_model('blog', 'Post')
    for entry in post_model.objects.all():
        entry.slug = str(entry.id)
        entry.save()
class Migration(migrations.Migration):
    """Three-step slug rollout: add a non-unique slug column, backfill it
    from each post's id, then tighten it to unique."""

    dependencies = [
        ('blog', '0009_auto_20160711_1512'),
    ]

    operations = [
        # 1. add the column as non-unique so existing rows get '' without
        #    violating a unique constraint
        migrations.AddField(
            model_name='post',
            name='slug',
            field=models.SlugField(unique=False, default='')
        ),
        # 2. populate one distinct slug per row (derived from the pk)
        migrations.RunPython(update_slug, ),
        # 3. values are now distinct, so enforce uniqueness
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=models.SlugField(unique=True)
        ),
    ]
| {
"content_hash": "2e6fc7bdb3720ae1206b11626b90c4e4",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 60,
"avg_line_length": 23.903225806451612,
"alnum_prop": 0.5695006747638327,
"repo_name": "liuenyan/django-blog",
"id": "0fa06e14f5d873d0b4d152b3cf1e91422fcb791e",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0010_add_slug_to_post.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2412"
},
{
"name": "HTML",
"bytes": "20617"
},
{
"name": "JavaScript",
"bytes": "3790"
},
{
"name": "Python",
"bytes": "30915"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import sys
# This logic is needed when tool.py is invoked directly by the user
# and when the glop directory is neither installed nor automatically
# put in the path (i.e., the user isn't in the directory above this file).
d = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if not d in sys.path: # pragma: no cover
sys.path.insert(0, d)
# We use absolute paths rather than relative paths because this file can be
# invoked directly as a script (and isn't considered part of a module in
# that case).
# pylint: disable=wrong-import-position
from glop.ir import Grammar
from glop.compiler import Compiler
from glop.printer import Printer
from glop.host import Host
from glop.interpreter import Interpreter
from glop.parser import Parser
from glop.version import VERSION
# pylint: enable=wrong-import-position
def main(host=None, argv=None):
    """Command-line entry point for glop.

    Dispatches, in priority order: pretty-print, AST dump, compile, or
    interpret. Returns a process exit code (0 success, 130 on Ctrl-C).
    """
    host = host or Host()
    try:
        args, err = _parse_args(host, argv)
        if err is not None:
            return err
        grammar, err = _read_grammar(host, args)
        if err:
            return err
        if args.pretty_print:
            return _pretty_print_grammar(host, args, grammar)
        if args.ast:
            contents = json.dumps(grammar.ast, indent=2,
                                  ensure_ascii=False) + '\n'
            _write(host, args.output, contents)
            return 0
        if args.compile:
            comp = Compiler(grammar, args.class_name, args.main, args.memoize)
            contents = comp.compile()
            _write(host, args.output, contents)
            # a generated parser with a main() should be runnable directly
            if args.output != '-' and args.main:
                host.make_executable(args.output)
            return 0
        return _interpret_grammar(host, args, grammar)
    except KeyboardInterrupt:
        host.print_('Interrupted, exiting ...', stream=host.stderr)
        return 130  # SIGINT
def _parse_args(host, argv):
    """Parse command-line flags.

    Returns (args, None) on success, or (None, exit_code) when parsing
    failed or the invocation only printed help/version text.
    """
    class ArgumentParser(argparse.ArgumentParser):
        # Capture exit/error instead of letting argparse terminate the
        # process, so main() can decide how to report the failure.
        status = None
        message = None

        def exit(self, status=0, message=None):
            self.status = status
            self.message = message

        def error(self, message):
            self.exit(2, message)

    ap = ArgumentParser(prog='glop', add_help=False)
    ap.add_argument('-a', '--ast', action='store_true')
    ap.add_argument('-c', '--compile', action='store_true')
    ap.add_argument('-e', '--expr', action='store')
    ap.add_argument('-h', '--help', action='store_true')
    ap.add_argument('-i', '--input', default='-')
    ap.add_argument('-n', '--no-appended-newline', action='store_true')
    ap.add_argument('-o', '--output')
    ap.add_argument('-p', '--pretty-print', action='store_true')
    ap.add_argument('-s', '--as-string', action='store_true',
                    help='return output as a string, not as a JSON object')
    ap.add_argument('-V', '--version', action='store_true')
    ap.add_argument('--class-name', default='Parser')
    ap.add_argument('--memoize', action='store_true', default=False,
                    help='memoize intermediate results (off by default)')
    ap.add_argument('--no-memoize', dest='memoize', action='store_false')
    ap.add_argument('--main', action='store_true', default=False,
                    help='generate a main() wrapper (off by default)')
    ap.add_argument('grammar', nargs='?')
    args = ap.parse_args(argv)

    usage = '''\
usage: glop [-achnpsV] [-e expr] [-i file] [-o file] [grammar]

    -a, --ast                  dump the ast of the parsed input
    -c, --compile              compile grammar instead of interpreting it
    -e, --expr EXPR            use the provided expression as a grammar
    -h, --help                 show this message and exit
    -i, --input FILE           file to read input from (use '-' for stdin)
    -n, --no-appended-newline  do not print a newline after output
    -o, --output FILE          file to write output to (use '-' for stdout)
    -p, --pretty-print         pretty-print grammar
    -s, --as-string            print output as a string, not a JSON object
    -V, --version              print current version (%s)
    --class-name CLASS_NAME    class name for the generated class when
                               compiling it (defaults to 'Parser')
    --[no-]memoize             memoize intermedate results (off by default)
    --[no-]main                generate a main() wrapper (off by default)
''' % VERSION

    if args.version:
        host.print_(VERSION)
        return None, 0
    if args.help:
        host.print_(usage)
        return None, 0
    if ap.status is not None:
        host.print_(usage)
        host.print_('Error: %s' % ap.message, stream=host.stderr)
        return None, ap.status
    # a grammar must come from either -e or the positional file argument
    if not args.expr and not args.grammar:
        host.print_(usage)
        return None, 2
    if not args.output:
        # default compile output: <grammar basename>.py; otherwise stdout
        if args.compile:
            args.output = host.splitext(host.basename(args.grammar))[0] + '.py'
        else:
            args.output = '-'
    return args, None
def _read_grammar(host, args):
    """Parse the grammar from -e/--expr or from the grammar file.

    Returns (Grammar, 0) on success, or (None, 1) on error (after
    printing the error to stderr).
    """
    if args.expr:
        parser = Parser(args.expr, '<expr>')
    else:
        if not host.exists(args.grammar):
            host.print_('Error: no such file: "%s"' % args.grammar,
                        stream=host.stderr)
            return None, 1
        grammar_txt = host.read_text_file(args.grammar)
        parser = Parser(grammar_txt, args.grammar)
    ast, err, _ = parser.parse()
    if err:
        host.print_(err, stream=host.stderr)
        return None, 1
    return Grammar(ast), 0
def _pretty_print_grammar(host, args, grammar):
    """Write the pretty-printed grammar to the output target; returns 0."""
    printer = Printer(grammar)
    _write(host, args.output, printer.dumps())
    return 0
def _interpret_grammar(host, args, grammar):
    """Run *grammar* over the input text and write the result.

    Reads from stdin when args.input is '-'. On parse error, prints it to
    stderr and returns 1; otherwise serializes the result (raw string via
    -s/--as-string, JSON otherwise) and returns 0.
    """
    if args.input == '-':
        path, contents = ('<stdin>', host.stdin.read())
    else:
        path, contents = (args.input, host.read_text_file(args.input))

    out, err, _ = Interpreter(grammar, args.memoize).interpret(contents,
                                                               path)
    if err:
        host.print_(err, stream=host.stderr)
        return 1

    if out is None:
        out = ''
    if args.as_string:
        out = _as_string(out)
    else:
        out = json.dumps(out, ensure_ascii=False)
    if args.no_appended_newline:
        eol = ''
    else:
        eol = '\n'
    # FIX: the original if/else wrote the identical value on both
    # branches; collapsed the duplicated call into one.
    _write(host, args.output, out + eol)
    return 0
def _as_string(obj):
if isinstance(obj, list):
return ''.join(_as_string(el) for el in obj)
return str(obj)
def _write(host, path, contents):
    """Write ``contents`` to ``path``, or to stdout when path is '-'."""
    if path == '-':
        host.print_(contents, end='')
    else:
        host.write_text_file(path, contents)
if __name__ == '__main__':  # pragma: no cover
    # Exercised only when tool.py is invoked directly (i.e. in a
    # subprocess), so no coverage data is ever collected for these lines.
    sys.exit(main())
| {
"content_hash": "f79e183d1c5c569b2f36d77684d4f597",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 79,
"avg_line_length": 32.68036529680365,
"alnum_prop": 0.5900516976386754,
"repo_name": "dpranke/glop",
"id": "93705e077d486ee0237764cc865980ed5863920e",
"size": "7767",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "glop/tool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "19409"
},
{
"name": "Python",
"bytes": "137086"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import itertools
# True on Python 3; selects the py3 spelling of the renamed names below.
PYTHON3 = sys.version_info >= (3, 0)

# Names that were renamed between Python 2 and 3; import them from this
# module instead of using the version-specific spellings directly.
zip_longest = itertools.zip_longest if PYTHON3 else itertools.izip_longest
unicode_t = str if PYTHON3 else unicode  # the text type
long_t = int if PYTHON3 else long  # the arbitrary-precision int type

if not PYTHON3:
    # Python 2.x: shadow the module-level names with the lazy variants so
    # callers get Python-3-style range/zip from this module.
    range = xrange
    zip = itertools.izip
| {
"content_hash": "22227b379bec545221a3148b1219c005",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 82,
"avg_line_length": 24.8,
"alnum_prop": 0.7473118279569892,
"repo_name": "kmaehashi/jubakit",
"id": "ad5527172254655b30fa0e99e5725880d710f91d",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jubakit/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111862"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from mainapp.functions.city_to_ags import city_to_ags_all
class Command(BaseCommand):
    # Management command: resolve a city name to its AGS (municipality
    # key) via wikidata and print every candidate match.
    help = "Queries wikidata to get the ags of a city"

    def add_arguments(self, parser):
        """Register the positional city-name argument."""
        parser.add_argument("city-name", type=str)

    def handle(self, *args, **options):
        """Look up the city and print one '<ags> <name>' line per match."""
        results = city_to_ags_all(options["city-name"])
        if not results:
            self.stdout.write(self.style.NOTICE("Not found"))
        # falls through to the loop even when empty; it simply does nothing
        for i in results:
            self.stdout.write("{} {}\n".format(i[0], i[1]))
| {
"content_hash": "9550bd97aea39fb22aec22ade8b3e29f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 61,
"avg_line_length": 32.588235294117645,
"alnum_prop": 0.6425992779783394,
"repo_name": "meine-stadt-transparent/meine-stadt-transparent",
"id": "6b35814eb8baa0bcd3454be63e93adcef8991c26",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mainapp/management/commands/city_to_ags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2397"
},
{
"name": "HTML",
"bytes": "158632"
},
{
"name": "JavaScript",
"bytes": "62206"
},
{
"name": "Python",
"bytes": "601144"
},
{
"name": "SCSS",
"bytes": "40214"
},
{
"name": "Shell",
"bytes": "1363"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
# NOTE: auto-generated Himesis pattern graph -- node attributes, labels
# and GUID seeds must stay exactly as emitted by the generator.
class HUnitConnectIEC_CompleteLHS(HimesisPreConditionPatternLHS):
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HUnitConnectIEC_CompleteLHS
        """
        # Flag this instance as compiled now
        self.is_compiled = True
        super(HUnitConnectIEC_CompleteLHS, self).__init__(name='HUnitConnectIEC_CompleteLHS', num_nodes=0, edges=[])
        # Add the edges
        self.add_edges([])
        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """return True"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitConnectIEC_CompleteLHS')
        self["equations"] = []
        # Set the node attributes
        # match class Item(Item) node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """return True"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["mm__"] = """MT_pre__Item"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Item')
        # apply class Content(Content) node
        self.add_node()
        self.vs[1]["MT_pre__attr1"] = """return True"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["mm__"] = """MT_pre__Content"""
        self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Content')
        # apply class Entry(Entry) node
        self.add_node()
        self.vs[2]["MT_pre__attr1"] = """return True"""
        self.vs[2]["MT_label__"] = """3"""
        self.vs[2]["mm__"] = """MT_pre__Entry"""
        self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Entry')
        # apply association null--content-->nullnode
        self.add_node()
        self.vs[3]["MT_pre__attr1"] = """return attr_value == "content" """
        self.vs[3]["MT_label__"] = """4"""
        self.vs[3]["mm__"] = """MT_pre__directLink_T"""
        self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Entryassoc3Content')
        # trace association null--trace-->nullnode
        self.add_node()
        self.vs[4]["MT_label__"] = """5"""
        self.vs[4]["mm__"] = """MT_pre__trace_link"""
        self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Entryassoc4Item')
        # Add the edges (source node index, target node index)
        self.add_edges([
            (2,3),  # apply class null(Entry) -> association content
            (3,1),  # association null -> apply class null(Content)
            (2,4),  # apply class null(Item) -> backward_association
            (4,0),  # backward_associationnull -> match_class null(Item)
        ])

    # define evaluation methods for each match class.
    def eval_attr11(self, attr_value, this):
        return True

    # define evaluation methods for each apply class.
    def eval_attr12(self, attr_value, this):
        return True

    def eval_attr13(self, attr_value, this):
        return True

    # define evaluation methods for each match association.
    # define evaluation methods for each apply association.
    def eval_attr14(self, attr_value, this):
        return attr_value == "content"

    def constraint(self, PreNode, graph):
        return True
| {
"content_hash": "1b2a8a3bae783eb469837a981ede29dc",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 110,
"avg_line_length": 31.318681318681318,
"alnum_prop": 0.6463157894736842,
"repo_name": "levilucio/SyVOLT",
"id": "fa1e5e57b0760a142e876f2d2c5ab0db88c72653",
"size": "2850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RSS2ATOM/contracts/unit/HUnitConnectIEC_CompleteLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""
partition_suggestion.py
purpose: Given Nx, Ny, Nz and a number of processes, suggest a partitioning strategy that would result in
more-or-less cube-shaped partitions
"""
import random
from collections import deque
def random_partition(factors, n_factors):
    """Randomly distribute prime factors over three axes.

    factors   -- list of prime factors of the process count
    n_factors -- [cx, cy, cz]: how many factors to draw (without
                 replacement) for each axis

    Returns [px, py, pz], the product of the drawn factors per axis.
    """
    pool = list(factors)  # local copy; draws are destructive
    dims = [1, 1, 1]
    for axis in range(3):
        for _ in range(n_factors[axis]):
            pick = random.choice(pool)
            pool.remove(pick)
            dims[axis] *= pick
    return dims
class Partition:
    """One candidate decomposition of an Nx x Ny x Nz grid into
    px*py*pz sub-domains, scored by the surface-to-volume ratio of a
    single sub-domain (lower = closer to a cube = better)."""

    def __init__(self, Nx, Ny, Nz, part):
        self.Nx = Nx
        self.Ny = Ny
        self.Nz = Nz
        self.px, self.py, self.pz = part[0], part[1], part[2]
        self.set_score()

    def set_score(self):
        # edge lengths of a single sub-domain
        edge_x = self.Nx / float(self.px)
        edge_y = self.Ny / float(self.py)
        edge_z = self.Nz / float(self.pz)
        # surface-to-volume ratio of the typical partition
        volume = edge_x * edge_y * edge_z
        surface = 2. * edge_x * edge_y + 2. * edge_x * edge_z + 2. * edge_z * edge_y
        self.score = surface / volume

    def get_score(self):
        return self.score

    def get_partition(self):
        return [self.px, self.py, self.pz]
def partitionfunc(n, k, l=1):
    """Yield every length-k partition of integer n with elements >= l.

    n -- the integer to partition
    k -- the length of each partition
    l -- the minimum partition element size

    Partitions are yielded as non-decreasing tuples, e.g.
    partitionfunc(5, 2) yields (1, 4) then (2, 3).

    Fix: the original used ``raise StopIteration`` to end the generator;
    under PEP 479 (Python 3.7+) that escapes as RuntimeError. A plain
    ``return`` is the correct way to finish a generator.
    """
    if k < 1:
        return
    if k == 1:
        if n >= l:
            yield (n,)
        return
    # First element i, then partition the remainder with minimum i so the
    # output stays non-decreasing and free of duplicates.
    for i in range(l, n + 1):
        for rest in partitionfunc(n - i, k - 1, i):
            yield (i,) + rest
def primes(n):
    """Return the prime factorization of positive integer n as a list.

    Repeated factors appear once per multiplicity, e.g.
    primes(12) == [2, 2, 3]. primes(1) == [].
    """
    factorization = []
    divisor = 2
    # Trial division up to sqrt(n); n shrinks as factors are removed.
    while divisor * divisor <= n:
        while n % divisor == 0:
            factorization.append(divisor)
            n //= divisor
        divisor += 1
    # Any remainder greater than 1 is itself prime.
    if n > 1:
        factorization.append(n)
    return factorization
def factors(n):
    """Return a list of all positive divisors of n, in increasing order.

    n -- positive integer

    Fix: the original returned ``filter(...)``, which on Python 3 is a lazy
    iterator rather than the documented list (and is exhausted after one
    pass). A list comprehension honours the contract on both Python 2 and 3.
    """
    return [i for i in range(1, n + 1) if n % i == 0]
def part_advisor(Nx, Ny, Nz, num_procs, numTrials=2000):
    """Suggest a 3-D processor grid [px, py, pz] with px*py*pz == num_procs.

    Nx, Ny, Nz -- number of lattice points in each direction
    num_procs -- the number of partitions to create
    numTrials -- random samples drawn per grouping of the prime factors

    Strategy: enumerate every way of splitting the prime factors of
    num_procs into three groups (including rotations of the group sizes),
    randomly assign factors to directions, and keep the sampled partition
    whose blocks have the lowest surface-to-volume ratio (most cube-like).

    Fixes relative to the original:
    * ``bestScore`` is now updated when a better sample is found; the
      original never updated it, so every sample "won" and the function
      effectively returned the last partition sampled, not the best one.
    * Removed the unreachable code after the ``return`` statement
      (including a Python-2 ``print`` that is a syntax error on Python 3).
      The prime-factor feasibility check there could never trigger anyway,
      because two 1s are appended to the factor list below.
    """
    p_facts = primes(num_procs)
    # Append two 1s so effectively 1-D or 2-D partitionings can be chosen,
    # and so a prime num_procs still admits a 3-way grouping.
    p_facts.append(1)
    p_facts.append(1)

    bestScore = float("inf")
    bestPartition = None
    for p in partitionfunc(len(p_facts), 3):
        # For each grouping of factor counts, also try its rotations so
        # every direction gets a chance at each group size.
        p_deque = deque(p)
        for _ in range(3):
            p_deque.rotate(1)
            # Sample random factor-to-direction assignments for this grouping.
            # (NOTE(review): the original's stripped indentation made the
            # nesting ambiguous; sampling per rotation is assumed, otherwise
            # the three rotations would cancel out.)
            for _ in range(numTrials):
                r_part = random_partition(p_facts, p_deque)
                sample = Partition(Nx, Ny, Nz, r_part)
                if sample.get_score() < bestScore:
                    bestScore = sample.get_score()
                    bestPartition = sample
    return bestPartition.get_partition()
if __name__ == "__main__":
    # Smoke test / demonstration of the partition advisor.
    # Fix: converted Python-2 print statements to the single-argument
    # print() form, which behaves identically on Python 2 and 3, and used
    # floor division (//) so the reported block sizes stay integral.
    Nx = 150
    Ny = 150
    Nz = 1000
    num_procs = 8
    partition = part_advisor(Nx, Ny, Nz, num_procs)
    bestPartition = Partition(Nx, Ny, Nz, partition)
    print('Best partition found has score = %g \n' % bestPartition.get_score())
    print(bestPartition.get_partition())
    print('Block sizes approximately %i x %i x %i'
          % (Nx // partition[0], Ny // partition[1], Nz // partition[2]))
| {
"content_hash": "ada8f0178561764b9abf1f788fceb24a",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 107,
"avg_line_length": 26.715686274509803,
"alnum_prop": 0.5726605504587156,
"repo_name": "stu314159/pyNFC",
"id": "6012a851c28f4c8f0b5400d4df17234619c3c584",
"size": "5450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "partition_suggestion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "51749"
},
{
"name": "MATLAB",
"bytes": "631"
},
{
"name": "Makefile",
"bytes": "3324"
},
{
"name": "Python",
"bytes": "218342"
},
{
"name": "Shell",
"bytes": "23943"
}
],
"symlink_target": ""
} |
"""
Classes for generating pillar rooms
"""
from random import Random
from pyherc.aspects import log_debug
from pyherc.generators.level.room.squareroom import SquareRoomGenerator
from pyherc.generators.level.partitioners import section_floor, section_wall
class PillarRoomGenerator():
    """Generator that draws a square room with a pillar near each corner.

    .. versionadded:: 0.8
    """

    @log_debug
    def __init__(self, floor_tile, corridor_tile, empty_tile, pillar_tile,
                 level_types):
        """Default constructor

        :param floor_tile: id of the tile to use for floors
        :type floor_tile: integer
        :param corridor_tile: id of the tile to use for corridor floors
        :type corridor_tile: integer
        :param empty_tile: id of the empty wall tile
        :type empty_tile: integer
        :param level_types: types of level this generator can be used
        :type level_types: [string]
        """
        self.floor_tile = floor_tile
        self.corridor_tile = corridor_tile
        self.empty_tile = empty_tile
        self.level_types = level_types
        self.pillar_tile = pillar_tile
        # Delegate the basic room shape to the square room generator.
        self.square_generator = SquareRoomGenerator(floor_tile,
                                                    empty_tile,
                                                    corridor_tile,
                                                    level_types)
        self.rng = Random()

    def __call__(self, section):
        """Generate room"""
        self.generate_room(section)

    @log_debug
    def generate_room(self, section):
        """Generate room

        :param section: section for generator to draw to
        :type section: Section
        """
        self.square_generator.generate_room(section)
        # Diagonal offsets pointing inward from each corner; the order
        # matches square_generator.room_corners.
        offsets = [(1, 1), (-1, 1), (-1, -1), (1, -1)]
        for index, corner in enumerate(self.square_generator.room_corners):
            self.add_pillar(section, corner, offsets[index])

    @log_debug
    def add_pillar(self, section, corner, pillar):
        """Add pillar if location is free"""
        spot = (corner[0] + pillar[0],
                corner[1] + pillar[1])
        # Only decorate locations where the wall is still empty.
        if section_wall(section, spot) == self.empty_tile:
            section_wall(section,
                         spot,
                         self.pillar_tile,
                         None)
            section_floor(section,
                          spot,
                          None,
                          None)
| {
"content_hash": "2a7090266c3f6e8c9b48ede8d161d30f",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 76,
"avg_line_length": 31.25925925925926,
"alnum_prop": 0.5363349131121643,
"repo_name": "tuturto/pyherc",
"id": "2be18107da73f7c926b4a43ff552fcd4744f9bab",
"size": "3658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyherc/generators/level/room/pillarroom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "8825"
},
{
"name": "HTML",
"bytes": "529"
},
{
"name": "Hy",
"bytes": "603756"
},
{
"name": "Python",
"bytes": "975380"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Build and exercise one model combination on the ozone dataset.
# The four lists presumably select (transformation, trend, periodic,
# autoregressive) components — confirm against build_model's signature.
testmod.build_model( ['None'] , ['MovingMedian'] , ['Seasonal_DayOfMonth'] , ['SVR'] );
"content_hash": "616e1281f140de1549fe8cd2a415113b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 87,
"avg_line_length": 40,
"alnum_prop": 0.7125,
"repo_name": "antoinecarme/pyaf",
"id": "618904be77cf0917e25892401398f93b4bf6e91f",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingMedian_Seasonal_DayOfMonth_SVR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``person`` app.

    Tightens ``Dentist.register_number`` to a positive integer and makes
    ``Patient.tel`` optional (nullable, blankable, max length 250).
    """

    dependencies = [
        ('person', '0025_auto_20160227_1505'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dentist',
            name='register_number',
            field=models.PositiveIntegerField(),
        ),
        migrations.AlterField(
            model_name='patient',
            name='tel',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
    ]
| {
"content_hash": "9da7ae96707924f02d854a90a19b4e7b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 24.347826086956523,
"alnum_prop": 0.5767857142857142,
"repo_name": "nanomolina/JP",
"id": "39bc8903a08fd25a5772e278aa2856177dd94f0e",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/odontology/person/migrations/0026_auto_20160228_0216.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189771"
},
{
"name": "HTML",
"bytes": "222882"
},
{
"name": "JavaScript",
"bytes": "42164"
},
{
"name": "Python",
"bytes": "191397"
}
],
"symlink_target": ""
} |
import cgi

# Fix: a CGI response requires a blank line between the header block and
# the body (RFC 3875). The original printed the body straight after the
# Content-type header, producing a malformed response.
print("Content-type: text/html")
print()

import sys
import os

# Make the package parent importable so `lib` resolves when run via CGI.
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))

from lib import helpers

print(helpers.render('header', {'title': "Tracks"}))
# NOTE(review): the markup below is malformed HTML (<a> inside <ul> without
# <li>, unclosed </a>) — kept byte-identical; fix upstream if intentional
# output matters.
print('''
<ul><a href="web.py">Web</ul>
''')
print(helpers.render('footer'))
| {
"content_hash": "e118305cc1895a876deaa6d692c264b3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 103,
"avg_line_length": 25.235294117647058,
"alnum_prop": 0.6923076923076923,
"repo_name": "Secretmapper/updevcamp-session-2-dist",
"id": "a94280ad126ce2c88ea324ddc182505e768904ee",
"size": "454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "form/cgi-bin/lectures/mvc-5/public/tracks.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18069"
}
],
"symlink_target": ""
} |
from helpers.db import db
from uuid import UUID
from enum import Enum
from datetime import datetime
import json
def translate_model_data_to_json_safe_data(pg_data):
    """Recursively convert model values into JSON-serializable equivalents.

    datetimes become ISO-8601 strings, UUIDs become their string form and
    Enums collapse to their ``.value``. Dicts and lists are converted
    in place (mutated); any other value passes through unchanged.
    """
    if isinstance(pg_data, datetime):
        return pg_data.isoformat()
    if isinstance(pg_data, UUID):
        return str(pg_data)
    if isinstance(pg_data, Enum):
        return pg_data.value
    if isinstance(pg_data, dict):
        # Rewrite values in place; keys are untouched so iteration is safe.
        for key in pg_data:
            pg_data[key] = translate_model_data_to_json_safe_data(pg_data[key])
        return pg_data
    if isinstance(pg_data, list):
        for index, item in enumerate(pg_data):
            pg_data[index] = translate_model_data_to_json_safe_data(item)
        return pg_data
    return pg_data
class Base(db.Model):
    """
    General abstractions for all models: integer primary key plus
    created/updated timestamps, with dict/JSON serialization helpers.
    """

    __abstract__ = True

    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Stamped once when the row is created.
    date_created = db.Column(db.DateTime, default=db.func.now())
    # Refreshed on every update via onupdate.
    date_updated = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())

    def to_dict(self, filters=[]):
        """Return a JSON-safe dict of this row, dropping keys named in *filters*."""
        raw = {}
        for column in self.__table__.columns:
            raw[column.name] = getattr(self, column.name)
        safe = translate_model_data_to_json_safe_data(raw)
        for key in set(filters):
            safe.pop(key, None)
        return safe

    def to_json(self, filters=[]):
        """Return this row serialized as a JSON string."""
        return json.dumps(self.to_dict(filters=filters))
| {
"content_hash": "2624b58fd5a52bf32773a4759bc66666",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 85,
"avg_line_length": 22.133333333333333,
"alnum_prop": 0.6822289156626506,
"repo_name": "davidhariri/api",
"id": "f701ec1dc77669123f5a5502f24c68cb534d9b68",
"size": "1328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27949"
}
],
"symlink_target": ""
} |
import os
import uuid
from google.cloud import firestore
import pytest
import snippets
os.environ['GOOGLE_CLOUD_PROJECT'] = os.environ['FIRESTORE_PROJECT']
UNIQUE_STRING = str(uuid.uuid4()).split("-")[0]


class TestFirestoreClient(firestore.Client):
    """Firestore client that suffixes every collection name with a run-unique
    string, so concurrent test runs cannot clobber each other's data."""

    def __init__(self, *args, **kwargs):
        self._UNIQUE_STRING = UNIQUE_STRING
        self._super = super(TestFirestoreClient, self)
        self._super.__init__(*args, **kwargs)

    def collection(self, collection_name, *args, **kwargs):
        # Namespace the collection for this test session.
        suffixed = '{}-{}'.format(collection_name, self._UNIQUE_STRING)
        return self._super.collection(suffixed, *args, **kwargs)
# Monkey-patch the snippets module so every snippet runs against the
# test-isolated client defined above.
snippets.firestore.Client = TestFirestoreClient


@pytest.fixture
def db():
    """Yield a namespaced Firestore client for tests needing direct access."""
    yield snippets.firestore.Client()
# Smoke tests: each runs the corresponding documentation snippet and relies
# on the snippet raising on failure. Tests taking the `db` fixture also
# assert on the document ids returned by the query snippets.


def test_quickstart_new_instance():
    snippets.quickstart_new_instance()


def test_quickstart_add_data_two():
    snippets.quickstart_add_data_two()


def test_quickstart_get_collection():
    snippets.quickstart_get_collection()


def test_quickstart_add_data_one():
    snippets.quickstart_add_data_one()


def test_add_from_dict():
    snippets.add_from_dict()


def test_add_data_types():
    snippets.add_data_types()


def test_add_example_data():
    snippets.add_example_data()


def test_array_contains_any(db):
    query = snippets.array_contains_any_queries(db)

    expected = {'SF', 'LA', 'DC'}
    actual = {document.id for document in query.stream()}

    assert expected == actual


def test_query_filter_in_query_without_array(db):
    query = snippets.in_query_without_array(db)

    expected = {'SF', 'LA', 'DC', 'TOK'}
    actual = {document.id for document in query.stream()}

    assert expected == actual


def test_query_filter_in_query_with_array(db):
    query = snippets.in_query_with_array(db)

    expected = {'DC'}
    actual = {document.id for document in query.stream()}

    assert expected == actual


def test_add_custom_class_with_id():
    snippets.add_custom_class_with_id()


def test_add_data_with_id():
    snippets.add_data_with_id()


def test_add_custom_class_generated_id():
    snippets.add_custom_class_generated_id()


def test_add_new_doc():
    snippets.add_new_doc()


def test_get_simple_query():
    snippets.get_simple_query()


def test_array_contains_filter(capsys):
    snippets.array_contains_filter()
    out, _ = capsys.readouterr()
    # The snippet prints the matching city ids.
    assert 'SF' in out


def test_get_full_collection():
    snippets.get_full_collection()


def test_get_custom_class():
    snippets.get_custom_class()


def test_get_check_exists():
    snippets.get_check_exists()
# Structure / update / query-shape snippets. Most are run-for-effect smoke
# tests; those taking `db` seed documents first, and those taking `capsys`
# assert on the snippet's printed output.


def test_structure_subcollection_ref():
    snippets.structure_subcollection_ref()


def test_structure_collection_ref():
    snippets.structure_collection_ref()


def test_structure_doc_ref_alternate():
    snippets.structure_doc_ref_alternate()


def test_structure_doc_ref():
    snippets.structure_doc_ref()


def test_update_create_if_missing():
    snippets.update_create_if_missing()


def test_update_doc():
    snippets.update_doc()


def test_update_doc_array(capsys):
    snippets.update_doc_array()
    out, _ = capsys.readouterr()
    assert 'greater_virginia' in out


def test_update_multiple():
    snippets.update_multiple()


def test_update_server_timestamp(db):
    # Seed the document the snippet updates.
    db.collection(u'objects').document(u'some-id').set({'timestamp': 0})
    snippets.update_server_timestamp()


def test_update_data_transaction(db):
    db.collection('cities').document('SF').set({'population': 1})
    snippets.update_data_transaction()


def test_update_data_transaction_result(db):
    db.collection('cities').document('SF').set({'population': 1})
    snippets.update_data_transaction_result()


def test_update_data_batch(db):
    db.collection('cities').document('SF').set({})
    db.collection('cities').document('LA').set({})
    snippets.update_data_batch()


def test_update_nested():
    snippets.update_nested()


def test_compound_query_example():
    snippets.compound_query_example()


def test_compound_query_valid_multi_clause():
    snippets.compound_query_valid_multi_clause()


def test_compound_query_simple():
    snippets.compound_query_simple()


def test_compound_query_invalid_multi_field():
    snippets.compound_query_invalid_multi_field()


def test_compound_query_single_clause():
    snippets.compound_query_single_clause()


def test_compound_query_valid_single_field():
    snippets.compound_query_valid_single_field()


def test_order_simple_limit():
    snippets.order_simple_limit()


def test_order_simple_limit_desc():
    snippets.order_simple_limit_desc()


def test_order_multiple():
    snippets.order_multiple()


def test_order_where_limit():
    snippets.order_where_limit()


def test_order_limit_to_last():
    snippets.order_limit_to_last()


def test_order_where_invalid():
    snippets.order_where_invalid()


def test_order_where_valid():
    snippets.order_where_valid()


def test_cursor_simple_start_at():
    snippets.cursor_simple_start_at()


def test_cursor_simple_end_at():
    snippets.cursor_simple_end_at()


def test_snapshot_cursors(capsys):
    snippets.snapshot_cursors()
    out, _ = capsys.readouterr()
    assert 'SF' in out
    assert 'TOK' in out
    assert 'BJ' in out


def test_cursor_paginate():
    snippets.cursor_paginate()


def test_cursor_multiple_conditions():
    snippets.cursor_multiple_conditions()
# Listener tests are retried (flaky) because watch callbacks fire
# asynchronously; delete tests seed data first where needed.


@pytest.mark.flaky(max_runs=3)
def test_listen_document(capsys):
    snippets.listen_document()
    out, _ = capsys.readouterr()
    assert 'Received document snapshot: SF' in out


@pytest.mark.flaky(max_runs=3)
def test_listen_multiple(capsys):
    snippets.listen_multiple()
    out, _ = capsys.readouterr()
    assert 'Current cities in California:' in out
    assert 'SF' in out


@pytest.mark.flaky(max_runs=3)
def test_listen_for_changes(capsys):
    snippets.listen_for_changes()
    out, _ = capsys.readouterr()
    assert 'New city: MTV' in out
    assert 'Modified city: MTV' in out
    assert 'Removed city: MTV' in out


def test_delete_single_doc():
    snippets.delete_single_doc()


def test_delete_field(db):
    # Seed the field the snippet deletes.
    db.collection('cities').document('BJ').set({'capital': True})
    snippets.delete_field()


def test_delete_full_collection():
    snippets.delete_full_collection()


@pytest.mark.skip(reason="Dependant on a composite index being created,"
                         "however creation of the index is dependent on"
                         "having the admin client and definition integrated"
                         "into the test setup")
# TODO: b/132092178
def test_collection_group_query(db):
    museum_docs = snippets.collection_group_query(db)
    names = set([museum.name for museum in museum_docs])
    assert names == {u'Legion of Honor', u'The Getty',
                     u'National Air and Space Museum',
                     u'National Museum of Nature and Science',
                     u'Beijing Ancient Observatory'}


def test_list_document_subcollections():
    snippets.list_document_subcollections()


def test_create_and_build_bundle():
    bundle, buffer = snippets.create_and_build_bundle()
    assert "latest-stories-query" in bundle.named_queries
| {
"content_hash": "d0e98ed65fcf21acea7542917fd5a5a9",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 76,
"avg_line_length": 22.5062893081761,
"alnum_prop": 0.6861813609054073,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "46cfc01e55846f144177e5579813672853ad81b7",
"size": "7733",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "firestore/cloud-client/snippets_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
from django.contrib import admin

from ..models.foo import Foo

# Expose the Foo model in the Django admin using the default ModelAdmin.
admin.site.register(Foo)
| {
"content_hash": "45a3cacef5538d9d71ba4bebb38a6b3f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 18.8,
"alnum_prop": 0.7446808510638298,
"repo_name": "yephper/django",
"id": "d1a663027a0c932acc4c679366149fa51c589f34",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/admin_scripts/complex_app/admin/foo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
"""
Holds event definitions to be used by services for generating events
"""
from .base import EventBase
from .types import (
ActivePolicyBundleContentChanged,
ActivePolicyBundleIdChanged,
FeedGroupSyncCompleted,
FeedGroupSyncFailed,
FeedGroupSyncStarted,
FeedSyncCompleted,
FeedSyncFailed,
FeedSyncStarted,
FeedSyncTaskCompleted,
FeedSyncTaskFailed,
FeedSyncTaskStarted,
ImageAnalysisFailed,
ImageAnalysisSuccess,
ImageArchived,
ImageArchiveDeleted,
ImageArchiveDeleteFailed,
ImageArchivingFailed,
ImageRegistryLookupFailed,
ImageRestored,
ImageRestoreFailed,
ListTagsFailed,
PolicyEngineLoadAnalysisFailed,
RandomWisdomEvent,
SaveAnalysisFailed,
ServiceAuthzPluginHealthCheckFailed,
ServiceDowned,
ServiceOrphaned,
ServiceRemoved,
TagManifestParseFailed,
TagPolicyEvaluationUpdated,
TagVulnerabilityUpdated,
UserAnalyzeImageCompleted,
UserAnalyzeImageFailed,
)
## TODO: Update references in this __init__ to point at types.py, fix the call sites that invoke these events, and then add the corresponding API call.
| {
"content_hash": "97c5429c5f70cf8bd54bf7d6f96d5816",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 113,
"avg_line_length": 26.452380952380953,
"alnum_prop": 0.7632763276327633,
"repo_name": "anchore/anchore-engine",
"id": "f92cdd22cf4844b160d0d875a200886844e27e74",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anchore_engine/subsys/events/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
"""
TNC: A Python interface to the TNC non-linear optimizer
TNC is a non-linear optimizer. To use it, you must provide a function to
minimize. The function must take one argument: the list of coordinates where to
evaluate the function; and it must return either a tuple, whose first element is the
value of the function, and whose second argument is the gradient of the function
(as a list of values); or None, to abort the minimization.
"""
from __future__ import division, print_function, absolute_import
from scipy.optimize import moduleTNC
from .optimize import (MemoizeJac, OptimizeResult, _check_unknown_options,
_prepare_scalar_function)
from ._constraints import old_bound_to_new
from numpy import inf, array, zeros, asfarray
__all__ = ['fmin_tnc']
# Bit-mask flags selecting which diagnostic messages the optimizer prints;
# combine with + (or |) and pass as `messages` / map via `disp`.
MSG_NONE = 0  # No messages
MSG_ITER = 1  # One line per iteration
MSG_INFO = 2  # Informational messages
MSG_VERS = 4  # Version info
MSG_EXIT = 8  # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT

# Human-readable description of each message level.
MSGS = {
        MSG_NONE: "No messages",
        MSG_ITER: "One line per iteration",
        MSG_INFO: "Informational messages",
        MSG_VERS: "Version info",
        MSG_EXIT: "Exit reasons",
        MSG_ALL: "All messages"
}

# Return codes produced by the underlying C implementation (moduleTNC).
INFEASIBLE = -1  # Infeasible (lower bound > upper bound)
LOCALMINIMUM = 0  # Local minimum reached (|pg| ~= 0)
FCONVERGED = 1  # Converged (|f_n-f_(n-1)| ~= 0)
XCONVERGED = 2  # Converged (|x_n-x_(n-1)| ~= 0)
MAXFUN = 3  # Max. number of function evaluations reached
LSFAIL = 4  # Linear search failed
CONSTANT = 5  # All lower bounds are equal to the upper bounds
NOPROGRESS = 6  # Unable to progress
USERABORT = 7  # User requested end of minimization

# Human-readable description of each return code (used in OptimizeResult.message).
RCSTRINGS = {
        INFEASIBLE: "Infeasible (lower bound > upper bound)",
        LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
        FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
        XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
        MAXFUN: "Max. number of function evaluations reached",
        LSFAIL: "Linear search failed",
        CONSTANT: "All lower bounds are equal to the upper bounds",
        NOPROGRESS: "Unable to progress",
        USERABORT: "User requested end of minimization"
}
# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
# SciPy
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1, disp=None, callback=None):
    """
    Minimize a function with variables subject to bounds, using
    gradient information in a truncated Newton algorithm. This
    method wraps a C implementation of the algorithm.

    Parameters
    ----------
    func : callable ``func(x, *args)``
        Function to minimize.  Must do one of:

        1. Return f and g, where f is the value of the function and g its
           gradient (a list of floats).

        2. Return the function value but supply gradient function
           separately as `fprime`.

        3. Return the function value and set ``approx_grad=True``.

        If the function returns None, the minimization
        is aborted.
    x0 : array_like
        Initial estimate of minimum.
    fprime : callable ``fprime(x, *args)``, optional
        Gradient of `func`. If None, then either `func` must return the
        function value and the gradient (``f,g = func(x, *args)``)
        or `approx_grad` must be True.
    args : tuple, optional
        Arguments to pass to function.
    approx_grad : bool, optional
        If true, approximate the gradient numerically.
    bounds : list, optional
        (min, max) pairs for each element in x0, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float, optional
        Used if approx_grad is True. The stepsize in a finite
        difference approximation for fprime.
    scale : array_like, optional
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : array_like, optional
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages : int, optional
        Bit mask used to select messages display during
        minimization values defined in the MSGS dict. Defaults to
        MGS_ALL.
    disp : int, optional
        Integer interface to messages. 0 = no message, 5 = all messages
    maxCGit : int, optional
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient if maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxfun : int, optional
        Maximum number of function evaluation. If None, maxfun is
        set to max(100, 10*len(x0)). Defaults to None.
    eta : float, optional
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float, optional
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float, optional
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float, optional
        Minimum function value estimate. Defaults to 0.
    ftol : float, optional
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0 defaults to -1.
    xtol : float, optional
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    pgtol : float, optional
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float, optional
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    x : ndarray
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code, see below

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'TNC' `method` in particular.

    Notes
    -----
    The underlying algorithm is truncated Newton, also called
    Newton Conjugate-Gradient. This method differs from
    scipy.optimize.fmin_ncg in that

    1. it wraps a C implementation of the algorithm
    2. it allows each variable to be given an upper and lower bound.

    The algorithm incorporates the bound constraints by determining
    the descent direction as in an unconstrained truncated Newton,
    but never taking a step-size large enough to leave the space
    of feasible x's. The algorithm keeps track of a set of
    currently active constraints, and ignores them when computing
    the minimum allowable step size. (The x's associated with the
    active constraint are kept fixed.) If the maximum allowable
    step size is zero then a new constraint is added. At the end
    of each iteration one of the constraints may be deemed no
    longer active and removed. A constraint is considered
    no longer active is if it is currently active
    but the gradient for that variable points inward from the
    constraint. The specific constraint removed is the one
    associated with the variable of largest index whose
    constraint is no longer active.

    Return codes are defined as follows::

        -1 : Infeasible (lower bound > upper bound)
         0 : Local minimum reached (|pg| ~= 0)
         1 : Converged (|f_n-f_(n-1)| ~= 0)
         2 : Converged (|x_n-x_(n-1)| ~= 0)
         3 : Max. number of function evaluations reached
         4 : Linear search failed
         5 : All lower bounds are equal to the upper bounds
         6 : Unable to progress
         7 : User requested end of minimization

    References
    ----------
    Wright S., Nocedal J. (2006), 'Numerical Optimization'

    Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
    SIAM Journal of Numerical Analysis 21, pp. 770-778

    """
    # handle fprime/approx_grad
    if approx_grad:
        # Gradient will be estimated numerically inside the solver.
        fun = func
        jac = None
    elif fprime is None:
        # func returns (f, g); memoize so value and gradient each cost one call.
        fun = MemoizeJac(func)
        jac = fun.derivative
    else:
        fun = func
        jac = fprime

    if disp is not None:  # disp takes precedence over messages
        mesg_num = disp
    else:
        # Map the legacy bit-mask `messages` onto the 0-5 `disp` scale.
        mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL)
    # build options
    opts = {'eps': epsilon,
            'scale': scale,
            'offset': offset,
            'mesg_num': mesg_num,
            'maxCGit': maxCGit,
            'maxiter': maxfun,
            'eta': eta,
            'stepmx': stepmx,
            'accuracy': accuracy,
            'minfev': fmin,
            'ftol': ftol,
            'xtol': xtol,
            'gtol': pgtol,
            'rescale': rescale,
            'disp': False}

    # Delegate to the `minimize`-style implementation and unpack the legacy
    # (x, nfeval, rc) return tuple.
    res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)

    return res['x'], res['nfev'], res['status']
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
                  eps=1e-8, scale=None, offset=None, mesg_num=None,
                  maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
                  minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
                  callback=None, finite_diff_rel_step=None, **unknown_options):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.

    Options
    -------
    eps : float or ndarray
        If `jac is None` the absolute step size used for numerical
        approximation of the jacobian via forward differences.
    scale : list of floats
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x] fo the others. Defaults to None.
    offset : float
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    disp : bool
        Set to True to print convergence messages.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient if maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxiter : int
        Maximum number of function evaluation. If None, `maxiter` is
        set to max(100, 10*len(x0)). Defaults to None.
    eta : float
        Severity of the line search. If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    minfev : float
        Minimum function value estimate. Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0 defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    gtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    finite_diff_rel_step : None or array_like, optional
        If `jac in ['2-point', '3-point', 'cs']` the relative step size to
        use for numerical approximation of the jacobian. The absolute step
        size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
        possibly adjusted to fit into the bounds. For ``method='3-point'``
        the sign of `h` is ignored. If None (default) then step is selected
        automatically.
    """
    _check_unknown_options(unknown_options)
    # Translate `minimize`-style option names back to the legacy TNC names.
    maxfun = maxiter
    fmin = minfev
    pgtol = gtol

    x0 = asfarray(x0).flatten()
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    # New-style (lb, ub) arrays, needed by _prepare_scalar_function so the
    # finite-difference steps stay inside the feasible region.
    new_bounds = old_bound_to_new(bounds)

    if mesg_num is not None:
        # Integer disp-style level takes precedence; map it to a bit mask.
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE

    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
                                  finite_diff_rel_step=finite_diff_rel_step,
                                  bounds=new_bounds)
    func_and_grad = sf.fun_and_grad

    """
    low, up   : the bounds (lists of floats)
                if low is None, the lower bounds are removed.
                if up is None, the upper bounds are removed.
                low and up defaults to None
    """
    # Expand the (min, max) pairs into separate low/up arrays, replacing
    # missing bounds with +/-inf as the C implementation expects.
    low = zeros(n)
    up = zeros(n)
    for i in range(n):
        if bounds[i] is None:
            l, u = -inf, inf
        else:
            l,u = bounds[i]
        if l is None:
            low[i] = -inf
        else:
            low[i] = l
        if u is None:
            up[i] = inf
        else:
            up[i] = u

    if scale is None:
        scale = array([])

    if offset is None:
        offset = array([])

    if maxfun is None:
        maxfun = max(100, 10*len(x0))

    # The C extension does the actual optimization; argument order is fixed
    # by its signature.
    rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale,
                                        offset, messages, maxCGit, maxfun,
                                        eta, stepmx, accuracy, fmin, ftol,
                                        xtol, pgtol, rescale, callback)

    # Re-evaluate at the solution to report consistent f and gradient values.
    funv, jacv = func_and_grad(x)

    return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=sf.nfev,
                          nit=nit, status=rc, message=RCSTRINGS[rc],
                          success=(-1 < rc < 3))
if __name__ == '__main__':
    # Small usage demonstration for the TNC optimizer.
    def example():
        """Minimize f(x) = x0**2 + |x1|**3 subject to simple bound constraints."""
        print("Example")

        # Objective returning both the function value and its analytic gradient.
        def objective(x):
            value = x[0] ** 2.0 + abs(x[1]) ** 3.0
            grad = [2.0 * x[0], 3.0 * abs(x[1]) ** 2.0]
            if x[1] < 0:
                grad[1] = -grad[1]
            return value, grad

        # Optimizer call
        x, nf, rc = fmin_tnc(objective, [-7, 3], bounds=([-10, 1], [10, 10]))
        print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc])
        print("x =", x)
        print("exact value = [0, 1]")
        print()

    example()
| {
"content_hash": "d9ed8f19b3073892fb388d657486c998",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 84,
"avg_line_length": 37.791469194312796,
"alnum_prop": 0.6082894406822172,
"repo_name": "arokem/scipy",
"id": "3cc9e64429c4a1cf0c4ba7b5e896f5b34fbbeb5b",
"size": "17152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/optimize/tnc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4399737"
},
{
"name": "C++",
"bytes": "649740"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368728"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12815696"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import logging
from typing import Optional
import nbformat
from great_expectations import DataContext
from great_expectations.datasource.types import DatasourceTypes
from great_expectations.render.renderer.notebook_renderer import BaseNotebookRenderer
try:
import black
except ImportError:
black = None
logger = logging.getLogger(__name__)
class DatasourceNewNotebookRenderer(BaseNotebookRenderer):
    """Render the CLI's "create a new Datasource" Jupyter notebook.

    Builds a notebook that walks the user through naming, configuring,
    testing, and saving a new Datasource of the requested type.
    """

    # Markdown guidance shown for SQL-backed datasources.
    SQL_DOCS = """\
### For SQL based Datasources:
Here we are creating an example configuration based on the database backend you specified in the CLI. The configuration contains an **InferredAssetSqlDataConnector**, which will add a Data Asset for each table in the database, a **ConfiguredAssetDataConnector**, which will add explicitly defined Data Assets, and a **RuntimeDataConnector**, which can accept SQL queries.
If any of these configuration options are not applicable, they can be removed. This is just an example, and you may customize this as you wish!
Also, if you would like to learn more about the **DataConnectors** used in this configuration, please see our docs on [InferredAssetDataConnectors](https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_configure_an_inferredassetdataconnector), [ConfiguredAssetDataConnectors](https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_configure_a_configuredassetdataconnector), and [RuntimeDataConnectors](https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_configure_a_runtimedataconnector).
Credentials will not be saved until you run the last cell. The credentials will be saved in `uncommitted/config_variables.yml` which should not be added to source control."""

    # Markdown guidance shown for file-based (pandas/Spark) datasources.
    FILES_DOCS = """### For files based Datasources:
Here we are creating an example configuration. The configuration contains an **InferredAssetFilesystemDataConnector** which will add a Data Asset for each file in the base directory you provided. It also contains a **RuntimeDataConnector** which can accept filepaths. This is just an example, and you may customize this as you wish!
Also, if you would like to learn more about the **DataConnectors** used in this configuration, including other methods to organize assets, handle multi-file assets, name assets based on parts of a filename, please see our docs on [InferredAssetDataConnectors](https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_configure_an_inferredassetdataconnector) and [RuntimeDataConnectors](https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/how_to_configure_a_runtimedataconnector).
"""

    # Intro markdown shown before the datasource-name code cell.
    DOCS_INTRO = """## Customize Your Datasource Configuration
**If you are new to Great Expectations Datasources,** you should check out our [how-to documentation](https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/connect_to_data_overview)
**My configuration is not so simple - are there more advanced options?**
Glad you asked! Datasources are versatile. Please see our [How To Guides](https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/connect_to_data_overview)!
Give your datasource a unique name:"""

    def __init__(
        self,
        context: DataContext,
        datasource_type: DatasourceTypes,
        datasource_yaml: str,
        datasource_name: str = "my_datasource",
        sql_credentials_snippet: Optional[str] = None,
    ) -> None:
        """Store the inputs needed to render the notebook.

        Args:
            context: Data context the datasource will be added to.
            datasource_type: Backend family (pandas/Spark/SQL).
            datasource_yaml: Example YAML config embedded in the notebook.
            datasource_name: Name used in the generated cells; an explicit
                ``None`` falls back to ``"my_datasource"``.
            sql_credentials_snippet: Optional credentials code cell
                (SQL datasources only).
        """
        super().__init__(context=context)
        self.datasource_type = datasource_type
        self.datasource_yaml = datasource_yaml
        self.sql_credentials_code_snippet = sql_credentials_snippet
        # Defensive fallback: callers may pass None explicitly despite the default.
        if datasource_name is None:
            datasource_name = "my_datasource"
        self.datasource_name = datasource_name

    def _add_header(self) -> None:
        """Add the title markdown cell and the context-bootstrapping code cell."""
        self.add_markdown_cell(
            f"""# Create a new {self.datasource_type.value} Datasource
Use this notebook to configure a new {self.datasource_type.value} Datasource and add it to your project."""
        )
        self.add_code_cell(
            """import great_expectations as ge
from great_expectations.cli.datasource import sanitize_yaml_and_save_datasource, check_if_datasource_name_exists
context = ge.get_context()""",
        )

    def _add_docs_cell(self) -> None:
        """Add intro docs, the datasource-name code cell, and type-specific docs."""
        self.add_markdown_cell(self.DOCS_INTRO)
        self.add_code_cell(f'datasource_name = "{self.datasource_name}"')
        if self.datasource_type in [DatasourceTypes.PANDAS, DatasourceTypes.SPARK]:
            self.add_markdown_cell(self.FILES_DOCS)
        elif self.datasource_type == DatasourceTypes.SQL:
            self.add_markdown_cell(self.SQL_DOCS)

    def _add_sql_credentials_cell(self) -> None:
        """Add the SQL credentials code cell (only called for SQL datasources)."""
        self.add_code_cell(self.sql_credentials_code_snippet)

    def _add_template_cell(self, lint: bool = True) -> None:
        """Add the example YAML config as a code cell."""
        self.add_code_cell(
            f"""example_yaml = {self.datasource_yaml}
print(example_yaml)""",
            lint=lint,
        )

    def _add_test_yaml_cells(self, lint: bool = True) -> None:
        """Add the explanation markdown plus the ``test_yaml_config()`` code cell."""
        self.add_markdown_cell(
            """\
# Test Your Datasource Configuration
Here we will test your Datasource configuration to make sure it is valid.
This `test_yaml_config()` function is meant to enable fast dev loops. **If your
configuration is correct, this cell will show you some snippets of the data
assets in the data source.** You can continually edit your Datasource config
yaml and re-run the cell to check until the new config is valid.
If you instead wish to use python instead of yaml to configure your Datasource,
you can use `context.add_datasource()` and specify all the required parameters."""
        )
        self.add_code_cell(
            "context.test_yaml_config(yaml_config=example_yaml)",
            lint=lint,
        )

    def _add_save_datasource_cell(self, lint: bool = True) -> None:
        """Add the save-config markdown/code cells and the closing note."""
        self.add_markdown_cell(
            """## Save Your Datasource Configuration
Here we will save your Datasource in your Data Context once you are satisfied with the configuration. Note that `overwrite_existing` defaults to False, but you may change it to True if you wish to overwrite. Please note that if you wish to include comments you must add them directly to your `great_expectations.yml`."""
        )
        self.add_code_cell(
            """sanitize_yaml_and_save_datasource(context, example_yaml, overwrite_existing=False)
context.list_datasources()""",
            lint=lint,
        )
        self.add_markdown_cell("Now you can close this notebook and delete it!")

    def render(self) -> nbformat.NotebookNode:
        """Assemble all cells and return the in-memory notebook."""
        self._notebook: nbformat.NotebookNode = nbformat.v4.new_notebook()
        self._add_header()
        self._add_docs_cell()
        if self.datasource_type == DatasourceTypes.SQL:
            self._add_sql_credentials_cell()
        # Lint generated code cells only when the optional 'black' dep imported.
        lint = black is not None
        if not lint:
            logger.warning(
                "Please install the optional dependency 'black' to enable linting. Returning input with no changes."
            )
        self._add_template_cell(lint)
        self._add_test_yaml_cells(lint)
        self._add_save_datasource_cell(lint)
        return self._notebook

    def render_to_disk(
        self,
        notebook_file_path: str,
        **kwargs: dict,
    ) -> None:
        """Render the notebook and write it to ``notebook_file_path``.

        ``**kwargs`` is accepted for interface compatibility and ignored.
        """
        self.render()
        self.write_notebook_to_disk(self._notebook, notebook_file_path)
| {
"content_hash": "704047da7d747d29ace1d81aa749e229",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 558,
"avg_line_length": 50.71917808219178,
"alnum_prop": 0.7133018230925051,
"repo_name": "great-expectations/great_expectations",
"id": "ec18709018ae8a7bbab6e929d121aaf9f07fb885",
"size": "7405",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "great_expectations/render/renderer/datasource_new_notebook_renderer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
# Admin URL routes for the inlines app, using old-style Django patterns()
# with string view names resolved against the given prefix module.
urlpatterns = patterns('populous.inlines.views.admin',
    # Render an inline for preview inside the admin.
    url(r'^render/$', 'render', name='inlines-admin-render'),
    # Fetch the add/edit form for the given inline class of an app.
    url(r'^(?P<app_label>[-\w]+)/(?P<inline_name>[-\w]+)/form/$', 'form', name='inlines-admin-form'),
)
| {
"content_hash": "2bd0c66bbf63bc6a54352535813dfa86",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 101,
"avg_line_length": 43.666666666666664,
"alnum_prop": 0.6412213740458015,
"repo_name": "caiges/populous",
"id": "24a4c2fe1e9571447b0dc294555843f63b8582a0",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "populous/inlines/urls/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from functools import wraps
import sys
import warnings
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured # NOQA
from django.db.models.query import Q, QuerySet, Prefetch # NOQA
from django.db.models.expressions import ExpressionNode, F, Value, Func, Case, When # NOQA
from django.db.models.manager import Manager # NOQA
from django.db.models.base import Model # NOQA
from django.db.models.aggregates import * # NOQA
from django.db.models.fields import * # NOQA
from django.db.models.fields.subclassing import SubfieldBase # NOQA
from django.db.models.fields.files import FileField, ImageField # NOQA
from django.db.models.fields.related import ( # NOQA
ForeignKey, ForeignObject, OneToOneField, ManyToManyField,
ManyToOneRel, ManyToManyRel, OneToOneRel)
from django.db.models.fields.proxy import OrderWrt # NOQA
from django.db.models.deletion import ( # NOQA
CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING, ProtectedError)
from django.db.models.lookups import Lookup, Transform # NOQA
from django.db.models import signals # NOQA
from django.utils.deprecation import RemovedInDjango19Warning
def permalink(func):
    """Decorate ``func`` so its return value is resolved into a URL.

    The wrapped function must return a tuple of one of these shapes:

        (viewname, viewargs)
        (viewname, viewargs, viewkwargs)

    The pieces are handed to ``urlresolvers.reverse()`` and the resulting
    URL string is returned.
    """
    from django.core.urlresolvers import reverse

    @wraps(func)
    def resolved(*args, **kwargs):
        pieces = func(*args, **kwargs)
        viewname = pieces[0]
        return reverse(viewname, None, *pieces[1:3])
    return resolved
# Deprecated aliases for functions were exposed in this module.
def make_alias(function_name):
    """Build a deprecated pass-through to ``loading.<function_name>``."""
    def forwarder(*args, **kwargs):
        # function_name is captured by this closure.
        message = "django.db.models.%s is deprecated." % function_name
        warnings.warn(message, RemovedInDjango19Warning, stacklevel=2)
        # This raises a second warning.
        from . import loading
        target = getattr(loading, function_name)
        return target(*args, **kwargs)
    forwarder.__name__ = function_name
    return forwarder
# Install the deprecated aliases on this module object so that e.g.
# ``django.db.models.get_models(...)`` keeps working (with a warning)
# during the deprecation period.
this_module = sys.modules['django.db.models']
for function_name in ('get_apps', 'get_app_path', 'get_app_paths', 'get_app',
                      'get_models', 'get_model', 'register_models'):
    setattr(this_module, function_name, make_alias(function_name))
# Remove the helpers from the namespace; they are not part of the public API.
del this_module, make_alias, function_name
| {
"content_hash": "96fab16d611ae1612928417de2b70d18",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 91,
"avg_line_length": 38.75,
"alnum_prop": 0.7137096774193549,
"repo_name": "runekaagaard/django-contrib-locking",
"id": "9348529625441aa4ecb462c3ceef6a9199d7dc6c",
"size": "2480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/models/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53566"
},
{
"name": "JavaScript",
"bytes": "106009"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10638047"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
from . import ping
from . import sitemaps | {
"content_hash": "9535efd176117ae1a58adec3f45d6aa6",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 22,
"avg_line_length": 20.5,
"alnum_prop": 0.7804878048780488,
"repo_name": "patrykomiotek/seo-monitor-api",
"id": "d4fe734db4835aeab52ec96f2054b6c22f7f74be",
"size": "41",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "app/endpoints/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7153"
},
{
"name": "Shell",
"bytes": "113"
}
],
"symlink_target": ""
} |
from sklearn.neighbors import *
from numpy import genfromtxt,zeros,savetxt,tile,where
def fileToMat(file, labelsPresent):
    """Load a comma-separated file of ints, skipping the header row.

    When ``labelsPresent`` is truthy, the first column is split off and
    returned as the labels vector; otherwise labels is None.

    Returns (data_matrix, labels).
    """
    matrix = genfromtxt(file, delimiter=',', skip_header=1, dtype="int")
    if not labelsPresent:
        return matrix, None
    labels = matrix[:, 0]
    return matrix[:, 1:], labels
def testClassifier():
    # Hold-out evaluation of kNN on the training data: the first 10% of
    # rows serve as the test set, the remainder as the training set, and
    # the error rate is printed for each odd k in [3, 19].
    data,labels = fileToMat("data/train.csv",True)
    normData,ranges, min = normalize(data)
    testPercent=0.1
    numTestVectors = int(testPercent*data.shape[0])
    for k in range(3,20,2):
        neigh = KNeighborsClassifier(n_neighbors=k,algorithm="ball_tree")
        # Train on everything after the hold-out prefix.
        neigh.fit(normData[numTestVectors:,],labels[numTestVectors:,])
        errorCount = 0.0
        for i in range(numTestVectors):
            classifiedLabel = neigh.predict(normData[i])
            if(classifiedLabel!=labels[i]):
                errorCount=errorCount+1.0
        # NOTE(review): Python 2 print statement -- this module is Py2-only.
        print r'K:%d,Error Rate:%f'%(k,((errorCount/float(numTestVectors))*100))
# NOTE: this approach didn't work well; it gave a 66% error rate.
def normalize(data):
    """Min-max normalize each column of ``data`` to the [0, 1] range.

    Parameters
    ----------
    data : ndarray
        2-D array, rows = samples, columns = features.

    Returns
    -------
    normData : ndarray
        Float array with every column rescaled to [0, 1].  Constant
        columns (range 0) map to 0 instead of producing NaN/inf.
    ranges : ndarray
        Per-column max - min (same semantics as before the fix).
    colMin : ndarray
        Per-column minimum.
    """
    colMin = data.min(0)
    colMax = data.max(0)
    ranges = colMax - colMin
    # Bug fix: columns with zero range (e.g. always-blank pixels) caused a
    # divide-by-zero yielding NaN/inf; substitute 1 so those columns
    # normalize to 0.  Cast to float so integer input does not
    # floor-divide under Python 2.
    safeRanges = where(ranges == 0, 1, ranges).astype(float)
    denominator = tile(safeRanges, (data.shape[0], 1))
    normData = ((data - tile(colMin, (data.shape[0], 1))) / denominator)
    return normData, ranges, colMin
def digitRecognizer():
    # Train kNN (k=3) on the full training set and classify the Kaggle
    # test set, writing ImageId,Label rows to a CSV submission file.
    trainData,labels=fileToMat("data/train.csv",True)
    testData,trainLabels = fileToMat("data/test.csv",False)
    # Column 0 = 1-based image id, column 1 = predicted digit.
    classifiedResult = zeros((testData.shape[0],2))
    neigh = KNeighborsClassifier(n_neighbors=3,)
    neigh.fit(trainData,labels)
    for i in range(testData.shape[0]):
        classifiedResult[i,0]=i+1
        classifiedResult[i,1]=neigh.predict(testData[i])
        # Progress indicator (Python 2 print statement).
        print "%d "%(i+1)
    savetxt("testResult_sklearn_3.csv",classifiedResult,delimiter=',',fmt="%d,%d",header='ImageId,Label')
if __name__=="__main__":
digitRecognizer()
| {
"content_hash": "b8b064c17dd26618078036737901f3ab",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 105,
"avg_line_length": 37.08163265306123,
"alnum_prop": 0.6653824986241057,
"repo_name": "apatti/apatti_ml",
"id": "e7e560c02cab954ea782c45adce96c97c365bcaf",
"size": "1817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaggle/digit-recognizer/knn_sklearn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1505202"
},
{
"name": "Python",
"bytes": "32838"
},
{
"name": "R",
"bytes": "5905"
},
{
"name": "Roff",
"bytes": "2683772"
}
],
"symlink_target": ""
} |
import sys, os
import numpy as np
import pandas as pd
import scipy.stats as stats
from StringIO import StringIO
from random import randrange, sample, shuffle
import re
from functools import partial
from itertools import chain
from operator import itemgetter
from collections import OrderedDict
## 3rd party
from configobj import ConfigObj, flatten_errors
#from validate import Validator
# utility functions
def str2dict(s):
    """Parse a string of 'item:value,item:value' pairs into a dict.

    Keys are lower-cased and values converted to float.  Input without a
    ``split`` method (i.e. not string-like) is returned unchanged.

    Raises
    ------
    TypeError
        If any value cannot be converted to a float.
    """
    if hasattr(s, 'split'):
        l = re.split('[:,]', s)
        try:
            return {k.lower(): float(v) for k, v in zip(l[0::2], l[1::2])}
        except (TypeError, ValueError):
            # Bug fix: float() raises ValueError for non-numeric strings
            # (the common failure here); the original only caught TypeError,
            # so malformed input escaped with the wrong exception.
            msg = 'distribution parameter values must be ints or floats.'
            raise TypeError(msg)
    else:
        return s
def random_insert_seq(l, seq):
    """Insert seq items at random locations in list.

    Parameters
    ----------
    l : list
        target list object
    seq : iterable
        items to insert in the list

    Returns
    -------
    list : target list with `seq` values randomly inserted
        (relative order of the original `l` items is preserved)
    """
    total = len(l) + len(seq)
    # Fix: use range() instead of Python-2-only xrange() -- equivalent
    # here and works on both Python 2 and 3.
    insert_locs = sample(range(total), len(seq))
    inserts = dict(zip(insert_locs, seq))
    inputs = iter(l)
    return [inserts[pos] if pos in inserts else next(inputs)
            for pos in range(total)]
def power_neg(*args, **kwargs):
    """Reflected power distribution: draws from ``1 - numpy.random.power``."""
    draws = np.random.power(*args, **kwargs)
    return 1 - draws
class _Comm(object):
    """Parent class for other classes in the module."""

    def __init__(self):
        pass

    # property/setter
    @property
    def abund_dist(self):
        # Name of the numpy.random abundance distribution (stored as str).
        return self._abund_dist

    @abund_dist.setter
    def abund_dist(self, x):
        self._abund_dist = str(x)

    @property
    def richness(self):
        # Number of taxa in the community (int >= 1).
        return self._richness

    @richness.setter
    def richness(self, x):
        # Values <= 1 are interpreted as a fraction of the taxon pool.
        # NOTE(review): a literal richness of exactly 1 therefore cannot be
        # requested directly (1 -> 100% of the pool) -- confirm intended.
        x = float(x)
        if x <= 1:
            # x = fraction of total taxa pool
            # setting x as that fraction number of taxa
            x = len(self.taxon_pool) * x
        self._richness = int(round(x,0))
        # Never allow a community with fewer than one taxon.
        if self._richness < 1:
            self._richness = 1
class SimComms(_Comm):
    """Class for simulating taxon count data of communities."""

    def __init__(self, taxon_list, perm_perc, shared_perc,
                 richness, abund_dist, abund_dist_params,
                 n_comm, config=None,
                 *args, **kwargs):
        """
        Parameters
        ----------
        See gradientComms
        """
        _Comm.__init__(self, *args, **kwargs)
        self._load_taxon_list(taxon_list)
        self.perm_perc = perm_perc
        self.shared_perc = shared_perc
        self.richness = richness
        self.abund_dist = abund_dist
        self.abund_dist_params = str2dict(abund_dist_params)
        self.config = config
        self.n_comm = n_comm

        # loading config; setting community parameters
        if config is not None:
            self.comm_params = self._load_config()
        else:
            self.comm_params = dict()
        self._set_comm_params()

        # lowering comm richness if taxon pool is limiting
        ## otherwise, shared_perc option throws and error
        if self.shared_perc < 100:
            self._lower_richness()

        # shared taxa
        self._set_shared_taxa()

    def _get_configspec(self, strIO=True):
        """Return configspec set for instance.

        Parameters
        ----------
        strIO : bool
            return configspec as a StringIO instance

        Returns
        -------
        configspec object
        """
        # Per-community validation spec used by configobj.
        configspec = """
        [__many__]
        richness = float(0,inf, default=None)
        abund_dist = string(default='exponential,1,0.5')
        start = float(default=None)
        end = float(default=None)
        loc = float(default=None)
        scale = float(default=None)
        sigma = float(default=None)
        """
        if strIO == True:
            return StringIO(configspec)
        else:
            return configspec

    def _load_config(self):
        # Parse the user config file against the per-community configspec.
        assert hasattr(self, 'config'), "No config attribute found."
        configspec = self._get_configspec()
        return ConfigObj(self.config, configspec=configspec)

    def _set_comm_params(self):
        """Setting community-specific params including applying global params.
        """
        # adding to comm params if not enough set by config
        n_config_comms = len(self.comm_params.keys())
        n_diff = self.n_comm - n_config_comms
        for i in xrange(n_diff):
            self.comm_params[str(n_config_comms + i + 1)] = dict()
        for k,v in self.comm_params.items():
            # checking for params; fall back to the global defaults when a
            # community does not define its own value
            if ('richness' not in v.keys() or
                v['richness'] is None):
                v['richness'] = self.richness
            if ('abund_dist' not in v.keys() or
                v['abund_dist'] is None):
                v['abund_dist'] = self.abund_dist
            if ('abund_dist_p' not in v.keys() or
                v['abund_dist_p'] is None):
                v['abund_dist_p'] = self.abund_dist_params
            v['abund_dist_p'] = str2dict(v['abund_dist_p'])

    def _set_shared_taxa(self):
        """A list of taxa shared among all communities.
        The taxon list (pool) is reduced to just unshared taxa.
        """
        self.shared_taxa = self._drawFromTaxonPool(self.n_shared)

    def _load_taxon_list(self, fileName):
        """Loading taxon list file. Taxa order is randomly shuffled.

        Parameters
        ----------
        fileName : str
            name of taxon file; '-' reads taxa from STDIN
        """
        self.taxon_pool = []
        if fileName == '-':
            # taxon name is the first tab-delimited field of each line
            for l in sys.stdin:
                x = l.rstrip().split('\t')
                self.taxon_pool.append(x[0])
        else:
            with open(fileName, 'r') as inF:
                for l in inF.readlines():
                    x = l.rstrip().split('\t')
                    self.taxon_pool.append(x[0])
        # randomize draw order of the pool
        shuffle(self.taxon_pool)

    def _drawFromTaxonPool(self, n):
        """Draw from taxon pool, returning n-taxa;
        those taxa are removed from the pool.

        Parameters
        ----------
        n : int
            number of taxa to draw

        Returns
        -------
        list : [taxon_name1, taxon_nameN, ...]
        """
        assert n <= len(self.taxon_pool), \
            'Cannot draw {} taxa from taxon pool'.format(n)
        taxa = self.taxon_pool[:n]
        self.taxon_pool = self.taxon_pool[n:]
        return taxa

    def _lower_richness(self):
        """Lowering the richness of each community if the number of
        unique taxa (un-shared) + shared taxa is greater than the taxon
        pool from which to draw taxa.
        """
        rich = []
        for k,v in self.comm_params.items():
            try:
                rich.append(v['richness'])
            except KeyError:
                msg = 'Cannot find "richness" attribute for Comm {}'
                raise KeyError, msg.format(k)
        # total number of un-shared taxa needed across all communities
        n_unique = np.sum([x - self.n_shared for x in rich])
        n_taxa_pool = len(self.taxon_pool)
        n_comm = len(rich)
        n_less = 0
        if n_unique + self.n_shared > n_taxa_pool:
            # per-community reduction needed to fit within the pool
            n_less = n_unique + self.n_shared - n_taxa_pool
            n_less = np.ceil(n_less/ n_comm)
            n_less = int(n_less) + 1
        else:
            return 0
        for k,v in self.comm_params.items():
            new_rich = v['richness'] - n_less
            msg = 'WARNING: lowering richness ({} -> {}) for Community {}\n' + \
                ' because the taxon pool is not large enough for the\n' + \
                ' amount of un-shared taxa (set by --shared_perc)\n'
            sys.stderr.write(msg.format(v['richness'], new_rich, k))
            v['richness'] = new_rich
        # force n_shared to be recomputed against the new richness values
        self._n_shared = None

    def make_comm(self, comm_id):
        """Make a Comm object.

        Parameters
        ----------
        comm_id : str
            Community name from comm_params attrib
        """
        # assertions
        comm_id = str(comm_id)
        if not hasattr(self, 'comms'):
            # OrderedDict preserves community creation order
            self.comms = OrderedDict() #dict()
        try:
            self.comm_params[comm_id]
        except KeyError:
            raise KeyError('Cannot find community ID "{}" in '\
                           'community params\n'.format(comm_id))
        # init comm objects
        self.comms[comm_id] = Comm(comm_id, self)

    def write_comm_table(self, Long=True):
        """Joining comm objects into 1 dataframe and printing.
        Writing table to STDOUT.

        Parameters
        ----------
        Long : bool
            Write table in long format
        """
        df = pd.concat([x.taxa for x in self.values()],
                       axis=1)
        write_index = True
        df.columns = self.keys()
        if Long == True:
            write_index = False
            # melting (wide -> long format)
            val_vars = list(df.columns)
            df['taxon'] = df.index
            df = pd.melt(df, id_vars=['taxon'], value_vars=val_vars)
            # ordering columns
            df.columns = ['taxon_name', 'library', 'rel_abund_perc']
            # sorting
            df = df.sort_values(by=['library','rel_abund_perc'],
                                ascending=[1,0])
            # converting any NAs to zeros
            df.fillna(0, inplace=True)
            # getting rank by community (grouping by community)
            df['rank'] = df.groupby(['library'])['rel_abund_perc']\
                .rank(method='first',ascending=False).astype('int')
            df = df[['library','taxon_name','rel_abund_perc','rank']]
        # writing dataframe
        df.to_csv(sys.stdout, sep='\t', na_rep=0,
                  float_format='%.9f', index=write_index)

    @staticmethod
    def permute(comm, perm_perc):
        """Permute a certain percentage of the taxa abundances.
        Permuting just the indices of the series objects.
        In-place edit of comm table

        Parameters
        ----------
        comm : comm table object
        perm_perc : float
            percent of taxa to permute
        """
        # assertions
        perm_perc = float(perm_perc)
        assert (perm_perc >= 0 and perm_perc <= 100),\
            'perm_perc is not in range [0,100]'
        assert hasattr(comm, 'taxa'), \
            'No "taxa" attribute for comm {}'.format(comm.comm_id)
        # variables
        n_perm = int(round(perm_perc / 100 * comm.n_taxa,0))
        # permuting index of comm
        perm_idx = sample(range(comm.n_taxa), n_perm)
        # NOTE: itemgetter with a *list* key relies on pandas fancy
        # indexing of the Index object (index[[i, j, ...]]).
        perm_ig = itemgetter(perm_idx)
        n_perm_idx = set(range(comm.n_taxa)) - set(perm_idx)
        if len(n_perm_idx) > 0:
            n_perm_ig = itemgetter(*n_perm_idx)
            # altering pandas series of taxa & abundances
            comm.taxa.index = random_insert_seq(n_perm_ig(comm.taxa.index),
                                                perm_ig(comm.taxa.index))
        else:
            # altering pandas series of taxa & abundances
            comm.taxa.index = random_insert_seq([],
                                                perm_ig(comm.taxa.index))

    # dict functions (pass-throughs to the underlying comms mapping)
    def items(self):
        return self.comms.items()

    def keys(self):
        try:
            return self.comms.keys()
        except AttributeError:
            # comms not built yet; fall back to the configured IDs
            return np.sort(self.comm_params.keys())

    def values(self):
        return self.comms.values()

    # property/setter
    @property
    def n_comm(self):
        return self._n_comm

    @n_comm.setter
    def n_comm(self, x):
        try:
            self._n_comm = int(x)
        except ValueError:
            raise ValueError('n_comm must be an integer')

    @property
    def perm_perc(self):
        return self._perm_perc

    @perm_perc.setter
    def perm_perc(self, x):
        x = float(x)
        # NOTE(review): message mentions 'shared_perc' but this is the
        # perm_perc setter (looks like a copy/paste slip).
        assert (x >= 0 and x <= 100), 'shared_perc must be in range 0-100'
        self._perm_perc = x

    @property
    def shared_perc(self):
        return self._shared_perc

    @shared_perc.setter
    def shared_perc(self, x):
        x = float(x)
        assert (x >= 0 and x <= 100), 'shared_perc must be in range 0-100'
        self._shared_perc = x

    @property
    def min_richness(self):
        """The minimum richness of any community as defined by comm_params."""
        # lazily computed and cached on first access
        if not hasattr(self, '_min_richness'):
            setattr(self, '_min_richness', None)
        if self._min_richness is None:
            richness_vals = []
            for k,v in self.comm_params.items():
                try:
                    richness_vals.append(int(v['richness']))
                except KeyError:
                    raise KeyError('Cannot find richness attribute for '\
                                   'comm_id "{}"'.format(k))
            self._min_richness = min(richness_vals)
        return self._min_richness

    @property
    def n_shared(self):
        """The number of taxa that should be shared;
        defined by shared_perc * min richness of any community.
        """
        # lazily computed; _lower_richness() resets the cache to None
        if not hasattr(self, '_n_shared'):
            setattr(self, '_n_shared', None)
        if self._n_shared is None:
            self._n_shared = self.min_richness * (self.shared_perc / 100.0)
            self._n_shared = int(round(self._n_shared,0))
        return self._n_shared

    @property
    def n_taxa_remaining(self):
        """The number of taxa that remain in taxa pool.
        """
        if not hasattr(self, '_n_taxa_remaining'):
            setattr(self, '_n_taxa_remaining', None)
        return len(self.taxon_pool)
class Comm(_Comm):
    """Community class"""

    def __init__(self, comm_id, GradientComms, *args, **kwargs):
        """
        Parameters
        ----------
        comm_id : str
            community ID
        GradientComms : gradient community object
        """
        _Comm.__init__(self, *args, **kwargs)
        self.comm_id = comm_id
        self.params = GradientComms.comm_params[comm_id]
        self.n_shared = GradientComms.n_shared
        self.taxon_pool = GradientComms.taxon_pool
        self.richness = self.params['richness']

        # assertions
        if self.richness > self.n_shared + GradientComms.n_taxa_remaining:
            sys.exit('ERROR: Comm_ID {}\n'\
                     ' Community richness is set too high! It is > taxon pool.\n'\
                     ' There is not enough taxa to make the desired communities.\n' \
                     ' You must reduce richness or increase perc_shared.\n'\
                     ' NOTE: shared_perc is based on the community with the min. ' \
                     'richness.\n'.format(comm_id))

        # selecting additional taxa beyond those shared by all comms
        ## unique taxa inserted rand in list while keeping shared taxa r-abund
        n_unique = self.richness - GradientComms.n_shared
        assert n_unique >= 0, 'ERROR: Comm_ID {}: the number ' \
            'of unique taxa is < 0'.format(comm_id)
        self.taxa = random_insert_seq(GradientComms.shared_taxa,
                                      GradientComms._drawFromTaxonPool(n_unique))

        # drawing relative abundances from the user-defined distribution
        abund_dist = self._get_abund_dist(self.params['abund_dist'],
                                          self.params['abund_dist_p'])
        rel_abunds = abund_dist(size=self.n_taxa)
        # normalize to percentages and sort descending
        rel_abunds = np.sort(rel_abunds / sum(rel_abunds) * 100)[::-1]

        # making a series for the taxa
        self.taxa = pd.Series(rel_abunds, index=self.taxa)

    def __repr__(self):
        return self.taxa.__repr__()

    def _get_abund_dist(self, dist, params):
        # Resolve `dist` to a numpy.random sampler and bind its parameters.
        try:
            distFunc = getattr(np.random, dist)
        except AttributeError:
            # NOTE(review): `msg` is built but never raised, so an unknown
            # distribution still fails later with an unbound `distFunc`
            # (unless dist == 'power') -- confirm intended behavior.
            msg = 'Distribution "{}" is not supported'.format(dist)
        if dist == 'power':
            # special case: use the reflected power distribution helper
            distFunc = power_neg
        try:
            return partial(distFunc, **params)
        except TypeError:
            param_str = [':'.join([str(k),str(v)]) for k,v in params.items()]
            param_str = ','.join(param_str)
            msg = 'Params "{}" do not work with function "{}"'\
                .format(param_str, dist)
            raise TypeError, msg

    @property
    def n_taxa(self):
        # Number of taxa currently in the community.
        return len(self.taxa)
| {
"content_hash": "b226e199bff47778d13b473f984bb51a",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 83,
"avg_line_length": 33.20696324951644,
"alnum_prop": 0.5240563839701771,
"repo_name": "nick-youngblut/SIPSim",
"id": "1fddb47c465d64033a48e5d2195ea27183f5c9ab",
"size": "17190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SIPSim/SimComms.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3184"
},
{
"name": "HTML",
"bytes": "237223"
},
{
"name": "Jupyter Notebook",
"bytes": "195945288"
},
{
"name": "Python",
"bytes": "581527"
}
],
"symlink_target": ""
} |
"""
This overrides the Category with the class loaded from the
PUBLICATION_BACKBONE_CATEGORY_MODEL setting if it exists.
"""
from django.conf import settings
from publication_backbone.utils.loader import load_class
#==============================================================================
# Extensibility
#==============================================================================
# Dotted path of the Category implementation; projects may override it via
# the PUBLICATION_BACKBONE_CATEGORY_MODEL setting.
CATEGORY_MODEL = getattr(settings, 'PUBLICATION_BACKBONE_CATEGORY_MODEL',
    'publication_backbone.models.defaults.category.category.Category')

# Concrete Category class loaded from the configured dotted path.
Category = load_class(CATEGORY_MODEL, 'PUBLICATION_BACKBONE_CATEGORY_MODEL')
| {
"content_hash": "7009824c96a6af891e734589735cc642",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 43.92857142857143,
"alnum_prop": 0.5967479674796748,
"repo_name": "Excentrics/publication-backbone",
"id": "ccb6b115bcc48ef889286e82c2780a7e75445248",
"size": "639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publication_backbone/models/category_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "447762"
},
{
"name": "HTML",
"bytes": "217091"
},
{
"name": "JavaScript",
"bytes": "904819"
},
{
"name": "Python",
"bytes": "470545"
}
],
"symlink_target": ""
} |
from ..dojo_test_case import DojoTestCase, get_unit_tests_path
from dojo.tools.solar_appscreener.parser import SolarAppscreenerParser
from dojo.models import Test
class TestSolarAppscreenerParser(DojoTestCase):
    """Unit tests for ``SolarAppscreenerParser`` CSV report ingestion."""

    # Directory (relative to the unit test root) holding the sample reports.
    SCANS_DIR = "/scans/solar_appscreener/"

    def _get_findings(self, filename):
        """Parse the named sample report and return its findings.

        Opens the file with a context manager so the handle is closed even
        when the parser raises (the previous code leaked it on error), and
        cleans every unsaved endpoint so endpoint assertions are valid.
        """
        path = get_unit_tests_path() + self.SCANS_DIR + filename
        with open(path) as testfile:
            findings = SolarAppscreenerParser().get_findings(testfile, Test())
        for finding in findings:
            for endpoint in finding.unsaved_endpoints:
                endpoint.clean()
        return findings

    def test_solar_appscreener_parser_with_no_vuln_has_no_findings(self):
        # An empty report must produce no findings.
        findings = self._get_findings("solar_appscreener_zero_vul.csv")
        self.assertEqual(0, len(findings))

    def test_solar_appscreener_parser_with_one_criticle_vuln_has_one_findings(self):
        findings = self._get_findings("solar_appscreener_one_vul.csv")
        self.assertEqual(1, len(findings))
        finding = findings[0]
        self.assertEqual("Hardcoded password", finding.title)
        self.assertEqual("Critical", finding.severity)
        self.assertEqual("misc/shared.php", finding.file_path)
        self.assertEqual(151, finding.line)
        self.assertEqual("misc/shared.php", finding.sast_source_file_path)
        self.assertEqual(151, finding.sast_source_line)

    def test_solar_appscreener_parser_with_many_vuln_has_many_findings(self):
        findings = self._get_findings("solar_appscreener_many_vul.csv")
        self.assertEqual(3, len(findings))

        finding = findings[0]
        self.assertEqual("Hardcoded password", finding.title)
        self.assertEqual("Critical", finding.severity)
        self.assertEqual("misc/shared.php", finding.file_path)
        self.assertEqual(151, finding.line)
        self.assertEqual("misc/shared.php", finding.sast_source_file_path)
        self.assertEqual(151, finding.sast_source_line)

        finding = findings[1]
        self.assertEqual("Internal information leak", finding.title)
        self.assertEqual("Medium", finding.severity)
        self.assertEqual("index.php", finding.file_path)
        self.assertEqual(5, finding.line)
        self.assertEqual("index.php", finding.sast_source_file_path)
        self.assertEqual(5, finding.sast_source_line)

        finding = findings[2]
        self.assertEqual("Trust boundary violation", finding.title)
        self.assertEqual("Medium", finding.severity)
        self.assertEqual("index.php", finding.sast_source_file_path)
        self.assertEqual(51, finding.sast_source_line)
        self.assertEqual("index.php", finding.file_path)
        self.assertEqual(51, finding.line)
| {
"content_hash": "4641af10b49402014d90061c00448630",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 94,
"avg_line_length": 48.046875,
"alnum_prop": 0.672520325203252,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "5268b3621e01c2369838e44acd118db7b0de7b3d",
"size": "3075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unittests/tools/test_solar_appscreener_parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os, sys, StringIO
from unittest import TestCase
from ambari_agent import manifestGenerator
import ambari_agent.AmbariConfig
import tempfile
import json
import shutil
from ambari_agent.AmbariConfig import AmbariConfig
from mock.mock import patch, MagicMock, call
class TestManifestGenerator(TestCase):
  """Unit tests for ambari_agent.manifestGenerator (legacy Python 2 code).

  setUp silences stdout, creates a scratch directory, and loads a sample
  agent command payload (test.json) that the generator functions consume;
  tearDown restores stdout and removes the scratch directory.
  """

  def setUp(self):
    # disable stdout
    out = StringIO.StringIO()
    sys.stdout = out

    self.dir = tempfile.mkdtemp()
    self.config = AmbariConfig()
    # Sample agent command used as input for manifest generation.
    jsonCommand = file('../../main/python/ambari_agent/test.json').read()
    self.parsedJson = json.loads(jsonCommand)


  def tearDown(self):
    shutil.rmtree(self.dir)
    # enable stdout
    sys.stdout = sys.__stdout__

  def testWriteImports(self):
    # Smoke test: writeImports should emit the configured Puppet imports
    # into the temp file without raising; output is echoed for debugging.
    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
    print tmpFileName
    tmpFile = file(tmpFileName, 'r+')

    manifestGenerator.writeImports(tmpFile, '../../main/puppet/modules', self.config.getImports())
    tmpFile.seek(0)
    print tmpFile.read()
    tmpFile.close()

    pass

  @patch.object(manifestGenerator, 'writeImports')
  @patch.object(manifestGenerator, 'writeNodes')
  @patch.object(manifestGenerator, 'writeParams')
  @patch.object(manifestGenerator, 'writeTasks')
  def testGenerateManifest(self, writeTasksMock, writeParamsMock, writeNodesMock, writeImportsMock):
    # generateManifest should delegate to each of the write* helpers;
    # the helpers themselves are mocked out above.
    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
    self.parsedJson['roleParams'] = 'role param'
    manifestGenerator.generateManifest(self.parsedJson, tmpFileName, '../../main/puppet/modules', self.config.getConfig())

    self.assertTrue(writeParamsMock.called)
    self.assertTrue(writeNodesMock.called)
    self.assertTrue(writeImportsMock.called)
    self.assertTrue(writeTasksMock.called)

    print file(tmpFileName).read()
    pass

  def testEscape(self):
    # escape() must backslash-escape quotes and backslashes for Puppet.
    shouldBe = '\\\'\\\\'
    result = manifestGenerator.escape('\'\\')
    self.assertEqual(result, shouldBe)

  def test_writeNodes(self):
    # Smoke test: writeNodes renders the cluster host info without raising.
    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
    tmpFile = file(tmpFileName, 'r+')

    clusterHostInfo = self.parsedJson['clusterHostInfo']
    clusterHostInfo['zookeeper_hosts'] = ["h1.hortonworks.com", "h2.hortonworks.com"]

    manifestGenerator.writeNodes(tmpFile, clusterHostInfo)
    tmpFile.seek(0)
    print tmpFile.read()
    tmpFile.close()
    os.remove(tmpFileName)

  def test_writeHostAttributes(self):
    # Smoke test: writeHostAttributes renders a small attribute dict.
    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
    tmpFile = file(tmpFileName, 'r+')

    hostAttributes = {'HostAttr1' : '1', 'HostAttr2' : '2'}
    manifestGenerator.writeHostAttributes(tmpFile, hostAttributes)
    tmpFile.seek(0)
    print tmpFile.read()
    tmpFile.close()
    os.remove(tmpFileName)

  def test_writeTasks(self):
    # Smoke test: writeTasks renders one ZOOKEEPER_SERVER role's tasks.
    tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1]
    tmpFile = file(tmpFileName, 'r+')
    roles = [{'role' : 'ZOOKEEPER_SERVER',
              'cmd' : 'NONE',
              'roleParams' : {'someRoleParams': '-x'}}]
    clusterHostInfo = self.parsedJson['clusterHostInfo']
    clusterHostInfo['zookeeper_hosts'] = ["h1.hortonworks.com", "h2.hortonworks.com"]
    manifestGenerator.writeTasks(tmpFile, roles, self.config, clusterHostInfo, "h1.hortonworks.com")
    tmpFile.seek(0)
    print tmpFile.read()
    tmpFile.close()
    os.remove(tmpFileName)
"content_hash": "2ccc4659e3eee94ddae67772d5d44278",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 122,
"avg_line_length": 32.814516129032256,
"alnum_prop": 0.7230277709510936,
"repo_name": "telefonicaid/fiware-cosmos-ambari",
"id": "4007f8fb2b4ab7248c6261c373b7892b8116f650",
"size": "4095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ambari-agent/src/test/python/TestManifestGenerator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "100347"
},
{
"name": "CoffeeScript",
"bytes": "3193"
},
{
"name": "Java",
"bytes": "4427848"
},
{
"name": "JavaScript",
"bytes": "2778621"
},
{
"name": "PHP",
"bytes": "210273"
},
{
"name": "Perl",
"bytes": "2767"
},
{
"name": "Puppet",
"bytes": "468628"
},
{
"name": "Python",
"bytes": "1315868"
},
{
"name": "Ruby",
"bytes": "474815"
},
{
"name": "Shell",
"bytes": "226211"
}
],
"symlink_target": ""
} |
"""
Simple demonstration of the plane geometry.
"""
import sys
from vispy import scene, geometry
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
view = canvas.central_widget.add_view()
vertices, faces, outline = geometry.create_plane(width=2, height=4,
width_segments=4,
height_segments=8,
direction='+y')
plane = scene.visuals.Plane(width=2, height=4, width_segments=4,
height_segments=8, direction='+y',
vertex_colors=vertices['color'],
edge_color='k',
parent=view.scene)
camera = scene.cameras.TurntableCamera(fov=45, azimuth=-45, parent=view.scene)
view.camera = camera
if __name__ == '__main__' and sys.flags.interactive == 0:
canvas.app.run()
| {
"content_hash": "2d5708a34c01ecd21b2c918772f3985c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 33.82142857142857,
"alnum_prop": 0.5248152059134108,
"repo_name": "Eric89GXL/vispy",
"id": "4e9a3b335e9585b15590aa3e283f745a613fd298",
"size": "1107",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/basics/visuals/plane.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2461885"
}
],
"symlink_target": ""
} |
"""
Implements visualizers that use the silhouette metric for cluster evaluation.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.ticker as ticker
from sklearn.metrics import silhouette_score, silhouette_samples
from yellowbrick.utils import check_fitted
from yellowbrick.style import resolve_colors
from yellowbrick.cluster.base import ClusteringScoreVisualizer
## Packages for export
__all__ = ["SilhouetteVisualizer", "silhouette_visualizer"]
##########################################################################
## Silhouette Method for K Selection
##########################################################################
class SilhouetteVisualizer(ClusteringScoreVisualizer):
    """
    The Silhouette Visualizer displays the silhouette coefficient for each
    sample on a per-cluster basis, visually evaluating the density and
    separation between clusters. The score is calculated by averaging the
    silhouette coefficient for each sample, computed as the difference
    between the average intra-cluster distance and the mean nearest-cluster
    distance for each sample, normalized by the maximum value. This produces a
    score between -1 and +1, where scores near +1 indicate high separation
    and scores near -1 indicate that the samples may have been assigned to
    the wrong cluster.

    In SilhouetteVisualizer plots, clusters with higher scores have wider
    silhouettes, but clusters that are less cohesive will fall short of the
    average score across all clusters, which is plotted as a vertical dotted
    red line.

    This is particularly useful for determining cluster imbalance, or for
    selecting a value for K by comparing multiple visualizers.

    Parameters
    ----------
    estimator : a Scikit-Learn clusterer
        Should be an instance of a centroidal clustering algorithm (``KMeans``
        or ``MiniBatchKMeans``). If the estimator is not fitted, it is fit when
        the visualizer is fitted, unless otherwise specified by ``is_fitted``.

    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If None is passed in the current axes
        will be used (or generated if required).

    colors : iterable or string, default: None
        A collection of colors to use for each cluster group. If there are
        fewer colors than cluster groups, colors will repeat. May also be a
        Yellowbrick or matplotlib colormap string.

    is_fitted : bool or str, default='auto'
        Specify if the wrapped estimator is already fitted. If False, the
        estimator will be fit when the visualizer is fit, otherwise, the
        estimator will not be modified. If 'auto' (default), a helper method
        will check if the estimator is fitted before fitting it again.

    kwargs : dict
        Keyword arguments that are passed to the base class and may influence
        the visualization as defined in other Visualizers.

    Attributes
    ----------
    silhouette_score_ : float
        Mean Silhouette Coefficient for all samples. Computed via scikit-learn
        `sklearn.metrics.silhouette_score`.

    silhouette_samples_ : array, shape = [n_samples]
        Silhouette Coefficient for each samples. Computed via scikit-learn
        `sklearn.metrics.silhouette_samples`.

    n_samples_ : integer
        Number of total samples in the dataset (X.shape[0])

    n_clusters_ : integer
        Number of clusters (e.g. n_clusters or k value) passed to internal
        scikit-learn model.

    y_tick_pos_ : array of shape (n_clusters,)
        The computed center positions of each cluster on the y-axis

    Examples
    --------
    >>> from yellowbrick.cluster import SilhouetteVisualizer
    >>> from sklearn.cluster import KMeans
    >>> model = SilhouetteVisualizer(KMeans(10))
    >>> model.fit(X)
    >>> model.show()
    """

    def __init__(self, estimator, ax=None, colors=None, is_fitted="auto", **kwargs):
        # Initialize the visualizer bases.
        # BUG FIX: ``is_fitted`` was accepted here but never forwarded to the
        # base class, so the ``self.is_fitted`` attribute read in ``fit`` did
        # not reflect the caller's choice (the base default was used instead).
        # Forward it explicitly so user intent is honored.
        super(SilhouetteVisualizer, self).__init__(
            estimator, ax=ax, is_fitted=is_fitted, **kwargs
        )

        # Visual Properties
        # Use colors if it is given, otherwise attempt to use colormap which
        # which will override colors. If neither is found, default to None.
        # The colormap may yet still be found in resolve_colors
        self.colors = colors
        if "colormap" in kwargs:
            self.colors = kwargs["colormap"]

    def fit(self, X, y=None, **kwargs):
        """
        Fits the model and generates the silhouette visualization.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to cluster and to score silhouettes on.

        y : array-like, optional
            Passed through to the wrapped estimator's ``fit``.

        Returns
        -------
        self : SilhouetteVisualizer
            The fitted visualizer, for method chaining.
        """
        # TODO: decide to use this method or the score method to draw.
        # NOTE: Probably this would be better in score, but the standard score
        # is a little different and I'm not sure how it's used.

        if not check_fitted(self.estimator, is_fitted_by=self.is_fitted):
            # Fit the wrapped estimator
            self.estimator.fit(X, y, **kwargs)

        # Get the properties of the dataset
        self.n_samples_ = X.shape[0]
        self.n_clusters_ = self.estimator.n_clusters

        # Compute the scores of the cluster
        labels = self.estimator.predict(X)
        self.silhouette_score_ = silhouette_score(X, labels)
        self.silhouette_samples_ = silhouette_samples(X, labels)

        # Draw the silhouette figure
        self.draw(labels)

        # Return the estimator
        return self

    def draw(self, labels):
        """
        Draw the silhouettes for each sample and the average score.

        Parameters
        ----------
        labels : array-like
            An array with the cluster label for each silhouette sample,
            usually computed with ``predict()``. Labels are not stored on the
            visualizer so that the figure can be redrawn with new data.

        Returns
        -------
        ax : matplotlib Axes
            The axes the silhouettes were drawn on.
        """
        # Track the positions of the lines being drawn
        y_lower = 10  # The bottom of the silhouette

        # Get the colors from the various properties
        color_kwargs = {"n_colors": self.n_clusters_}

        if self.colors is None:
            color_kwargs["colormap"] = "Set1"
        elif isinstance(self.colors, str):
            color_kwargs["colormap"] = self.colors
        else:
            color_kwargs["colors"] = self.colors

        colors = resolve_colors(**color_kwargs)

        # For each cluster, plot the silhouette scores
        self.y_tick_pos_ = []
        for idx in range(self.n_clusters_):

            # Collect silhouette scores for samples in the current cluster .
            values = self.silhouette_samples_[labels == idx]
            values.sort()

            # Compute the size of the cluster and find upper limit
            size = values.shape[0]
            y_upper = y_lower + size

            color = colors[idx]
            self.ax.fill_betweenx(
                np.arange(y_lower, y_upper),
                0,
                values,
                facecolor=color,
                edgecolor=color,
                alpha=0.5,
            )

            # Collect the tick position for each cluster
            self.y_tick_pos_.append(y_lower + 0.5 * size)

            # Compute the new y_lower for next plot
            y_lower = y_upper + 10

        # The vertical line for average silhouette score of all the values
        self.ax.axvline(
            x=self.silhouette_score_,
            color="red",
            linestyle="--",
            label="Average Silhouette Score",
        )

        return self.ax

    def finalize(self):
        """
        Prepare the figure for rendering by setting the title and adjusting
        the limits on the axes, adding labels and a legend.
        """
        # Set the title
        self.set_title(
            ("Silhouette Plot of {} Clustering for {} Samples in {} Centers").format(
                self.name, self.n_samples_, self.n_clusters_
            )
        )

        # Set the X and Y limits
        # The silhouette coefficient can range from -1, 1;
        # but here we scale the plot according to our visualizations

        # l_xlim and u_xlim are lower and upper limits of the x-axis,
        # set according to our calculated max and min score with necessary padding
        l_xlim = max(-1, min(-0.1, round(min(self.silhouette_samples_) - 0.1, 1)))
        u_xlim = min(1, round(max(self.silhouette_samples_) + 0.1, 1))
        self.ax.set_xlim([l_xlim, u_xlim])

        # The (n_clusters_+1)*10 is for inserting blank space between
        # silhouette plots of individual clusters, to demarcate them clearly.
        self.ax.set_ylim([0, self.n_samples_ + (self.n_clusters_ + 1) * 10])

        # Set the x and y labels
        self.ax.set_xlabel("silhouette coefficient values")
        self.ax.set_ylabel("cluster label")

        # Set the ticks on the axis object.
        self.ax.set_yticks(self.y_tick_pos_)
        self.ax.set_yticklabels(str(idx) for idx in range(self.n_clusters_))
        # Set the ticks at multiples of 0.1
        self.ax.xaxis.set_major_locator(ticker.MultipleLocator(0.1))

        # Show legend (Average Silhouette Score axis)
        self.ax.legend(loc="best")
##########################################################################
## Quick Method
##########################################################################
def silhouette_visualizer(
    estimator, X, y=None, ax=None, colors=None, is_fitted="auto", show=True, **kwargs
):
    """Quick Method:
    Construct, fit, and render a :class:`SilhouetteVisualizer` in one call.

    The visualizer plots the silhouette coefficient of every sample grouped
    by cluster, with the mean silhouette score drawn as a vertical reference
    line, allowing density and separation of clusters to be judged visually.

    Parameters
    ----------
    estimator : a Scikit-Learn clusterer
        An instance of a centroidal clustering algorithm (``KMeans`` or
        ``MiniBatchKMeans``); fit by the visualizer unless ``is_fitted``
        indicates otherwise.

    X : array-like of shape (n, m)
        A matrix or data frame with n instances and m features

    y : array-like of shape (n,), optional
        A vector or series representing the target for each instance

    ax : matplotlib Axes, default: None
        The axis to plot the figure on; the current axes are used (or
        created) when None.

    colors : iterable or string, default: None
        Colors for each cluster group (repeated if too few); may also be a
        Yellowbrick or matplotlib colormap string.

    is_fitted : bool or str, default='auto'
        Whether the wrapped estimator is already fitted; 'auto' checks
        before fitting again.

    show : bool, default: True
        If True, calls ``show()`` (which calls ``plt.show()``); if False,
        only ``finalize()`` is called so the caller may save or modify the
        figure first.

    kwargs : dict
        Additional keyword arguments passed to the visualizer base class.

    Returns
    -------
    viz : SilhouetteVisualizer
        The silhouette visualizer, fitted and finalized.
    """
    visualizer = SilhouetteVisualizer(
        estimator, ax=ax, colors=colors, is_fitted=is_fitted, **kwargs
    )
    visualizer.fit(X, y)

    # Render immediately, or leave the figure finalized for the caller.
    if show:
        visualizer.show()
    else:
        visualizer.finalize()

    return visualizer
| {
"content_hash": "a3718f9e7aa3bf4565c2b95193dc1821",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 85,
"avg_line_length": 37.900621118012424,
"alnum_prop": 0.6255326122582759,
"repo_name": "DistrictDataLabs/yellowbrick",
"id": "3af5d337b0811e432df9ca03fdb2b015c0aa01fb",
"size": "12564",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "yellowbrick/cluster/silhouette.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1069"
},
{
"name": "Python",
"bytes": "1612806"
},
{
"name": "TeX",
"bytes": "3743"
}
],
"symlink_target": ""
} |
"""
Admin views for managing shares.
"""
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from manila_ui.api import manila
from manila_ui.dashboards.admin.shares import forms as project_forms
from manila_ui.dashboards.admin.shares import tables as s_tables
from manila_ui.dashboards.admin.shares import tabs as s_tabs
from manila_ui.dashboards.admin import utils
from manila_ui.dashboards.project.shares import views as share_views
class SharesView(tables.MultiTableView, share_views.ShareTableMixIn):
    """Admin index page listing shares across all tenants."""
    table_classes = (
        s_tables.SharesTable,
    )
    template_name = "admin/shares/index.html"
    page_title = _("Shares")

    @memoized.memoized_method
    def get_shares_data(self):
        """Fetch all shares and flag each one that has at least one snapshot.

        On API failure a Horizon error is shown and an empty list is
        returned so the table still renders.
        """
        shares = []
        try:
            shares = manila.share_list(
                self.request, search_opts={'all_tenants': True})
            snapshots = manila.share_snapshot_list(
                self.request, detailed=True, search_opts={'all_tenants': True})
            # Build a set for O(1) membership tests instead of scanning a
            # list once per share (previously O(shares * snapshots)).
            share_ids_with_snapshots = {
                snapshot.to_dict()['share_id'] for snapshot in snapshots}
            for share in shares:
                share.has_snapshot = (
                    share.to_dict()['id'] in share_ids_with_snapshots)
        except Exception:
            exceptions.handle(
                self.request, _('Unable to retrieve share list.'))
        # Gather our projects to correlate against IDs
        utils.set_project_name_to_objects(self.request, shares)
        return shares
class DetailView(share_views.DetailView):
    """Admin-side share detail page; reuses the project-side detail view."""
    tab_group_class = s_tabs.ShareDetailTabs
    template_name = "admin/shares/detail.html"

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        # Title the page with the share's display name from the base context.
        share_name = context["share_display_name"]
        context["page_title"] = _(
            "Share Details: %(share_name)s") % {'share_name': share_name}
        return context
class ManageShareView(forms.ModalFormView):
    """Modal dialog that brings an existing backend share under Manila
    management.

    NOTE: the previous ``get_context_data`` override only called ``super``
    and returned the result unchanged, so it has been removed; the
    inherited implementation is used directly.
    """
    form_class = project_forms.ManageShare
    form_id = "manage_share"
    template_name = 'admin/shares/manage_share.html'
    modal_header = _("Manage Share")
    modal_id = "manage_share_modal"
    submit_label = _("Manage")
    success_url = reverse_lazy('horizon:admin:shares:index')
    submit_url = reverse_lazy('horizon:admin:shares:manage')
    page_title = _("Manage Share")
class MigrationStartView(forms.ModalFormView):
    """Modal dialog that starts migration of a share to another host."""
    form_class = project_forms.MigrationStart
    template_name = 'admin/shares/migration_start.html'
    modal_header = _("Migrate Share")
    form_id = "migration_start_share"
    modal_id = "migration_start_share_modal"
    submit_label = _("Start migration")
    success_url = reverse_lazy('horizon:admin:shares:index')
    submit_url = 'horizon:admin:shares:migration_start'
    cancel_url = reverse_lazy('horizon:admin:shares:index')
    page_title = _("Migrate a Share")

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # The form posts back to this share-specific URL.
        context['submit_url'] = reverse(
            self.submit_url, args=(self.kwargs['share_id'],))
        return context

    @memoized.memoized_method
    def get_data(self):
        """Return the share being migrated; redirect to the index on error."""
        try:
            share = manila.share_get(self.request, self.kwargs['share_id'])
        except Exception:
            exceptions.handle(
                self.request, _('Unable to retrieve share details.'),
                redirect=self.success_url)
        return share

    def get_initial(self):
        share = self.get_data()
        return {'share_id': self.kwargs["share_id"], 'name': share.name}
class MigrationCompleteView(forms.ModalFormView):
    """Modal dialog confirming completion of a share migration."""
    form_class = project_forms.MigrationComplete
    template_name = 'admin/shares/migration_complete.html'
    modal_header = _("Confirm Migration Completion of Share")
    form_id = "migration_complete_share"
    modal_id = "migration_complete_share_modal"
    submit_label = _("Complete Migration")
    success_url = reverse_lazy('horizon:admin:shares:index')
    submit_url = 'horizon:admin:shares:migration_complete'
    cancel_url = reverse_lazy('horizon:admin:shares:index')
    page_title = _("Complete migration of a Share")

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # The form posts back to this share-specific URL.
        context['submit_url'] = reverse(
            self.submit_url, args=(self.kwargs['share_id'],))
        return context

    @memoized.memoized_method
    def get_data(self):
        """Return the share being migrated; redirect to the index on error."""
        try:
            share = manila.share_get(self.request, self.kwargs['share_id'])
        except Exception:
            exceptions.handle(
                self.request, _('Unable to retrieve share details.'),
                redirect=self.success_url)
        return share

    def get_initial(self):
        share = self.get_data()
        return {'share_id': self.kwargs["share_id"], 'name': share.name}
class MigrationCancelView(forms.ModalFormView):
    """Modal dialog confirming cancellation of a share migration."""
    form_class = project_forms.MigrationCancel
    template_name = 'admin/shares/migration_cancel.html'
    modal_header = _("Confirm Migration Cancelling of Share")
    form_id = "migration_cancel_share"
    modal_id = "migration_cancel_share_modal"
    submit_label = _("Cancel Migration")
    success_url = reverse_lazy('horizon:admin:shares:index')
    submit_url = 'horizon:admin:shares:migration_cancel'
    cancel_url = reverse_lazy('horizon:admin:shares:index')
    page_title = _("Cancel migration of a Share")

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # The form posts back to this share-specific URL.
        context['submit_url'] = reverse(
            self.submit_url, args=(self.kwargs['share_id'],))
        return context

    @memoized.memoized_method
    def get_data(self):
        """Return the share being migrated; redirect to the index on error."""
        try:
            share = manila.share_get(self.request, self.kwargs['share_id'])
        except Exception:
            exceptions.handle(
                self.request, _('Unable to retrieve share details.'),
                redirect=self.success_url)
        return share

    def get_initial(self):
        share = self.get_data()
        return {'share_id': self.kwargs["share_id"], 'name': share.name}
class MigrationGetProgressView(forms.ModalFormView):
    """Modal dialog that requests the progress of a share migration."""
    form_class = project_forms.MigrationGetProgress
    template_name = 'admin/shares/migration_get_progress.html'
    modal_header = _("Confirm Obtaining migration progress of Share")
    form_id = "migration_get_progress_share"
    modal_id = "migration_get_progress_share_modal"
    submit_label = _("Obtain Progress")
    success_url = reverse_lazy('horizon:admin:shares:index')
    submit_url = 'horizon:admin:shares:migration_get_progress'
    cancel_url = reverse_lazy('horizon:admin:shares:index')
    page_title = _("Obtain migration progress of a Share")

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # The form posts back to this share-specific URL.
        context['submit_url'] = reverse(
            self.submit_url, args=(self.kwargs['share_id'],))
        return context

    @memoized.memoized_method
    def get_data(self):
        """Return the share being migrated; redirect to the index on error."""
        try:
            share = manila.share_get(self.request, self.kwargs['share_id'])
        except Exception:
            exceptions.handle(
                self.request, _('Unable to retrieve share details.'),
                redirect=self.success_url)
        return share

    def get_initial(self):
        share = self.get_data()
        return {'share_id': self.kwargs["share_id"], 'name': share.name}
class UnmanageShareView(forms.ModalFormView):
    """Modal dialog that removes a share from Manila management."""
    form_class = project_forms.UnmanageShare
    form_id = "unmanage_share"
    template_name = 'admin/shares/unmanage_share.html'
    modal_header = _("Confirm Unmanage Share")
    modal_id = "unmanage_share_modal"
    submit_label = _("Unmanage")
    success_url = reverse_lazy('horizon:admin:shares:index')
    submit_url = 'horizon:admin:shares:unmanage'
    page_title = _("Unmanage Share")

    def get_context_data(self, **kwargs):
        context = super(UnmanageShareView, self).get_context_data(**kwargs)
        args = (self.kwargs['share_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def get_data(self):
        """Fetch the share to unmanage, redirecting to the index on error."""
        try:
            share_id = self.kwargs['share_id']
            share = manila.share_get(self.request, share_id)
        except Exception:
            # BUG FIX: the message previously read 'volume details' -- a
            # copy/paste leftover from a Cinder view; this view handles
            # shares.
            exceptions.handle(
                self.request, _('Unable to retrieve share details.'),
                redirect=self.success_url)
        return share

    def get_initial(self):
        share = self.get_data()
        return {
            'share_id': self.kwargs["share_id"],
            'name': share.name,
            'host': getattr(share, "host"),
        }
| {
"content_hash": "af7a0394086c42c265cf09cc003c1b91",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 79,
"avg_line_length": 36.246212121212125,
"alnum_prop": 0.6307869160831853,
"repo_name": "openstack/manila-ui",
"id": "7ec5a153bcdb5980ef7dce92d37b1a303d6f542d",
"size": "10174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila_ui/dashboards/admin/shares/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "72666"
},
{
"name": "Python",
"bytes": "756045"
},
{
"name": "Shell",
"bytes": "20977"
}
],
"symlink_target": ""
} |
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import standard_ops
class DenseWithWeightNorm(base.Layer):
"""Densely-connected layer class with weight normalization.
This layer implements the operation:
`outputs = activation(inputs.kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `kernel`.
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
scale: If True, multiply by `g`. If False, `g` is not used.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
scale: If True, multiply by `g`. If False, `g` is not used.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
References:
- [Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks](https://arxiv.org/abs/1602.07868)
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
scale=True,
trainable=True,
name=None,
**kwargs):
super().__init__(
trainable=trainable,
name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.units = units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.kernel_constraint = kernel_constraint
self.bias_constraint = bias_constraint
self.scale = scale
self.input_spec = base.InputSpec(min_ndim=2)
  def build(self, input_shape):
    """Create the layer variables: direction `kernel`, scale `g`, and `bias`.

    Args:
      input_shape: Shape of the input; the last dimension must be
        statically known.

    Raises:
      ValueError: If the last dimension of the inputs is undefined.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
      raise ValueError(
          'The last dimension of the inputs to `DenseWithWeightNorm` '
          'should be defined. Found `None`.')
    # Pin the feature axis so future calls are shape-checked against it.
    self.input_spec = base.InputSpec(
        min_ndim=2, axes={-1: input_shape[-1].value})
    if self.scale:
      # Per-unit magnitude of the weight-normalized kernel (the `g` of
      # https://arxiv.org/abs/1602.07868); shape broadcasts over the input axis.
      self.g = self.add_variable(
          'g',
          shape=[1, self.units],
          dtype=self.dtype,
          initializer=init_ops.ones_initializer(),
          trainable=True)
    else:
      # No learned scale: multiplying by 1. leaves the normalized kernel as-is.
      self.g = 1.
    # Unnormalized kernel; only its direction is used at call time.
    self.kernel = self.add_variable(
        'kernel',
        shape=[input_shape[-1].value, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    if self.use_bias:
      self.bias = self.add_variable(
          'bias',
          shape=[
              self.units,
          ],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True
  def call(self, inputs):
    """Apply the weight-normalized dense transform to `inputs`."""
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    # Direction: l2-normalize the kernel along its input axis (axis 0),
    # so each output unit's weight column has unit norm.
    scaled_kernel = tf.nn.l2_normalize(self.kernel, 0)
    if self.scale:
      # Magnitude: scale each normalized column by its learned `g`.
      scaled_kernel = math_ops.multiply(self.g, scaled_kernel)
    if len(shape) > 2:
      # Rank > 2: contract the last input axis against the kernel's first,
      # preserving the leading (batch-like) axes.
      outputs = standard_ops.tensordot(inputs, scaled_kernel,
                                       [[len(shape) - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      # NOTE(review): `context.in_graph_mode()` is a pre-TF-1.5 API —
      # presumably this guards eager execution; confirm against the
      # file's imports.
      if context.in_graph_mode():
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, scaled_kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if input_shape[-1].value is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
def dense_with_weight_norm(inputs,
                           units,
                           activation=None,
                           use_bias=True,
                           kernel_initializer=None,
                           bias_initializer=init_ops.zeros_initializer(),
                           kernel_regularizer=None,
                           bias_regularizer=None,
                           activity_regularizer=None,
                           kernel_constraint=None,
                           bias_constraint=None,
                           scale=True,
                           trainable=True,
                           name=None,
                           reuse=None):
  """Functional interface for `DenseWithWeightNorm`.

  Implements `outputs = activation(matmul(inputs, kernel) + bias)` where
  the kernel is reparameterized as an l2-normalized direction times a
  learned per-unit scale `g` (weight normalization,
  https://arxiv.org/abs/1602.07868). If `inputs` has rank greater than 2
  it is contracted along its last axis. All arguments mirror
  `DenseWithWeightNorm`; `reuse` controls variable sharing for layers of
  the same `name`.

  Args:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable), or None for linear.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer for the weight matrix, or None for the
      `tf.get_variable` default.
    bias_initializer: Initializer for the bias.
    kernel_regularizer: Regularizer for the weight matrix.
    bias_regularizer: Regularizer for the bias.
    activity_regularizer: Regularizer for the output.
    kernel_constraint: Optional projection applied to the kernel after
      each optimizer update.
    bias_constraint: Optional projection applied to the bias after each
      optimizer update.
    scale: If True, multiply by `g`. If False, `g` is not used.
    trainable: Boolean, whether variables are added to
      `GraphKeys.TRAINABLE_VARIABLES`.
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer by
      the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  dense_layer = DenseWithWeightNorm(
      units,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      scale=scale,
      trainable=trainable,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _scope=name,
      _reuse=reuse)
  return dense_layer.apply(inputs)
class _ConvWithWeightNorm(base.Layer):
  """Abstract nD convolution layer (private base) with weight normalization.

  This layer creates a convolution kernel that is convolved (actually
  cross-correlated) with the layer input to produce a tensor of outputs.
  The kernel is reparameterized as an l2-normalized direction times a
  learned per-filter scale `g`. If `use_bias` is True (and a
  `bias_initializer` is provided), a bias vector is created and added to
  the outputs. Finally, if `activation` is not `None`, it is applied to
  the outputs as well.

  Arguments:
    rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of n integers, specifying the
      length of the convolution window.
    strides: An integer or tuple/list of n integers, specifying the stride
      length of the convolution. Specifying any stride value != 1 is
      incompatible with specifying any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.
    dilation_rate: An integer or tuple/list of n integers, specifying the
      dilation rate to use for dilated convolution. Currently, specifying
      any `dilation_rate` value != 1 is incompatible with specifying any
      `strides` value != 1.
    activation: Activation function. Set it to None to maintain a linear
      activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, no bias
      will be applied.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer`. Constraints are not
      safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    scale: If True, multiply by `g`. If False, `g` is not used.
    trainable: Boolean, if `True` also add variables to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.

  References:
    - [Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks](https://arxiv.org/abs/1602.07868)
  """

  def __init__(self,
               rank,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format='channels_last',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               scale=True,
               trainable=True,
               name=None,
               **kwargs):
    """Record hyperparameters; variables are created in `build`."""
    super().__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.rank = rank
    self.filters = filters
    # Normalize scalar/tuple arguments to rank-length tuples up front.
    self.kernel_size = utils.normalize_tuple(kernel_size, rank,
                                             'kernel_size')
    self.strides = utils.normalize_tuple(strides, rank, 'strides')
    self.padding = utils.normalize_padding(padding)
    self.data_format = utils.normalize_data_format(data_format)
    self.dilation_rate = utils.normalize_tuple(dilation_rate, rank,
                                               'dilation_rate')
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.kernel_constraint = kernel_constraint
    self.bias_constraint = bias_constraint
    self.scale = scale
    # Convolution inputs have rank + 2 dims: batch, spatial..., channels.
    self.input_spec = base.InputSpec(ndim=self.rank + 2)

  def build(self, input_shape):
    """Create `kernel`, `g`, `bias` and the precompiled convolution op.

    Raises:
      ValueError: If the channel dimension of the inputs is undefined.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = -1
    if input_shape[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis].value
    # Kernel layout: spatial dims, then input channels, then filters.
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    if self.scale:
      # One learned magnitude per filter; leading 1s broadcast over the
      # spatial and input-channel axes of the kernel.
      self.g = self.add_variable(
          'g',
          shape=[1] * (len(self.kernel_size) + 1) + [self.filters],
          dtype=self.dtype,
          initializer=init_ops.ones_initializer(),
          trainable=True)
    else:
      # No learned scale: multiplying by 1. leaves the direction unchanged.
      self.g = 1.
    self.kernel = self.add_variable(
        name='kernel',
        shape=kernel_shape,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        trainable=True,
        dtype=self.dtype)
    if self.use_bias:
      self.bias = self.add_variable(
          name='bias',
          shape=(self.filters, ),
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          trainable=True,
          dtype=self.dtype)
    else:
      self.bias = None
    # Pin the channel axis so later calls are shape-checked.
    self.input_spec = base.InputSpec(
        ndim=self.rank + 2, axes={channel_axis: input_dim})
    # Precompute the convolution op once for this input signature.
    self._convolution_op = nn_ops.Convolution(
        input_shape,
        filter_shape=self.kernel.get_shape(),
        dilation_rate=self.dilation_rate,
        strides=self.strides,
        padding=self.padding.upper(),
        data_format=utils.convert_data_format(self.data_format,
                                              self.rank + 2))
    self.built = True

  def call(self, inputs):
    """Convolve `inputs` with the weight-normalized kernel."""
    # Normalize over every kernel axis except the last (filters), so each
    # filter's weights form a unit-norm direction.
    scaled_kernel = tf.nn.l2_normalize(
        self.kernel, list(range(len(self.kernel_size) + 1)))
    if self.scale:
      scaled_kernel = math_ops.multiply(self.g, scaled_kernel)
    outputs = self._convolution_op(inputs, scaled_kernel)
    if self.use_bias:
      if self.data_format == 'channels_first':
        if self.rank == 1:
          # nn.bias_add does not accept a 1D input tensor.
          bias = array_ops.reshape(self.bias, (1, self.filters, 1))
          outputs += bias
        if self.rank == 2:
          outputs = nn.bias_add(
              outputs, self.bias, data_format='NCHW')
        if self.rank == 3:
          # As of Mar 2017, direct addition is significantly slower than
          # bias_add when computing gradients. To use bias_add, we collapse Z
          # and Y into a single dimension to obtain a 4D input tensor.
          outputs_shape = outputs.shape.as_list()
          outputs_4d = array_ops.reshape(outputs, [
              outputs_shape[0], outputs_shape[1],
              outputs_shape[2] * outputs_shape[3], outputs_shape[4]
          ])
          outputs_4d = nn.bias_add(
              outputs_4d, self.bias, data_format='NCHW')
          outputs = array_ops.reshape(outputs_4d, outputs_shape)
      else:
        outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
    if self.activation is not None:
      return self.activation(outputs)
    return outputs

  def _compute_output_shape(self, input_shape):
    """Return the output shape implied by padding, strides and dilation."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if self.data_format == 'channels_last':
      space = input_shape[1:-1]
      new_space = []
      for i in range(len(space)):
        new_dim = utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0]] + new_space +
                                      [self.filters])
    else:
      space = input_shape[2:]
      new_space = []
      for i in range(len(space)):
        new_dim = utils.conv_output_length(
            space[i],
            self.kernel_size[i],
            padding=self.padding,
            stride=self.strides[i],
            dilation=self.dilation_rate[i])
        new_space.append(new_dim)
      return tensor_shape.TensorShape([input_shape[0], self.filters] +
                                      new_space)
class Conv1DWithWeightNorm(_ConvWithWeightNorm):
  """1D (temporal) convolution layer with weight normalization.

  Behaves like a standard 1D convolution layer except that the kernel is
  reparameterized as an l2-normalized direction times a learned
  per-filter scale `g` (Salimans & Kingma, 2016,
  https://arxiv.org/abs/1602.07868). If `use_bias` is True a bias vector
  is added to the outputs, and `activation` (if not None) is applied
  last. See `_ConvWithWeightNorm` for the documentation of every
  constructor argument; this subclass only fixes the convolution rank
  to 1, so inputs have shape `(batch, length, channels)`
  (`channels_last`) or `(batch, channels, length)` (`channels_first`).
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format='channels_last',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               scale=True,
               trainable=True,
               name=None,
               **kwargs):
    """Forward every argument to the base class with `rank=1`."""
    conv_kwargs = dict(
        rank=1,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        scale=scale,
        trainable=trainable,
        name=name)
    conv_kwargs.update(kwargs)
    super().__init__(**conv_kwargs)
def conv1d_with_weight_norm(inputs,
                            filters,
                            kernel_size,
                            strides=1,
                            padding='valid',
                            data_format='channels_last',
                            dilation_rate=1,
                            activation=None,
                            use_bias=True,
                            kernel_initializer=None,
                            bias_initializer=init_ops.zeros_initializer(),
                            kernel_regularizer=None,
                            bias_regularizer=None,
                            activity_regularizer=None,
                            kernel_constraint=None,
                            bias_constraint=None,
                            scale=True,
                            trainable=True,
                            name=None,
                            reuse=None):
  """Functional interface for `Conv1DWithWeightNorm`.

  Builds (or, with `reuse=True`, re-attaches to) a weight-normalized 1D
  convolution layer (https://arxiv.org/abs/1602.07868) and applies it to
  `inputs`. All arguments mirror `Conv1DWithWeightNorm`; `reuse` controls
  variable sharing for layers of the same `name`.

  Args:
    inputs: Tensor input.
    filters: Integer, number of output filters.
    kernel_size: Integer or tuple/list of one integer, the window length.
    strides: Integer or tuple/list of one integer, the stride. Strides != 1
      are incompatible with `dilation_rate` != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: `channels_last` (`(batch, length, channels)`) or
      `channels_first` (`(batch, channels, length)`).
    dilation_rate: Integer or tuple/list of one integer, the dilation.
    activation: Activation function, or None for linear.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer for the convolution kernel.
    bias_initializer: Initializer for the bias vector; None disables bias.
    kernel_regularizer: Optional regularizer for the kernel.
    bias_regularizer: Optional regularizer for the bias.
    activity_regularizer: Optional regularizer for the output.
    kernel_constraint: Optional projection applied to the kernel after each
      optimizer update.
    bias_constraint: Optional projection applied to the bias after each
      optimizer update.
    scale: If True, multiply by `g`. If False, `g` is not used.
    trainable: Boolean, whether variables are added to
      `GraphKeys.TRAINABLE_VARIABLES`.
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer by
      the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  conv_layer = Conv1DWithWeightNorm(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      scale=scale,
      trainable=trainable,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _reuse=reuse,
      _scope=name)
  return conv_layer.apply(inputs)
class Conv2DWithWeightNorm(_ConvWithWeightNorm):
  """2D (spatial) convolution layer with weight normalization.

  Behaves like a standard 2D convolution layer except that the kernel is
  reparameterized as an l2-normalized direction times a learned
  per-filter scale `g` (Salimans & Kingma, 2016,
  https://arxiv.org/abs/1602.07868). If `use_bias` is True a bias vector
  is added to the outputs, and `activation` (if not None) is applied
  last. See `_ConvWithWeightNorm` for the documentation of every
  constructor argument; this subclass only fixes the convolution rank
  to 2, so inputs have shape `(batch, height, width, channels)`
  (`channels_last`) or `(batch, channels, height, width)`
  (`channels_first`).
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format='channels_last',
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               scale=True,
               trainable=True,
               name=None,
               **kwargs):
    """Forward every argument to the base class with `rank=2`."""
    conv_kwargs = dict(
        rank=2,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        scale=scale,
        trainable=trainable,
        name=name)
    conv_kwargs.update(kwargs)
    super().__init__(**conv_kwargs)
def conv2d_with_weight_norm(inputs,
                            filters,
                            kernel_size,
                            strides=(1, 1),
                            padding='valid',
                            data_format='channels_last',
                            dilation_rate=(1, 1),
                            activation=None,
                            use_bias=True,
                            kernel_initializer=None,
                            bias_initializer=init_ops.zeros_initializer(),
                            kernel_regularizer=None,
                            bias_regularizer=None,
                            activity_regularizer=None,
                            kernel_constraint=None,
                            bias_constraint=None,
                            scale=True,
                            trainable=True,
                            name=None,
                            reuse=None):
  """Functional interface for `Conv2DWithWeightNorm`.

  Builds (or, with `reuse=True`, re-attaches to) a weight-normalized 2D
  convolution layer (https://arxiv.org/abs/1602.07868) and applies it to
  `inputs`. All arguments mirror `Conv2DWithWeightNorm`; `reuse` controls
  variable sharing for layers of the same `name`.

  Args:
    inputs: Tensor input.
    filters: Integer, number of output filters.
    kernel_size: Integer or tuple/list of 2 integers, the window height
      and width; a single integer applies to both spatial dimensions.
    strides: Integer or tuple/list of 2 integers, the strides along height
      and width. Strides != 1 are incompatible with `dilation_rate` != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: `channels_last` (`(batch, height, width, channels)`) or
      `channels_first` (`(batch, channels, height, width)`).
    dilation_rate: Integer or tuple/list of 2 integers, the dilation.
    activation: Activation function, or None for linear.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer for the convolution kernel.
    bias_initializer: Initializer for the bias vector; None disables bias.
    kernel_regularizer: Optional regularizer for the kernel.
    bias_regularizer: Optional regularizer for the bias.
    activity_regularizer: Optional regularizer for the output.
    kernel_constraint: Optional projection applied to the kernel after each
      optimizer update.
    bias_constraint: Optional projection applied to the bias after each
      optimizer update.
    scale: If True, multiply by `g`. If False, `g` is not used.
    trainable: Boolean, whether variables are added to
      `GraphKeys.TRAINABLE_VARIABLES`.
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer by
      the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  conv_layer = Conv2DWithWeightNorm(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      scale=scale,
      trainable=trainable,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _reuse=reuse,
      _scope=name)
  return conv_layer.apply(inputs)
class Conv3DWithWeightNorm(_ConvWithWeightNorm):
"""3D convolution layer (e.g. spatial convolution over volumes) with weight normalization.
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth,
height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
dilation_rate: An integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, no bias will
be applied.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
scale: If True, multiply by `g`. If False, `g` is not used.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
References:
- [Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks](https://arxiv.org/abs/1602.07868)
"""
def __init__(self,
             filters,
             kernel_size,
             strides=(1, 1, 1),
             padding='valid',
             data_format='channels_last',
             dilation_rate=(1, 1, 1),
             activation=None,
             use_bias=True,
             kernel_initializer=None,
             bias_initializer=init_ops.zeros_initializer(),
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             scale=True,
             trainable=True,
             name=None,
             **kwargs):
    """Initialize the layer; see the class docstring for argument details."""
    # Pure pass-through constructor: pin rank=3 (3D convolution) and
    # forward every other argument unchanged to the weight-normalized
    # convolution base class.
    super().__init__(
        rank=3,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        scale=scale,
        trainable=trainable,
        name=name,
        **kwargs)
def conv3d_with_weight_norm(inputs,
                            filters,
                            kernel_size,
                            strides=(1, 1, 1),
                            padding='valid',
                            data_format='channels_last',
                            dilation_rate=(1, 1, 1),
                            activation=None,
                            use_bias=True,
                            kernel_initializer=None,
                            bias_initializer=init_ops.zeros_initializer(),
                            kernel_regularizer=None,
                            bias_regularizer=None,
                            activity_regularizer=None,
                            kernel_constraint=None,
                            bias_constraint=None,
                            scale=True,
                            trainable=True,
                            name=None,
                            reuse=None):
    """Functional interface for the weight-normalized 3D convolution layer.

    Builds a `Conv3DWithWeightNorm` layer and immediately applies it to
    `inputs`. See `Conv3DWithWeightNorm` for the meaning of the layer
    arguments; `reuse` controls whether the variables of a previous layer
    with the same name are reused.

    Returns:
      Output tensor.

    Raises:
      ValueError: if eager execution is enabled.

    References:
      - [Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks](https://arxiv.org/abs/1602.07868)
    """
    layer_kwargs = dict(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        scale=scale,
        trainable=trainable,
        name=name,
        _reuse=reuse,
        _scope=name)
    return Conv3DWithWeightNorm(**layer_kwargs).apply(inputs)
class Conv2DTransposeWithWeightNorm(Conv2DWithWeightNorm):
    """Transposed 2D convolution layer (sometimes called 2D Deconvolution).

    The need for transposed convolutions generally arises
    from the desire to use a transformation going in the opposite direction
    of a normal convolution, i.e., from something that has the shape of the
    output of some convolution to something that has the shape of its input
    while maintaining a connectivity pattern that is compatible with
    said convolution.

    Arguments:
      filters: Integer, the dimensionality of the output space (i.e. the number
        of filters in the convolution).
      kernel_size: A tuple or list of 2 positive integers specifying the spatial
        dimensions of the filters. Can be a single integer to specify the same
        value for all spatial dimensions.
      strides: A tuple or list of 2 positive integers specifying the strides
        of the convolution. Can be a single integer to specify the same value for
        all spatial dimensions.
      padding: one of `"valid"` or `"same"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch, height, width, channels)` while `channels_first` corresponds to
        inputs with shape `(batch, channels, height, width)`.
      activation: Activation function. Set it to None to maintain a
        linear activation.
      use_bias: Boolean, whether the layer uses a bias.
      kernel_initializer: An initializer for the convolution kernel.
      bias_initializer: An initializer for the bias vector. If None, no bias will
        be applied.
      kernel_regularizer: Optional regularizer for the convolution kernel.
      bias_regularizer: Optional regularizer for the bias vector.
      activity_regularizer: Optional regularizer function for the output.
      kernel_constraint: Optional projection function to be applied to the
        kernel after being updated by an `Optimizer` (e.g. used to implement
        norm constraints or value constraints for layer weights). The function
        must take as input the unprojected variable and must return the
        projected variable (which must have the same shape). Constraints are
        not safe to use when doing asynchronous distributed training.
      bias_constraint: Optional projection function to be applied to the
        bias after being updated by an `Optimizer`.
      scale: If True, multiply the normalized kernel by `g`. If False, `g`
        is not used.
      trainable: Boolean, if `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: A string, the name of the layer.
    """

    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format='channels_last',
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=init_ops.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 scale=True,
                 name=None,
                 **kwargs):
        """Initialize the layer; see the class docstring for argument details."""
        super().__init__(
            filters,
            kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            trainable=trainable,
            scale=scale,
            name=name,
            **kwargs)
        # Transposed convolution here only supports 4-D (2D spatial) inputs.
        self.input_spec = base.InputSpec(ndim=4)

    def build(self, input_shape):
        """Create the (weight-normalized) kernel, gain `g` and bias."""
        if len(input_shape) != 4:
            # Bug fix: the original message concatenated into
            # "rank 4Received input shape:" with no separator.
            raise ValueError(
                'Inputs should have rank 4. Received input shape: ' +
                str(input_shape))
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        self.input_spec = base.InputSpec(
            ndim=4, axes={channel_axis: input_dim})
        # Transposed kernel layout: (height, width, out_channels, in_channels).
        kernel_shape = self.kernel_size + (self.filters, input_dim)
        if self.scale:
            # One gain per output filter, broadcastable over the spatial
            # dims and the input-channel dim (shape (1, 1, filters, 1)).
            self.g = self.add_variable(
                'g',
                shape=[1] * len(self.kernel_size) + [self.filters, 1],
                dtype=self.dtype,
                initializer=init_ops.ones_initializer(),
                trainable=True)
        else:
            self.g = 1.
        self.kernel = self.add_variable(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_variable(
                name='bias',
                shape=(self.filters, ),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype)
        else:
            self.bias = None
        self.built = True

    def call(self, inputs):
        inputs_shape = array_ops.shape(inputs)
        batch_size = inputs_shape[0]
        if self.data_format == 'channels_first':
            c_axis, h_axis, w_axis = 1, 2, 3
        else:
            c_axis, h_axis, w_axis = 3, 1, 2
        height, width = inputs_shape[h_axis], inputs_shape[w_axis]
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.strides
        # Infer the dynamic output shape:
        out_height = utils.deconv_output_length(height, kernel_h, self.padding,
                                                stride_h)
        out_width = utils.deconv_output_length(width, kernel_w, self.padding,
                                               stride_w)
        if self.data_format == 'channels_first':
            output_shape = (batch_size, self.filters, out_height, out_width)
            strides = (1, 1, stride_h, stride_w)
        else:
            output_shape = (batch_size, out_height, out_width, self.filters)
            strides = (1, stride_h, stride_w, 1)
        # Weight normalization: unit-norm the kernel per output filter
        # (normalize over the spatial dims and the input-channel dim),
        # then rescale by the learned gain `g` when enabled.
        kernel_len = len(self.kernel_size)
        scaled_kernel = tf.nn.l2_normalize(
            self.kernel, list(range(kernel_len)) + [1 + kernel_len])
        if self.scale:
            scaled_kernel = math_ops.multiply(self.g, scaled_kernel)
        output_shape_tensor = array_ops.stack(output_shape)
        outputs = nn.conv2d_transpose(
            inputs,
            scaled_kernel,
            output_shape_tensor,
            strides,
            padding=self.padding.upper(),
            data_format=utils.convert_data_format(self.data_format, ndim=4))
        if context.in_graph_mode():
            # Infer the static output shape:
            out_shape = inputs.get_shape().as_list()
            out_shape[c_axis] = self.filters
            out_shape[h_axis] = utils.deconv_output_length(
                out_shape[h_axis], kernel_h, self.padding, stride_h)
            out_shape[w_axis] = utils.deconv_output_length(
                out_shape[w_axis], kernel_w, self.padding, stride_w)
            outputs.set_shape(out_shape)
        if self.use_bias:
            outputs = nn.bias_add(
                outputs,
                self.bias,
                data_format=utils.convert_data_format(
                    self.data_format, ndim=4))
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def _compute_output_shape(self, input_shape):
        """Static shape inference mirroring the dynamic logic in `call`."""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        output_shape = list(input_shape)
        if self.data_format == 'channels_first':
            c_axis, h_axis, w_axis = 1, 2, 3
        else:
            c_axis, h_axis, w_axis = 3, 1, 2
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.strides
        output_shape[c_axis] = self.filters
        output_shape[h_axis] = utils.deconv_output_length(
            output_shape[h_axis], kernel_h, self.padding, stride_h)
        output_shape[w_axis] = utils.deconv_output_length(
            output_shape[w_axis], kernel_w, self.padding, stride_w)
        return tensor_shape.TensorShape(output_shape)
def conv2d_transpose_with_weight_norm(
        inputs,
        filters,
        kernel_size,
        strides=(1, 1),
        padding='valid',
        data_format='channels_last',
        activation=None,
        use_bias=True,
        kernel_initializer=None,
        bias_initializer=init_ops.zeros_initializer(),
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
        scale=True,
        name=None,
        reuse=None):
    """Functional interface for the weight-normalized transposed 2D convolution.

    Builds a `Conv2DTransposeWithWeightNorm` layer and immediately applies
    it to `inputs`. See that class for the meaning of the layer arguments;
    `reuse` controls whether the variables of a previous layer with the
    same name are reused.

    Returns:
      Output tensor.

    Raises:
      ValueError: if eager execution is enabled.
    """
    layer_kwargs = dict(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        scale=scale,
        name=name,
        dtype=inputs.dtype.base_dtype,
        _reuse=reuse,
        _scope=name)
    return Conv2DTransposeWithWeightNorm(**layer_kwargs).apply(inputs)
class PReLU(base.Layer):
    """Parametric Rectified Linear Unit.

    Computes `f(x) = x` for `x >= 0` and `f(x) = alpha * x` for `x < 0`,
    where `alpha` is a learned array shaped like the input (minus any axes
    listed in `shared_axes`).

    Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

    Output shape:
      Same shape as the input.

    Arguments:
      alpha_initializer: initializer function for the weights.
      alpha_regularizer: regularizer for the weights.
      alpha_constraint: constraint for the weights; defaults to clipping
        into `[0, 1]`.
      shared_axes: the axes along which to share learnable parameters for
        the activation function. For example, for incoming feature maps
        from a 2D convolution with output shape
        `(batch, height, width, channels)`, use `shared_axes=[1, 2]` so
        that each filter only has one set of parameters.
      activity_regularizer: Optional regularizer function for the output.
      trainable: Boolean, if `True` also add variables to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: A string, the name of the layer.
    """

    def __init__(self,
                 alpha_initializer=init_ops.zeros_initializer(),
                 alpha_regularizer=None,
                 activity_regularizer=None,
                 alpha_constraint=lambda x: clip_ops.clip_by_value(x, 0., 1.),
                 shared_axes=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        super().__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=activity_regularizer,
            **kwargs)
        self.supports_masking = True
        self.alpha_initializer = alpha_initializer
        self.alpha_regularizer = alpha_regularizer
        self.alpha_constraint = alpha_constraint
        # Normalize shared_axes to either None or a plain list.
        if shared_axes is None:
            self.shared_axes = None
        elif isinstance(shared_axes, (list, tuple)):
            self.shared_axes = list(shared_axes)
        else:
            self.shared_axes = [shared_axes]

    def build(self, input_shape):
        shape = tensor_shape.TensorShape(input_shape).as_list()
        # One alpha per element of the non-batch shape, collapsed to 1 on
        # every shared axis.
        param_shape = shape[1:]
        self.param_broadcast = [False] * len(param_shape)
        if self.shared_axes is not None:
            for axis in self.shared_axes:
                param_shape[axis - 1] = 1
                self.param_broadcast[axis - 1] = True
        self.alpha = self.add_variable(
            'alpha',
            shape=param_shape,
            initializer=self.alpha_initializer,
            regularizer=self.alpha_regularizer,
            constraint=self.alpha_constraint,
            dtype=self.dtype,
            trainable=True)
        # Pin every non-shared axis of the input spec to its known size.
        if self.shared_axes:
            axes = {i: shape[i]
                    for i in range(1, len(shape))
                    if i not in self.shared_axes}
        else:
            axes = {}
        self.input_spec = base.InputSpec(ndim=len(shape), axes=axes)
        self.built = True

    def call(self, inputs, mask=None):
        x = ops.convert_to_tensor(inputs, dtype=self.dtype)
        # For alpha in [0, 1] (the default constraint), max(alpha * x, x)
        # equals x when x >= 0 and alpha * x otherwise.
        return math_ops.maximum(self.alpha * x, x)
def parametric_relu(
        inputs,
        alpha_initializer=init_ops.zeros_initializer(),
        alpha_regularizer=None,
        activity_regularizer=None,
        alpha_constraint=lambda x: clip_ops.clip_by_value(x, 0., 1.),
        shared_axes=None,
        trainable=True,
        name=None,
        reuse=None):
    """Functional interface for the PReLU layer.

    Builds a `PReLU` layer (`f(x) = x` for `x >= 0`, `alpha * x` for
    `x < 0`, with learned `alpha`) and immediately applies it to `inputs`.
    See `PReLU` for the meaning of the layer arguments; `reuse` controls
    whether the weights of a previous layer with the same name are reused.

    Returns:
      Output tensor.
    """
    layer_kwargs = dict(
        alpha_initializer=alpha_initializer,
        alpha_regularizer=alpha_regularizer,
        activity_regularizer=activity_regularizer,
        alpha_constraint=alpha_constraint,
        shared_axes=shared_axes,
        trainable=trainable,
        name=name,
        _reuse=reuse,
        _scope=name)
    return PReLU(**layer_kwargs).apply(inputs)
class TPReLU(base.Layer):
    """Translated Parametric Rectified Linear Unit.

    Computes `f(x) = x` for `x >= bias` and
    `f(x) = alpha * x + (1 - alpha) * bias` for `x < bias`, where `alpha`
    and `bias` are learned arrays shaped like the input (minus any axes
    listed in `shared_axes`).

    Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

    Output shape:
      Same shape as the input.

    Arguments:
      alpha_initializer: initializer function for the weights.
      bias_initializer: initializer function for the bias.
      alpha_regularizer: regularizer for the weights.
      bias_regularizer: regularizer for the bias.
      activity_regularizer: Optional regularizer function for the output.
      alpha_constraint: constraint for the weights; defaults to clipping
        into `[0, 1]`.
      bias_constraint: constraint for the bias.
      shared_axes: the axes along which to share learnable parameters for
        the activation function. For example, for incoming feature maps
        from a 2D convolution with output shape
        `(batch, height, width, channels)`, use `shared_axes=[1, 2]` so
        that each filter only has one set of parameters.
      trainable: Boolean, if `True` also add variables to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: A string, the name of the layer.
    """

    def __init__(self,
                 alpha_initializer=init_ops.zeros_initializer(),
                 bias_initializer=init_ops.zeros_initializer(),
                 alpha_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 alpha_constraint=lambda x: clip_ops.clip_by_value(x, 0., 1.),
                 bias_constraint=None,
                 shared_axes=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        super().__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=activity_regularizer,
            **kwargs)
        self.supports_masking = True
        self.alpha_initializer = alpha_initializer
        self.bias_initializer = bias_initializer
        self.alpha_regularizer = alpha_regularizer
        self.bias_regularizer = bias_regularizer
        self.alpha_constraint = alpha_constraint
        self.bias_constraint = bias_constraint
        # Normalize shared_axes to either None or a plain list.
        if shared_axes is None:
            self.shared_axes = None
        elif isinstance(shared_axes, (list, tuple)):
            self.shared_axes = list(shared_axes)
        else:
            self.shared_axes = [shared_axes]

    def build(self, input_shape):
        shape = tensor_shape.TensorShape(input_shape).as_list()
        # One alpha/bias per element of the non-batch shape, collapsed to
        # 1 on every shared axis.
        param_shape = shape[1:]
        self.param_broadcast = [False] * len(param_shape)
        if self.shared_axes is not None:
            for axis in self.shared_axes:
                param_shape[axis - 1] = 1
                self.param_broadcast[axis - 1] = True
        self.alpha = self.add_variable(
            'alpha',
            shape=param_shape,
            initializer=self.alpha_initializer,
            regularizer=self.alpha_regularizer,
            constraint=self.alpha_constraint,
            dtype=self.dtype,
            trainable=True)
        self.bias = self.add_variable(
            'bias',
            shape=param_shape,
            initializer=self.bias_initializer,
            regularizer=self.bias_regularizer,
            constraint=self.bias_constraint,
            dtype=self.dtype,
            trainable=True)
        # Pin every non-shared axis of the input spec to its known size.
        if self.shared_axes:
            axes = {i: shape[i]
                    for i in range(1, len(shape))
                    if i not in self.shared_axes}
        else:
            axes = {}
        self.input_spec = base.InputSpec(ndim=len(shape), axes=axes)
        self.built = True

    def call(self, inputs, mask=None):
        x = ops.convert_to_tensor(inputs, dtype=self.dtype)
        # Shift by the learned bias, apply the PReLU rectification around
        # zero, then shift back.
        shifted = math_ops.add(x, -self.bias)
        rectified = math_ops.maximum(self.alpha * shifted, shifted)
        return math_ops.add(rectified, self.bias)
def translated_parametric_relu(
        inputs,
        alpha_initializer=init_ops.zeros_initializer(),
        bias_initializer=init_ops.zeros_initializer(),
        alpha_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        alpha_constraint=lambda x: clip_ops.clip_by_value(x, 0., 1.),
        bias_constraint=None,
        shared_axes=None,
        trainable=True,
        name=None,
        reuse=None):
    """Functional interface for the TPReLU layer.

    It follows:
    `f(x) = alpha * x + (1 - alpha) * bias for x < bias`,
    `f(x) = x for x >= bias`,
    where `alpha` and `bias` are learned arrays with the same shape as x.
    (The docstring previously described plain PReLU and omitted `bias`.)

    Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

    Output shape:
      Same shape as the input.

    Arguments:
      alpha_initializer: initializer function for the weights.
      bias_initializer: initializer function for the bias.
      alpha_regularizer: regularizer for the weights.
      bias_regularizer: regularizer for the bias.
      activity_regularizer: Optional regularizer function for the output.
      alpha_constraint: constraint for the weights.
      bias_constraint: Constraint function for the bias.
      shared_axes: the axes along which to share learnable
        parameters for the activation function.
        For example, if the incoming feature maps
        are from a 2D convolution
        with output shape `(batch, height, width, channels)`,
        and you wish to share parameters across space
        so that each filter only has one set of parameters,
        set `shared_axes=[1, 2]`.
      trainable: Boolean, if `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: A string, the name of the layer.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      Output tensor.
    """
    layer = TPReLU(
        alpha_initializer=alpha_initializer,
        bias_initializer=bias_initializer,
        alpha_regularizer=alpha_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        alpha_constraint=alpha_constraint,
        bias_constraint=bias_constraint,
        shared_axes=shared_axes,
        trainable=trainable,
        name=name,
        _reuse=reuse,
        _scope=name)
    return layer.apply(inputs)
| {
"content_hash": "ba0d9868059973a565d632318cb28fff",
"timestamp": "",
"source": "github",
"line_count": 1756,
"max_line_length": 140,
"avg_line_length": 44.4003416856492,
"alnum_prop": 0.6188387394666974,
"repo_name": "shaform/DeepNetworks",
"id": "99da5a26c606e9516940253c7a04391d51ea38a4",
"size": "78772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_networks/layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2073544"
},
{
"name": "Python",
"bytes": "210751"
}
],
"symlink_target": ""
} |
import pyjd
from pyjamas import DOM
from __pyjamas__ import JS
# Browser builds need a module-level JavaScript slot for the focus
# handler; desktop (pyjd) builds skip it.
if not pyjd.is_desktop:
    JS("""
var focusHandler = null;
""")
def ensureFocusHandler():
    # Intentionally a no-op in this implementation.
    pass
def createFocusHandler():
    # Intentionally a no-op in this implementation.
    pass
def createFocusable0():
    # Intentionally a no-op in this implementation.
    pass
def blur(elem):
    # Remove keyboard focus from the given DOM element.
    elem.blur()
def createFocusable():
    # A plain div is not focusable by default; tabIndex 0 places it in the
    # document's natural tab order so it can receive keyboard focus.
    e = DOM.createDiv()
    e.tabIndex = 0
    return e
def focus(elem):
    # Give keyboard focus to the given DOM element.
    elem.focus()
def getTabIndex(elem):
    # Return the element's position in the tab order.
    return elem.tabIndex
def setAccessKey(elem, key):
    # Assign the keyboard shortcut (access key) for the element.
    elem.accessKey = key
def setTabIndex(elem, index):
    # Set the element's position in the tab order.
    elem.tabIndex = index
class FocusMixin:
    """Mixin adding focus, tab-index, enabled and readonly helpers.

    The host class must provide getElement() returning the DOM element
    that these helpers operate on.
    """

    def getTabIndex(self):
        return getTabIndex(self.getElement())

    def setAccessKey(self, key):
        setAccessKey(self.getElement(), key)

    def setFocus(self, focused):
        if focused:
            focus(self.getElement())
        else:
            blur(self.getElement())

    def setTabIndex(self, index):
        setTabIndex(self.getElement(), index)

    def isEnabled(self):
        print("warning: this function is deprecated, please use getEnabled")
        # Bug fix: this previously returned self.getReadonly(), which
        # reports the readOnly attribute rather than the disabled state.
        return self.getEnabled()

    def getEnabled(self):
        # Missing/unset attribute counts as enabled.
        try:
            return not DOM.getBooleanAttribute(self.getElement(), "disabled")
        except (TypeError, AttributeError):
            return True

    def setEnabled(self, enabled):
        DOM.setBooleanAttribute(self.getElement(), "disabled", not enabled)

    def isReadonly(self):
        print("warning: this function is deprecated, please use getReadonly")
        return self.getReadonly()

    def getReadonly(self):
        # Missing/unset attribute counts as readonly (mirrors getEnabled).
        try:
            return not DOM.getBooleanAttribute(self.getElement(), "readOnly")
        except (TypeError, AttributeError):
            return True

    def setReadonly(self, readonly):
        DOM.setBooleanAttribute(self.getElement(), "readOnly", readonly)
| {
"content_hash": "60e318078b9da405c3a84688e9dde0fe",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 77,
"avg_line_length": 22.162790697674417,
"alnum_prop": 0.6306400839454355,
"repo_name": "minghuascode/pyj",
"id": "dd456233298aef3aaddde657d41a73bedb51eae1",
"size": "2565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/pyjamas/ui/Focus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "107608"
},
{
"name": "JavaScript",
"bytes": "116371"
},
{
"name": "PHP",
"bytes": "5473"
},
{
"name": "Python",
"bytes": "7572605"
},
{
"name": "Shell",
"bytes": "24231"
}
],
"symlink_target": ""
} |
from functools import reduce

# Product of 1..n.  Fixes: `reduce` is not a builtin on Python 3, and the
# explicit initial value 1 makes factorial(0) == 1 instead of raising
# TypeError on an empty range.
factorial = lambda n: reduce(lambda lhs, rhs: lhs * rhs, range(1, n + 1), 1)


def _check(condition):
    """Print a verdict for a zero-argument predicate."""
    if condition():
        print('Thumbs up.')
    else:
        print('Thumbs down.')


def is_equal_to(lhs, rhs):
    """Return a thunk that lazily evaluates lhs == rhs."""
    return lambda: lhs == rhs


_check(is_equal_to(factorial(1), 1))
_check(is_equal_to(factorial(2), 2))
_check(is_equal_to(factorial(3), 6))
_check(is_equal_to(factorial(5), 120))
| {
"content_hash": "3d4816936c2b9e337cd4b2cce5191722",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 73,
"avg_line_length": 24.4,
"alnum_prop": 0.6584699453551912,
"repo_name": "regnart-tech-club/programming-concepts",
"id": "b90d149725990571779ad7b05c87cf4b0e3bc05b",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course-2:combining-building-blocks/subject-4:all together now/topic-2:ETL/lesson-4.1:factorial solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40873"
}
],
"symlink_target": ""
} |
from setuptools import setup
from _pysh import __version__

# Packaging metadata for the py.sh helper module; the version string is
# assembled from the package's numeric __version__ tuple.
setup(
    name="py.sh-helpers",
    version=".".join(map(str, __version__)),
    license="BSD",
    description="Helper module for py.sh.",
    author="Dave Hall",
    author_email="dave@etianen.com",
    url="https://github.com/etianen/py.sh",
    packages=["_pysh"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.5",
    ],
)
| {
"content_hash": "34744786120955627ae433629bb30942",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 28.636363636363637,
"alnum_prop": 0.5968253968253968,
"repo_name": "etianen/py.sh",
"id": "c9073056f9442074bdcbfedba5d20d8a9a04db8f",
"size": "630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19189"
},
{
"name": "Shell",
"bytes": "6946"
}
],
"symlink_target": ""
} |
import pytest
from flask import json
from willstores.util.response_schema import GenderSchema
def test_gender_default(flask_app_client):
    """A plain GET on /gender/men returns 200 with a GenderSchema payload."""
    resp = flask_app_client.get(
        "/gender/men", headers={"Accept": "application/json"})
    payload = json.loads(resp.data)
    GenderSchema(strict=True).load(payload)
    assert resp.status_code == 200
def test_gender_nocontent(flask_app_client):
    """An unknown gender yields 204 No Content."""
    resp = flask_app_client.get(
        "/gender/dontexist", headers={"Accept": "application/json"})
    assert resp.status_code == 204
def test_gender_discounts_valid(flask_app_client):
    """A positive integer `discounts` filter is accepted with 200."""
    resp = flask_app_client.get(
        "/gender/men",
        headers={"Accept": "application/json"},
        json={"discounts": 3})
    payload = json.loads(resp.data)
    GenderSchema(strict=True).load(payload)
    assert resp.status_code == 200
def test_gender_random_json(flask_app_client):
    """Unrecognized JSON keys are ignored and the request still succeeds."""
    resp = flask_app_client.get(
        "/gender/men",
        headers={"Accept": "application/json"},
        json={"churros": "chocolate"})
    payload = json.loads(resp.data)
    GenderSchema(strict=True).load(payload)
    assert resp.status_code == 200
def test_gender_discounts_string(flask_app_client):
    """A non-numeric `discounts` value is rejected with 400 and an error key."""
    resp = flask_app_client.get(
        "/gender/men",
        headers={"Accept": "application/json"},
        json={"discounts": "a"})
    payload = json.loads(resp.data)
    assert resp.status_code == 400
    assert set(payload.keys()) >= {"error"}
def test_gender_discounts_negative(flask_app_client):
    """A negative `discounts` value is rejected with 400 and an error key."""
    resp = flask_app_client.get(
        "/gender/men",
        headers={"Accept": "application/json"},
        json={"discounts": -10})
    payload = json.loads(resp.data)
    assert resp.status_code == 400
    assert set(payload.keys()) >= {"error"}
def test_gender_invalid_json(flask_app_client):
    """A request body that is not a JSON object is rejected with 400."""
    resp = flask_app_client.get(
        "/gender/men",
        headers={"Accept": "application/json"},
        json="asdfasd"
    )
    payload = json.loads(resp.data)
    assert resp.status_code == 400
    assert "error" in payload
| {
"content_hash": "cf5fae7a8190175edd758123edc47af6",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 56,
"avg_line_length": 24.247311827956988,
"alnum_prop": 0.6039911308203991,
"repo_name": "willrogerpereira/willbuyer",
"id": "fbb324e2937b028a1d20cbec95c26f90b3374110",
"size": "2255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "willstores/willstores/tests/gender/test_gender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4847"
},
{
"name": "HTML",
"bytes": "15697"
},
{
"name": "JavaScript",
"bytes": "38943"
},
{
"name": "Python",
"bytes": "83645"
}
],
"symlink_target": ""
} |
from pyjamas.ui.Sink import Sink, SinkInfo
from pyjamas.ui.Image import Image
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.AutoComplete import AutoCompleteTextBox
class AutoCompleteTab(Sink):
    """Demo sink: a text box that auto-completes against a fixed colour list."""

    def __init__(self):
        Sink.__init__(self)
        # Completion candidates offered while the user types.
        palette = ['Azure', 'Red', 'Rust', 'Green', 'Beige', 'Brass',
                   'Brown', 'Bronze', 'Blue', 'Black', 'Burgundy', 'Pink',
                   'Gold', 'Gray', 'Purple', 'Yellow', 'White']
        self.colour_input = AutoCompleteTextBox()
        self.colour_input.setCompletionItems(palette)
        row = HorizontalPanel()
        row.add(HTML("Enter a colour: "))
        row.add(self.colour_input)
        row.setSpacing(8)
        self.setWidget(row)

    def onShow(self):
        # Focus-grabbing is deliberately disabled; nothing to do on show.
        return
def init():
    """Return the SinkInfo descriptor used to register the AutoComplete demo tab."""
    blurb = ("<b>Text field auto-completion component</b><p>Shows a list of "
             "matching items as you type. Items can be selected with keyboard"
             " or mouse.")
    blurb += (r"<p>Originally by Oliver Albers at <a href=\"http://gwt.components"
              r".googlepages.com\">gwt.components.googlepages.com</a>")
    return SinkInfo("AutoComplete", blurb, AutoCompleteTab)
| {
"content_hash": "ef96c59547a823e4b926185a7fecb200",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 170,
"avg_line_length": 40.45161290322581,
"alnum_prop": 0.6794258373205742,
"repo_name": "minghuascode/pyj",
"id": "e06aca0e004eb4c46ebafdfbbec287ac6eba86db",
"size": "1254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/addonsgallery/AutoCompleteTab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "107608"
},
{
"name": "JavaScript",
"bytes": "116371"
},
{
"name": "PHP",
"bytes": "5473"
},
{
"name": "Python",
"bytes": "7572605"
},
{
"name": "Shell",
"bytes": "24231"
}
],
"symlink_target": ""
} |
r"""HTTP cookie handling for web clients.
This module has (now fairly distant) origins in Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
distributed with the Python standard library, but are available from
http://wwwsearch.sf.net/):
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
"""
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
import os
import copy
import datetime
import re
import time
import urllib.parse, urllib.request
import threading as _threading
import http.client # only for the default HTTP port
from calendar import timegm
debug = False # set to True to enable debugging via the logging module
logger = None
def _debug(*msg_args):
    """Forward *msg_args* to the lazily-created module logger.

    No-op unless the module-level ``debug`` flag is True; the logger is
    created on first use so importing this module costs nothing.
    """
    global logger
    if not debug:
        return None
    if logger is None:
        import logging
        logger = logging.getLogger("http.cookiejar")
    return logger.debug(*msg_args)
DEFAULT_HTTP_PORT = str(http.client.HTTP_PORT)
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
def _warn_unhandled_exception():
# There are a few catch-all except: statements in this module, for
# catching input that's bad in unexpected ways. Warn if any
# exceptions are caught there.
import io, warnings, traceback
f = io.StringIO()
traceback.print_exc(None, f)
msg = f.getvalue()
warnings.warn("http.cookiejar bug!\n%s" % msg, stacklevel=2)
# Date/time conversion
# -----------------------------------------------------------------------------
EPOCH_YEAR = 1970
def _timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
# Abbreviated English day and month names, as used by the Netscape cookie
# date format ("Wdy, DD-Mon-YYYY HH:MM:SS GMT").
DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
# Lower-cased copies of MONTHS, used for case-insensitive parsing lookups.
MONTHS_LOWER = []
for month in MONTHS: MONTHS_LOWER.append(month.lower())
def time2isoz(t=None):
    """Return *t* (seconds since the epoch; default: now) as an ISO UTC string.

    The format is "YYYY-MM-DD hh:mm:ssZ", representing Universal Time, e.g.
    "1994-11-24 08:49:37Z".
    """
    when = (datetime.datetime.utcnow() if t is None
            else datetime.datetime.utcfromtimestamp(t))
    return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
        when.year, when.month, when.day, when.hour, when.minute, when.second)
def time2netscape(t=None):
    """Return *t* (seconds since the epoch; default: now) in Netscape format.

    The format is "Wdy, DD-Mon-YYYY HH:MM:SS GMT".
    """
    when = (datetime.datetime.utcnow() if t is None
            else datetime.datetime.utcfromtimestamp(t))
    return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % (
        DAYS[when.weekday()], when.day, MONTHS[when.month-1],
        when.year, when.hour, when.minute, when.second)
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}

TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$", re.ASCII)

def offset_from_tz_string(tz):
    """Return the offset in seconds named by timezone string *tz*, or None.

    Accepts the zero-offset aliases in UTC_ZONES and numeric forms such
    as "-0800", "+0100" or "+01:00"; anything else yields None.
    """
    if tz in UTC_ZONES:
        return 0
    m = TIMEZONE_RE.search(tz)
    if m is None:
        return None
    seconds = 3600 * int(m.group(2))
    if m.group(3):
        seconds = seconds + 60 * int(m.group(3))
    if m.group(1) == '-':
        seconds = -seconds
    return seconds
def _str2time(day, mon, yr, hr, min, sec, tz):
    """Combine parsed date/time/timezone strings into seconds since the epoch.

    All arguments are strings as captured by the date regexps (the clock
    fields and tz may be None).  Returns an integer, or None if any
    component is unparseable or the result is unrepresentable.
    """
    yr = int(yr)
    if yr > datetime.MAXYEAR:
        return None
    # translate month name to number
    # month numbers start with 1 (January)
    try:
        mon = MONTHS_LOWER.index(mon.lower())+1
    except ValueError:
        # maybe it's already a number
        try:
            imon = int(mon)
        except ValueError:
            return None
        if 1 <= imon <= 12:
            mon = imon
        else:
            return None
    # make sure clock elements are defined
    if hr is None: hr = 0
    if min is None: min = 0
    if sec is None: sec = 0
    day = int(day)
    hr = int(hr)
    min = int(min)
    sec = int(sec)
    if yr < 1000:
        # find "obvious" year: pick the century that puts the 2-digit year
        # within 50 years of today
        cur_yr = time.localtime(time.time())[0]
        m = cur_yr % 100
        tmp = yr
        yr = yr + cur_yr - m
        m = m - tmp
        if abs(m) > 50:
            if m > 0: yr = yr + 100
            else: yr = yr - 100
    # convert UTC time tuple to seconds since epoch (not timezone-adjusted)
    t = _timegm((yr, mon, day, hr, min, sec, tz))
    if t is not None:
        # adjust time using timezone string, to get absolute time since epoch
        if tz is None:
            tz = "UTC"
        tz = tz.upper()
        offset = offset_from_tz_string(tz)
        if offset is None:
            return None
        t = t - offset
    return t
STRICT_DATE_RE = re.compile(
r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII)
WEEKDAY_RE = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII)
LOOSE_HTTP_DATE_RE = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X | re.ASCII)
def http2time(text):
    """Returns time in seconds since epoch of time represented by a string.

    Return value is an integer.

    None is returned if the format of str is unrecognized, the time is outside
    the representable range, or the timezone string is not recognized.  If the
    string contains no timezone, UTC is assumed.

    The timezone in the string may be numerical (like "-0800" or "+0100") or a
    string timezone (like "UTC", "GMT", "BST" or "EST").  Currently, only the
    timezone strings equivalent to UTC (zero offset) are known to the function.

    The function loosely parses the following formats:

    Wed, 09 Feb 1994 22:23:32 GMT       -- HTTP format
    Tuesday, 08-Feb-94 14:15:29 GMT     -- old rfc850 HTTP format
    Tuesday, 08-Feb-1994 14:15:29 GMT   -- broken rfc850 HTTP format
    09 Feb 1994 22:23:32 GMT            -- HTTP format (no weekday)
    08-Feb-94 14:15:29 GMT              -- rfc850 format (no weekday)
    08-Feb-1994 14:15:29 GMT            -- broken rfc850 format (no weekday)

    The parser ignores leading and trailing whitespace.  The time may be
    absent.

    If the year is given with only 2 digits, the function will select the
    century that makes the year closest to the current date.
    """
    # fast exit for strictly conforming string
    m = STRICT_DATE_RE.search(text)
    if m:
        g = m.groups()
        mon = MONTHS_LOWER.index(g[1].lower()) + 1
        tt = (int(g[2]), mon, int(g[0]),
              int(g[3]), int(g[4]), float(g[5]))
        return _timegm(tt)
    # No, we need some messy parsing...
    # clean up
    text = text.lstrip()
    text = WEEKDAY_RE.sub("", text, 1)  # Useless weekday
    # tz is time zone specifier string
    day, mon, yr, hr, min, sec, tz = [None]*7
    # loose regexp parse
    m = LOOSE_HTTP_DATE_RE.search(text)
    if m is not None:
        day, mon, yr, hr, min, sec, tz = m.groups()
    else:
        return None  # bad format
    # _str2time validates components and applies the timezone offset
    return _str2time(day, mon, yr, hr, min, sec, tz)
# Loose ISO 8601 date parser; both extended ("1994-02-03") and compact
# ("19940203") forms are accepted.
ISO_DATE_RE = re.compile(
    r"""^
    (\d{4})              # year
       [-\/]?
    (\d\d?)              # numerical month
       [-\/]?
    (\d\d?)              # day
   (?:
         (?:\s+|[-:Tt])  # separator before clock
      (\d\d?):?(\d\d)    # hour:min
      (?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
   )?                    # optional clock
      \s*
   ([-+]?\d\d?:?(:?\d\d)?
    |Z|z)?               # timezone  (Z is "zero meridian", i.e. GMT)
      \s*$""", re.X | re. ASCII)
def iso2time(text):
    """
    As for http2time, but parses the ISO 8601 formats:

    1994-02-03 14:15:29 -0100    -- ISO 8601 format
    1994-02-03 14:15:29          -- zone is optional
    1994-02-03                   -- only date
    1994-02-03T14:15:29          -- Use T as separator
    19940203T141529Z             -- ISO 8601 compact format
    19940203                     -- only date

    Returns None for unrecognized formats; assumes UTC when no zone is given.
    """
    # clean up
    text = text.lstrip()
    # tz is time zone specifier string
    day, mon, yr, hr, min, sec, tz = [None]*7
    # loose regexp parse
    m = ISO_DATE_RE.search(text)
    if m is not None:
        # XXX there's an extra bit of the timezone I'm ignoring here: is
        #   this the right thing to do?
        yr, mon, day, hr, min, sec, tz, _ = m.groups()
    else:
        return None  # bad format
    return _str2time(day, mon, yr, hr, min, sec, tz)
# Header parsing
# -----------------------------------------------------------------------------
def unmatched(match):
    """Return the parts of match.string outside the whole-match span."""
    i, j = match.span(0)
    subject = match.string
    return subject[:i] + subject[j:]

HEADER_TOKEN_RE =        re.compile(r"^\s*([^=\s;,]+)")
HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
HEADER_VALUE_RE =        re.compile(r"^\s*=\s*([^\s;,]*)")
HEADER_ESCAPE_RE =       re.compile(r"\\(.)")

def split_header_words(header_values):
    r"""Parse header values into a list of lists containing key,value pairs.

    Understands ",", ";" and "=" separators as well as quoted values after
    "=".  Space-separated tokens are parsed as if ";"-separated; multiple
    header values behave as one value joined by ",".  The value for a lone
    token (no "=") is None.  Syntactically incorrect headers will not
    necessarily be parsed as you would want.

    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
    >>> split_header_words(['text/html; charset="iso-8859-1"'])
    [[('text/html', None), ('charset', 'iso-8859-1')]]
    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
    [[('Basic', None), ('realm', '"foobar"')]]
    """
    assert not isinstance(header_values, str)
    parsed = []
    for remaining in header_values:
        original = remaining
        current = []
        while remaining:
            token = HEADER_TOKEN_RE.search(remaining)
            if token:
                remaining = unmatched(token)
                key = token.group(1)
                quoted = HEADER_QUOTED_VALUE_RE.search(remaining)
                if quoted:
                    # quoted value: strip the quotes, undo backslash escapes
                    remaining = unmatched(quoted)
                    val = HEADER_ESCAPE_RE.sub(r"\1", quoted.group(1))
                else:
                    plain = HEADER_VALUE_RE.search(remaining)
                    if plain:
                        # unquoted value
                        remaining = unmatched(plain)
                        val = plain.group(1).rstrip()
                    else:
                        # lone token with no value at all
                        val = None
                current.append((key, val))
            elif remaining.lstrip().startswith(","):
                # a comma closes one header and opens the next (RFC 2616 4.2)
                remaining = remaining.lstrip()[1:]
                if current:
                    parsed.append(current)
                current = []
            else:
                # skip junk characters; the substitution count guards
                # against an infinite loop
                cleaned, n_junk = re.subn(r"^[=\s;]*", "", remaining)
                assert n_junk > 0, (
                    "split_header_words bug: '%s', '%s', %s" %
                    (original, remaining, current))
                remaining = cleaned
        if current:
            parsed.append(current)
    return parsed
HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")

def join_header_words(lists):
    """Do the inverse (almost) of the conversion done by split_header_words.

    Takes a list of lists of (key, value) pairs and produces a single
    header value.  Values are quoted (with " and \\ escaped) unless they
    are plain \\w+ tokens.

    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859-1")]])
    'text/plain; charset="iso-8859-1"'
    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859-1")]])
    'text/plain, charset="iso-8859-1"'
    """
    joined = []
    for pairs in lists:
        parts = []
        for key, val in pairs:
            if val is not None:
                if re.search(r"^\w+$", val) is None:
                    val = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", val)  # escape " and \
                    val = '"%s"' % val
                key = "%s=%s" % (key, val)
            parts.append(key)
        if parts:
            joined.append("; ".join(parts))
    return ", ".join(joined)
def strip_quotes(text):
    """Drop one leading and/or one trailing double-quote from *text*."""
    if text.startswith('"'):
        text = text[1:]
    return text[:-1] if text.endswith('"') else text
def parse_ns_headers(ns_headers):
    """Ad-hoc parser for Netscape protocol cookie-attributes.

    The old Netscape cookie format for Set-Cookie can for instance contain
    an unquoted "," in the expires field, so we have to use this ad-hoc
    parser instead of split_header_words.

    Returns a list (one entry per header) of lists of (key, value) pairs;
    the first pair is the cookie name/value, later keys are attributes.
    A ("version", "0") pair is appended when no version attribute was seen.

    XXX This may not make the best possible effort to parse all the crap
    that Netscape Cookie headers contain.  Ronald Tschalar's HTTPClient
    parser is probably better, so could do worse than following that if
    this ever gives any trouble.

    Currently, this is also used for parsing RFC 2109 cookies.
    """
    # attribute names are lower-cased only when recognised here
    known_attrs = ("expires", "domain", "path", "secure",
                   # RFC 2109 attrs (may turn up in Netscape cookies, too)
                   "version", "port", "max-age")
    result = []
    for ns_header in ns_headers:
        pairs = []
        version_set = False
        # XXX: The following does not strictly adhere to RFCs in that empty
        # names and values are legal (the former will only appear once and will
        # be overwritten if multiple occurrences are present).  This is
        # mostly to deal with backwards compatibility.
        for ii, param in enumerate(ns_header.split(';')):
            param = param.strip()
            key, sep, val = param.partition('=')
            key = key.strip()
            if not key:
                # an empty NAME aborts the whole header; an empty attribute
                # name later on is simply skipped
                if ii == 0:
                    break
                else:
                    continue
            # allow for a distinction between present and empty and missing
            # altogether
            val = val.strip() if sep else None
            if ii != 0:
                lc = key.lower()
                if lc in known_attrs:
                    key = lc
                if key == "version":
                    # This is an RFC 2109 cookie.
                    if val is not None:
                        val = strip_quotes(val)
                    version_set = True
                elif key == "expires":
                    # convert expires date to seconds since epoch
                    if val is not None:
                        val = http2time(strip_quotes(val))  # None if invalid
            pairs.append((key, val))
        if pairs:
            if not version_set:
                pairs.append(("version", "0"))
            result.append(pairs)
    return result
IPV4_RE = re.compile(r"\.\d+$", re.ASCII)

def is_HDN(text):
    """Return True if text is a host domain name.

    XXX This may well be wrong.  Which RFC is HDN defined in, if any (for
    the purposes of RFC 2965)?  For the current implementation, what about
    IPv6?  Remember to look at other uses of IPV4_RE also, if changing this.
    """
    if IPV4_RE.search(text):
        return False
    return bool(text) and not text.startswith(".") and not text.endswith(".")

def domain_match(A, B):
    """Return True if domain A domain-matches domain B, per RFC 2965.

    A and B may be host domain names or IP addresses; comparison is
    case-insensitive.  A matches B when the strings are equal, or when A is
    an HDN of the form NB with N non-empty, B starts with "." and the rest
    of B is an HDN (so x.y.com domain-matches .Y.com but not Y.com).  Note
    the operation is not commutative.
    """
    # For IP addresses the only relevant part of the algorithm is the
    # direct string-compare.
    A = A.lower()
    B = B.lower()
    if A == B:
        return True
    if not is_HDN(A):
        return False
    pos = A.rfind(B)
    if pos <= 0:
        # A does not have form NB, or N is the empty string
        return False
    return B.startswith(".") and is_HDN(B[1:])

def liberal_is_HDN(text):
    """Loose HDN test for user allow/block lists: anything but an IPv4 literal."""
    return not IPV4_RE.search(text)

def user_domain_match(A, B):
    """Match domains for user allow/block lists.

    A and B may be host domain names or IP addresses.  IPs must compare
    equal; a B starting with "." matches any A ending with B; otherwise the
    strings must be equal (case-insensitive throughout).
    """
    A = A.lower()
    B = B.lower()
    if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
        # one of them looks like an IP address: only exact equality counts
        return A == B
    if B.startswith("."):
        return A.endswith(B)
    return A == B
cut_port_re = re.compile(r":\d+$", re.ASCII)

def request_host(request):
    """Return request-host, as defined by RFC 2965.

    Variation from RFC: the returned value is lowercased (for convenient
    comparison) and any :port suffix is removed.
    """
    host = urllib.parse.urlparse(request.get_full_url())[1]
    if not host:
        # no netloc in the URL: fall back to the Host header
        host = request.get_header("Host", "")
    return cut_port_re.sub("", host, 1).lower()
def eff_request_host(request):
    """Return a tuple (request-host, effective request-host name).

    As defined by RFC 2965, except both are lowercased; dotless non-IP
    hostnames get ".local" appended to form the effective name.
    """
    req_host = request_host(request)
    erhn = req_host
    if "." not in req_host and not IPV4_RE.search(req_host):
        erhn = req_host + ".local"
    return req_host, erhn
def request_path(request):
    """Return the escaped path component of the request-URI (RFC 2965)."""
    pieces = urllib.parse.urlsplit(request.get_full_url())
    path = escape_path(pieces.path)
    # repair absoluteURIs whose path lacks the leading "/" (bad RFC 2396)
    return path if path.startswith("/") else "/" + path
def request_port(request):
    """Return the request's port as a string.

    Falls back to DEFAULT_HTTP_PORT when the host carries no :port, and
    returns None (after a debug log) for a non-numeric port.
    """
    host = request.host
    sep = host.find(':')
    if sep < 0:
        return DEFAULT_HTTP_PORT
    port = host[sep+1:]
    try:
        int(port)
    except ValueError:
        _debug("nonnumeric port: '%s'", port)
        return None
    return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"

ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
    """Rewrite one %xx escape with uppercase hex digits."""
    return "%%%s" % match.group(1).upper()

def escape_path(path):
    """Escape any invalid characters in HTTP URL, and uppercase all escapes.

    %-escapes are assumed to encode UTF-8, as recommended by the HTML 4.0
    specification (and kind of by draft-fielding-uri-rfc2396bis-03).
    """
    quoted = urllib.parse.quote(path, HTTP_PATH_SAFE)
    return ESCAPED_CHAR_RE.sub(uppercase_escaped_char, quoted)
def reach(h):
    """Return reach of host h, as defined by RFC 2965, section 1.

    The reach is ".B" when h is an HDN of the form A.B with A dotless and
    B either containing an embedded dot or equal to "local"; otherwise the
    reach is h itself.

    >>> reach("www.acme.com")
    '.acme.com'
    >>> reach("acme.com")
    'acme.com'
    >>> reach("acme.local")
    '.local'
    """
    first_dot = h.find(".")
    if first_dot >= 0:
        suffix = h[first_dot+1:]
        if is_HDN(h) and (suffix.find(".") >= 0 or suffix == "local"):
            return "." + suffix
    return h
def is_third_party(request):
    """Return True if the request targets a third-party host.

    RFC 2965, section 3.3.6: an unverifiable transaction is to a
    third-party host if its request-host U does not domain-match the reach
    R of the request-host O in the origin transaction.
    """
    return not domain_match(request_host(request),
                            reach(request.origin_req_host))
class Cookie:
    """A single HTTP cookie (Netscape or RFC 2965 flavour).

    Deliberately a very simple attribute holder; instances may even violate
    the cookie standards.  CookieJar.make_cookies is the factory that deals
    with parsing, defaults and normalisation, while CookiePolicy decides
    whether cookies are accepted from / returned to servers.

    Note that the port may be present but unspecified in the headers
    ("Port" rather than "Port=80"); in that case port is None.
    """

    def __init__(self, version, name, value,
                 port, port_specified,
                 domain, domain_specified, domain_initial_dot,
                 path, path_specified,
                 secure,
                 expires,
                 discard,
                 comment,
                 comment_url,
                 rest,
                 rfc2109=False,
                 ):
        # Normalise numeric attributes up front.
        if version is not None:
            version = int(version)
        if expires is not None:
            expires = int(float(expires))
        if port is None and port_specified is True:
            raise ValueError("if port is None, port_specified must be false")

        self.version = version
        self.name = name
        self.value = value
        self.port = port
        self.port_specified = port_specified
        # normalise case, as per RFC 2965 section 3.3.3
        self.domain = domain.lower()
        self.domain_specified = domain_specified
        # Whether the domain cookie-attribute carried an initial dot must be
        # remembered to follow RFC 2965 (as clarified in draft errata) --
        # needed for the returned $Domain value.
        self.domain_initial_dot = domain_initial_dot
        self.path = path
        self.path_specified = path_specified
        self.secure = secure
        self.expires = expires
        self.discard = discard
        self.comment = comment
        self.comment_url = comment_url
        self.rfc2109 = rfc2109
        # defensive copy of the nonstandard-attribute mapping
        self._rest = copy.copy(rest)

    def has_nonstandard_attr(self, name):
        """Return True if *name* is among the nonstandard attributes."""
        return name in self._rest

    def get_nonstandard_attr(self, name, default=None):
        """Return the nonstandard attribute *name*, or *default*."""
        return self._rest.get(name, default)

    def set_nonstandard_attr(self, name, value):
        """Store *value* under the nonstandard attribute *name*."""
        self._rest[name] = value

    def is_expired(self, now=None):
        """Return True if the expiry time has passed (*now* defaults to now)."""
        if now is None:
            now = time.time()
        return self.expires is not None and self.expires <= now

    def __str__(self):
        port_part = "" if self.port is None else ":" + self.port
        limit = self.domain + port_part + self.path
        if self.value is None:
            namevalue = self.name
        else:
            namevalue = "%s=%s" % (self.name, self.value)
        return "<Cookie %s for %s>" % (namevalue, limit)

    def __repr__(self):
        parts = ["%s=%s" % (attr, repr(getattr(self, attr)))
                 for attr in ("version", "name", "value",
                              "port", "port_specified",
                              "domain", "domain_specified", "domain_initial_dot",
                              "path", "path_specified",
                              "secure", "expires", "discard", "comment", "comment_url",
                              )]
        parts.append("rest=%s" % repr(self._rest))
        parts.append("rfc2109=%s" % repr(self.rfc2109))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(parts))
class CookiePolicy:
    """Defines which cookies get accepted from and returned to the server.

    May also modify cookies, though this is probably a bad idea.  The
    subclass DefaultCookiePolicy defines the standard Netscape / RFC 2965
    rules -- override that one for a customized policy.
    """

    def set_ok(self, cookie, request):
        """Return true if (and only if) cookie should be accepted from server.

        Currently, pre-expired cookies never get this far -- the CookieJar
        class deletes such cookies itself.
        """
        raise NotImplementedError()

    def return_ok(self, cookie, request):
        """Return true if (and only if) cookie should be returned to server."""
        raise NotImplementedError()

    def domain_return_ok(self, domain, request):
        """Return false if cookies should not be returned, given cookie domain."""
        return True

    def path_return_ok(self, path, request):
        """Return false if cookies should not be returned, given cookie path."""
        return True
class DefaultCookiePolicy(CookiePolicy):
    """Implements the standard rules for accepting and returning cookies."""

    # Bit-flag values for the strict_ns_domain setting; they control how
    # strictly Netscape-cookie domains are checked.
    DomainStrictNoDots = 1
    DomainStrictNonDomain = 2
    DomainRFC2965Match = 4
    # Convenience combinations of the flags above.
    DomainLiberal = 0
    DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
secure_protocols=("https", "wss")
):
"""Constructor arguments should be passed as keyword arguments only."""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
self.secure_protocols = secure_protocols
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override .set_ok(), be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
_debug(" Set-Cookie2 without version attribute (%s=%s)",
cookie.name, cookie.value)
return False
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
_debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not self.path_return_ok(cookie.path, request)):
_debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
    def set_ok_domain(self, cookie, request):
        """Check the cookie's domain against user lists and the RFC/Netscape rules."""
        if self.is_blocked(cookie.domain):
            _debug("   domain %s is in user block-list", cookie.domain)
            return False
        if self.is_not_allowed(cookie.domain):
            _debug("   domain %s is not in user allow-list", cookie.domain)
            return False
        # the remaining rules only apply to an explicitly specified domain
        if cookie.domain_specified:
            req_host, erhn = eff_request_host(request)
            domain = cookie.domain
            if self.strict_domain and (domain.count(".") >= 2):
                # XXX This should probably be compared with the Konqueror
                # (kcookiejar.cpp) and Mozilla implementations, but it's a
                # losing battle.
                i = domain.rfind(".")
                j = domain.rfind(".", 0, i)
                if j == 0:  # domain like .foo.bar
                    tld = domain[i+1:]
                    sld = domain[j+1:i]
                    if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
                       "gov", "mil", "int", "aero", "biz", "cat", "coop",
                       "info", "jobs", "mobi", "museum", "name", "pro",
                       "travel", "eu") and len(tld) == 2:
                        # domain like .co.uk
                        _debug("   country-code second level domain %s", domain)
                        return False
            if domain.startswith("."):
                undotted_domain = domain[1:]
            else:
                undotted_domain = domain
            embedded_dots = (undotted_domain.find(".") >= 0)
            if not embedded_dots and domain != ".local":
                _debug("   non-local domain %s contains no embedded dot",
                       domain)
                return False
            if cookie.version == 0:
                # Netscape rule: effective request-host must end with domain
                # (allowing for a missing initial dot)
                if (not erhn.endswith(domain) and
                    (not erhn.startswith(".") and
                     not ("."+erhn).endswith(domain))):
                    _debug("   effective request-host %s (even with added "
                           "initial dot) does not end with %s",
                           erhn, domain)
                    return False
            if (cookie.version > 0 or
                (self.strict_ns_domain & self.DomainRFC2965Match)):
                if not domain_match(erhn, domain):
                    _debug("   effective request-host %s does not domain-match "
                           "%s", erhn, domain)
                    return False
            if (cookie.version > 0 or
                (self.strict_ns_domain & self.DomainStrictNoDots)):
                # the part of the host preceding the domain may not contain
                # another dot (unless the host is an IP address)
                host_prefix = req_host[:-len(domain)]
                if (host_prefix.find(".") >= 0 and
                    not IPV4_RE.search(req_host)):
                    _debug("   host prefix %s for domain %s contains a dot",
                           host_prefix, domain)
                    return False
        return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
_debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
_debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override .return_ok(), be sure to call this method. If it
returns false, so should your subclass (assuming your subclass wants to
be more strict about which cookies to return).
"""
# Path has already been checked by .path_return_ok(), and domain
# blocking done by .domain_return_ok().
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
for n in "version", "verifiability", "secure", "expires", "port", "domain":
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.type not in self.secure_protocols:
_debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
_debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
_debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
    def return_ok_domain(self, cookie, request):
        """Return false if the cookie should not be returned, by domain."""
        req_host, erhn = eff_request_host(request)
        domain = cookie.domain
        # Prefix a bare domain with a dot so that the endswith() test below
        # can only match at a label boundary: "site.com" must not match
        # "evilsite.com".
        if domain and not domain.startswith("."):
            dotdomain = "." + domain
        else:
            dotdomain = domain
        # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
        if (cookie.version == 0 and
            (self.strict_ns_domain & self.DomainStrictNonDomain) and
            not cookie.domain_specified and domain != erhn):
            _debug("   cookie with unspecified domain does not string-compare "
                   "equal to request domain")
            return False
        # RFC 2965 cookies use full domain-match
        if cookie.version > 0 and not domain_match(erhn, domain):
            _debug("   effective request-host name %s does not domain-match "
                   "RFC 2965 cookie domain %s", erhn, domain)
            return False
        # Netscape cookies use a suffix match on the dotted host name
        if cookie.version == 0 and not ("."+erhn).endswith(dotdomain):
            _debug("   request-host %s does not match Netscape cookie domain "
                   "%s", req_host, domain)
            return False
        return True
    def domain_return_ok(self, domain, request):
        # Liberal check of domain.  This is here as an optimization to avoid
        # having to load lots of MSIE cookie files unless necessary.
        req_host, erhn = eff_request_host(request)
        # normalize both host forms to start with a dot so the suffix tests
        # below only match at a label boundary
        if not req_host.startswith("."):
            req_host = "."+req_host
        if not erhn.startswith("."):
            erhn = "."+erhn
        if domain and not domain.startswith("."):
            dotdomain = "." + domain
        else:
            dotdomain = domain
        if not (req_host.endswith(dotdomain) or erhn.endswith(dotdomain)):
            #_debug("   request domain %s does not match cookie domain %s",
            #       req_host, domain)
            return False
        if self.is_blocked(domain):
            _debug("   domain %s is in user block-list", domain)
            return False
        if self.is_not_allowed(domain):
            _debug("   domain %s is not in user allow-list", domain)
            return False
        return True
def path_return_ok(self, path, request):
_debug("- checking cookie path=%s", path)
req_path = request_path(request)
pathlen = len(path)
if req_path == path:
return True
elif (req_path.startswith(path) and
(path.endswith("/") or req_path[pathlen:pathlen+1] == "/")):
return True
_debug(" %s does not path-match %s", req_path, path)
return False
def vals_sorted_by_key(adict):
    """Return an iterator over the values of *adict* in ascending key order."""
    return (adict[key] for key in sorted(adict))
def deepvalues(mapping):
    """Iterates over nested mapping, depth-first, in sorted order by key."""
    for key in sorted(mapping.keys()):
        child = mapping.get(key)
        # anything with an .items attribute is treated as a nested mapping
        # and recursed into; everything else is a leaf value
        if hasattr(child, "items"):
            yield from deepvalues(child)
        else:
            yield child
class Absent:
    """Sentinel passed as the second argument of dict.get() so that an
    absent key can be told apart from a key stored with a None value."""
class CookieJar:
    """Collection of HTTP cookies.

    You may not need to know about this class: try
    urllib.request.build_opener(HTTPCookieProcessor).open(url).
    """
    # characters in a cookie value that force quoting (RFC 2965 cookies only)
    non_word_re = re.compile(r"\W")
    quote_re = re.compile(r"([\"\\])")
    strict_domain_re = re.compile(r"\.?[^.]*")
    domain_re = re.compile(r"[^.]*")
    dots_re = re.compile(r"^\.+")
    # first line of an LWP (Set-Cookie3) cookie file
    magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII)
    def __init__(self, policy=None):
        # _cookies maps domain -> path -> name -> Cookie; all mutation goes
        # through _cookies_lock so a jar can be shared between threads.
        if policy is None:
            policy = DefaultCookiePolicy()
        self._policy = policy
        self._cookies_lock = _threading.RLock()
        self._cookies = {}
    def set_policy(self, policy):
        """Replace the CookiePolicy used for set/return decisions."""
        self._policy = policy
    def _cookies_for_domain(self, domain, request):
        # Return the stored cookies under *domain* that the policy allows
        # to be sent back with *request*.
        cookies = []
        if not self._policy.domain_return_ok(domain, request):
            return []
        _debug("Checking %s for cookies to return", domain)
        cookies_by_path = self._cookies[domain]
        for path in cookies_by_path.keys():
            if not self._policy.path_return_ok(path, request):
                continue
            cookies_by_name = cookies_by_path[path]
            for cookie in cookies_by_name.values():
                if not self._policy.return_ok(cookie, request):
                    _debug("   not returning cookie")
                    continue
                _debug("   it's a match")
                cookies.append(cookie)
        return cookies
    def _cookies_for_request(self, request):
        """Return a list of cookies to be returned to server."""
        cookies = []
        for domain in self._cookies.keys():
            cookies.extend(self._cookies_for_domain(domain, request))
        return cookies
    def _cookie_attrs(self, cookies):
        """Return a list of cookie-attributes to be returned to server.

        like ['foo="bar"; $Path="/"', ...]

        The $Version attribute is also added when appropriate (currently only
        once per request).
        """
        # add cookies in order of most specific (ie. longest) path first
        cookies.sort(key=lambda a: len(a.path), reverse=True)
        version_set = False
        attrs = []
        for cookie in cookies:
            # set version of Cookie header
            # XXX
            # What should it be if multiple matching Set-Cookie headers have
            #  different versions themselves?
            # Answer: there is no answer; was supposed to be settled by
            #  RFC 2965 errata, but that may never appear...
            version = cookie.version
            if not version_set:
                version_set = True
                if version > 0:
                    attrs.append("$Version=%s" % version)
            # quote cookie value if necessary
            # (not for Netscape protocol, which already has any quotes
            #  intact, due to the poorly-specified Netscape Cookie: syntax)
            if ((cookie.value is not None) and
                self.non_word_re.search(cookie.value) and version > 0):
                value = self.quote_re.sub(r"\\\1", cookie.value)
            else:
                value = cookie.value
            # add cookie-attributes to be returned in Cookie header
            if cookie.value is None:
                attrs.append(cookie.name)
            else:
                attrs.append("%s=%s" % (cookie.name, value))
            if version > 0:
                if cookie.path_specified:
                    attrs.append('$Path="%s"' % cookie.path)
                if cookie.domain.startswith("."):
                    domain = cookie.domain
                    if (not cookie.domain_initial_dot and
                        domain.startswith(".")):
                        domain = domain[1:]
                    attrs.append('$Domain="%s"' % domain)
                if cookie.port is not None:
                    p = "$Port"
                    if cookie.port_specified:
                        p = p + ('="%s"' % cookie.port)
                    attrs.append(p)
        return attrs
    def add_cookie_header(self, request):
        """Add correct Cookie: header to request (urllib.request.Request object).

        The Cookie2 header is also added unless policy.hide_cookie2 is true.
        """
        _debug("add_cookie_header")
        self._cookies_lock.acquire()
        try:
            # stamp the policy and jar with a single "now" so all expiry
            # checks during this request agree
            self._policy._now = self._now = int(time.time())
            cookies = self._cookies_for_request(request)
            attrs = self._cookie_attrs(cookies)
            if attrs:
                if not request.has_header("Cookie"):
                    request.add_unredirected_header(
                        "Cookie", "; ".join(attrs))
            # if necessary, advertise that we know RFC 2965
            if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
                not request.has_header("Cookie2")):
                for cookie in cookies:
                    if cookie.version != 1:
                        request.add_unredirected_header("Cookie2", '$Version="1"')
                        break
        finally:
            self._cookies_lock.release()
        self.clear_expired_cookies()
    def _normalized_cookie_tuples(self, attrs_set):
        """Return list of tuples containing normalised cookie information.

        attrs_set is the list of lists of key,value pairs extracted from
        the Set-Cookie or Set-Cookie2 headers.

        Tuples are name, value, standard, rest, where name and value are the
        cookie name and value, standard is a dictionary containing the standard
        cookie-attributes (discard, secure, version, expires or max-age,
        domain, path and port) and rest is a dictionary containing the rest of
        the cookie-attributes.
        """
        cookie_tuples = []
        boolean_attrs = "discard", "secure"
        value_attrs = ("version",
                       "expires", "max-age",
                       "domain", "path", "port",
                       "comment", "commenturl")
        for cookie_attrs in attrs_set:
            name, value = cookie_attrs[0]
            # Build dictionary of standard cookie-attributes (standard) and
            # dictionary of other cookie-attributes (rest).
            # Note: expiry time is normalised to seconds since epoch.  V0
            # cookies should have the Expires cookie-attribute, and V1 cookies
            # should have Max-Age, but since V1 includes RFC 2109 cookies (and
            # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
            # accept either (but prefer Max-Age).
            max_age_set = False
            bad_cookie = False
            standard = {}
            rest = {}
            for k, v in cookie_attrs[1:]:
                lc = k.lower()
                # don't lose case distinction for unknown fields
                if lc in value_attrs or lc in boolean_attrs:
                    k = lc
                if k in boolean_attrs and v is None:
                    # boolean cookie-attribute is present, but has no value
                    # (like "discard", rather than "port=80")
                    v = True
                if k in standard:
                    # only first value is significant
                    continue
                if k == "domain":
                    if v is None:
                        _debug("   missing value for domain attribute")
                        bad_cookie = True
                        break
                    # RFC 2965 section 3.3.3
                    v = v.lower()
                if k == "expires":
                    if max_age_set:
                        # Prefer max-age to expires (like Mozilla)
                        continue
                    if v is None:
                        _debug("   missing or invalid value for expires "
                               "attribute: treating as session cookie")
                        continue
                if k == "max-age":
                    max_age_set = True
                    try:
                        v = int(v)
                    except ValueError:
                        _debug("   missing or invalid (non-numeric) value for "
                               "max-age attribute")
                        bad_cookie = True
                        break
                    # convert RFC 2965 Max-Age to seconds since epoch
                    # XXX Strictly you're supposed to follow RFC 2616
                    #   age-calculation rules.  Remember that zero Max-Age
                    #   is a request to discard (old and new) cookie, though.
                    k = "expires"
                    v = self._now + v
                if (k in value_attrs) or (k in boolean_attrs):
                    if (v is None and
                        k not in ("port", "comment", "commenturl")):
                        _debug("   missing value for %s attribute" % k)
                        bad_cookie = True
                        break
                    standard[k] = v
                else:
                    rest[k] = v
            if bad_cookie:
                continue
            cookie_tuples.append((name, value, standard, rest))
        return cookie_tuples
    def _cookie_from_cookie_tuple(self, tup, request):
        # Build a Cookie from one (name, value, standard, rest) tuple,
        # filling in defaults from *request*; returns None if the tuple is
        # invalid or the cookie has an expiry date in the past.
        # standard is dict of standard cookie-attributes, rest is dict of the
        # rest of them
        name, value, standard, rest = tup
        domain = standard.get("domain", Absent)
        path = standard.get("path", Absent)
        port = standard.get("port", Absent)
        expires = standard.get("expires", Absent)
        # set the easy defaults
        version = standard.get("version", None)
        if version is not None:
            try:
                version = int(version)
            except ValueError:
                return None  # invalid version, ignore cookie
        secure = standard.get("secure", False)
        # (discard is also set if expires is Absent)
        discard = standard.get("discard", False)
        comment = standard.get("comment", None)
        comment_url = standard.get("commenturl", None)
        # set default path
        if path is not Absent and path != "":
            path_specified = True
            path = escape_path(path)
        else:
            path_specified = False
            path = request_path(request)
            i = path.rfind("/")
            if i != -1:
                if version == 0:
                    # Netscape spec parts company from reality here
                    path = path[:i]
                else:
                    path = path[:i+1]
            if len(path) == 0: path = "/"
        # set default domain
        domain_specified = domain is not Absent
        # but first we have to remember whether it starts with a dot
        domain_initial_dot = False
        if domain_specified:
            domain_initial_dot = bool(domain.startswith("."))
        if domain is Absent:
            req_host, erhn = eff_request_host(request)
            domain = erhn
        elif not domain.startswith("."):
            domain = "."+domain
        # set default port
        port_specified = False
        if port is not Absent:
            if port is None:
                # Port attr present, but has no value: default to request port.
                # Cookie should then only be sent back on that port.
                port = request_port(request)
            else:
                port_specified = True
                port = re.sub(r"\s+", "", port)
        else:
            # No port attr present.  Cookie can be sent back on any port.
            port = None
        # set default expires and discard
        if expires is Absent:
            expires = None
            discard = True
        elif expires <= self._now:
            # Expiry date in past is request to delete cookie.  This can't be
            # in DefaultCookiePolicy, because can't delete cookies there.
            try:
                self.clear(domain, path, name)
            except KeyError:
                pass
            _debug("Expiring cookie, domain='%s', path='%s', name='%s'",
                   domain, path, name)
            return None
        return Cookie(version,
                      name, value,
                      port, port_specified,
                      domain, domain_specified, domain_initial_dot,
                      path, path_specified,
                      secure,
                      expires,
                      discard,
                      comment,
                      comment_url,
                      rest)
    def _cookies_from_attrs_set(self, attrs_set, request):
        # Turn parsed header attribute lists into Cookie objects, dropping
        # tuples that fail to produce a valid cookie.
        cookie_tuples = self._normalized_cookie_tuples(attrs_set)
        cookies = []
        for tup in cookie_tuples:
            cookie = self._cookie_from_cookie_tuple(tup, request)
            if cookie: cookies.append(cookie)
        return cookies
    def _process_rfc2109_cookies(self, cookies):
        # Mark RFC 2109 cookies and, unless the policy says otherwise,
        # downgrade them to Netscape (version 0) cookies.
        rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
        if rfc2109_as_ns is None:
            rfc2109_as_ns = not self._policy.rfc2965
        for cookie in cookies:
            if cookie.version == 1:
                cookie.rfc2109 = True
                if rfc2109_as_ns:
                    # treat 2109 cookies as Netscape cookies rather than
                    # as RFC2965 cookies
                    cookie.version = 0
    def make_cookies(self, response, request):
        """Return sequence of Cookie objects extracted from response object."""
        # get cookie-attributes for RFC 2965 and Netscape protocols
        headers = response.info()
        rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
        ns_hdrs = headers.get_all("Set-Cookie", [])
        self._policy._now = self._now = int(time.time())
        rfc2965 = self._policy.rfc2965
        netscape = self._policy.netscape
        if ((not rfc2965_hdrs and not ns_hdrs) or
            (not ns_hdrs and not rfc2965) or
            (not rfc2965_hdrs and not netscape) or
            (not netscape and not rfc2965)):
            return []  # no relevant cookie headers: quick exit
        try:
            cookies = self._cookies_from_attrs_set(
                split_header_words(rfc2965_hdrs), request)
        except Exception:
            _warn_unhandled_exception()
            cookies = []
        if ns_hdrs and netscape:
            try:
                # RFC 2109 and Netscape cookies
                ns_cookies = self._cookies_from_attrs_set(
                    parse_ns_headers(ns_hdrs), request)
            except Exception:
                _warn_unhandled_exception()
                ns_cookies = []
            self._process_rfc2109_cookies(ns_cookies)
            # Look for Netscape cookies (from Set-Cookie headers) that match
            # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
            # For each match, keep the RFC 2965 cookie and ignore the Netscape
            # cookie (RFC 2965 section 9.1).  Actually, RFC 2109 cookies are
            # bundled in with the Netscape cookies for this purpose, which is
            # reasonable behaviour.
            if rfc2965:
                lookup = {}
                for cookie in cookies:
                    lookup[(cookie.domain, cookie.path, cookie.name)] = None
                def no_matching_rfc2965(ns_cookie, lookup=lookup):
                    key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
                    return key not in lookup
                ns_cookies = filter(no_matching_rfc2965, ns_cookies)
            # NOTE: after the filter() above, ns_cookies is always truthy;
            # this test only matters when ns_cookies is still a list
            if ns_cookies:
                cookies.extend(ns_cookies)
        return cookies
    def set_cookie_if_ok(self, cookie, request):
        """Set a cookie if policy says it's OK to do so."""
        self._cookies_lock.acquire()
        try:
            self._policy._now = self._now = int(time.time())
            if self._policy.set_ok(cookie, request):
                self.set_cookie(cookie)
        finally:
            self._cookies_lock.release()
    def set_cookie(self, cookie):
        """Set a cookie, without checking whether or not it should be set."""
        c = self._cookies
        self._cookies_lock.acquire()
        try:
            # create the domain -> path -> name nesting on demand
            if cookie.domain not in c: c[cookie.domain] = {}
            c2 = c[cookie.domain]
            if cookie.path not in c2: c2[cookie.path] = {}
            c3 = c2[cookie.path]
            c3[cookie.name] = cookie
        finally:
            self._cookies_lock.release()
    def extract_cookies(self, response, request):
        """Extract cookies from response, where allowable given the request."""
        _debug("extract_cookies: %s", response.info())
        self._cookies_lock.acquire()
        try:
            for cookie in self.make_cookies(response, request):
                if self._policy.set_ok(cookie, request):
                    _debug(" setting cookie: %s", cookie)
                    self.set_cookie(cookie)
        finally:
            self._cookies_lock.release()
    def clear(self, domain=None, path=None, name=None):
        """Clear some cookies.

        Invoking this method without arguments will clear all cookies.  If
        given a single argument, only cookies belonging to that domain will be
        removed.  If given two arguments, cookies belonging to the specified
        path within that domain are removed.  If given three arguments, then
        the cookie with the specified name, path and domain is removed.

        Raises KeyError if no matching cookie exists.
        """
        if name is not None:
            if (domain is None) or (path is None):
                raise ValueError(
                    "domain and path must be given to remove a cookie by name")
            del self._cookies[domain][path][name]
        elif path is not None:
            if domain is None:
                raise ValueError(
                    "domain must be given to remove cookies by path")
            del self._cookies[domain][path]
        elif domain is not None:
            del self._cookies[domain]
        else:
            self._cookies = {}
    def clear_session_cookies(self):
        """Discard all session cookies.

        Note that the .save() method won't save session cookies anyway, unless
        you ask otherwise by passing a true ignore_discard argument.
        """
        self._cookies_lock.acquire()
        try:
            for cookie in self:
                if cookie.discard:
                    self.clear(cookie.domain, cookie.path, cookie.name)
        finally:
            self._cookies_lock.release()
    def clear_expired_cookies(self):
        """Discard all expired cookies.

        You probably don't need to call this method: expired cookies are never
        sent back to the server (provided you're using DefaultCookiePolicy),
        this method is called by CookieJar itself every so often, and the
        .save() method won't save expired cookies anyway (unless you ask
        otherwise by passing a true ignore_expires argument).
        """
        self._cookies_lock.acquire()
        try:
            now = time.time()
            for cookie in self:
                if cookie.is_expired(now):
                    self.clear(cookie.domain, cookie.path, cookie.name)
        finally:
            self._cookies_lock.release()
    def __iter__(self):
        # iterate over every contained Cookie, depth-first by domain, path
        # and name
        return deepvalues(self._cookies)
    def __len__(self):
        """Return number of contained cookies."""
        i = 0
        for cookie in self: i = i + 1
        return i
    def __repr__(self):
        r = []
        for cookie in self: r.append(repr(cookie))
        return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
    def __str__(self):
        r = []
        for cookie in self: r.append(str(cookie))
        return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
class LoadError(OSError):
    """Raised when a cookie file cannot be parsed.

    Derives from OSError for backwards-compatibility with Python 2.4.0.
    """
class FileCookieJar(CookieJar):
    """CookieJar that can be loaded from and saved to a file."""
    def __init__(self, filename=None, delayload=False, policy=None):
        """
        Cookies are NOT loaded from the named file until either the .load() or
        .revert() method is called.
        """
        CookieJar.__init__(self, policy)
        if filename is not None:
            # accept path-like objects as well as plain strings
            filename = os.fspath(filename)
        self.filename = filename
        # NOTE(review): delayload is only stored here; nothing in this class
        # consults it -- subclasses/callers may.  Confirm before relying on it.
        self.delayload = bool(delayload)
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Save cookies to a file."""
        # abstract: concrete file formats are implemented by subclasses
        raise NotImplementedError()
    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        with open(filename) as f:
            self._really_load(f, filename, ignore_discard, ignore_expires)
    def revert(self, filename=None,
               ignore_discard=False, ignore_expires=False):
        """Clear all cookies and reload cookies from a saved file.

        Raises LoadError (or OSError) if reversion is not successful; the
        object's state will not be altered if this happens.
        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        self._cookies_lock.acquire()
        try:
            # snapshot current state so a failed load can be rolled back
            old_state = copy.deepcopy(self._cookies)
            self._cookies = {}
            try:
                self.load(filename, ignore_discard, ignore_expires)
            except OSError:
                self._cookies = old_state
                raise
        finally:
            self._cookies_lock.release()
def lwp_cookie_str(cookie):
    """Return string representation of Cookie in the LWP cookie file format.

    Actually, the format is extended a bit -- see module docstring.
    """
    # mandatory leading attributes, in fixed order
    pairs = [(cookie.name, cookie.value),
             ("path", cookie.path),
             ("domain", cookie.domain)]
    if cookie.port is not None:
        pairs.append(("port", cookie.port))
    # boolean flags are written as bare attribute names (value None)
    if cookie.path_specified:
        pairs.append(("path_spec", None))
    if cookie.port_specified:
        pairs.append(("port_spec", None))
    if cookie.domain_initial_dot:
        pairs.append(("domain_dot", None))
    if cookie.secure:
        pairs.append(("secure", None))
    if cookie.expires:
        pairs.append(("expires", time2isoz(float(cookie.expires))))
    if cookie.discard:
        pairs.append(("discard", None))
    if cookie.comment:
        pairs.append(("comment", cookie.comment))
    if cookie.comment_url:
        pairs.append(("commenturl", cookie.comment_url))
    # non-standard attributes, alphabetically
    for key in sorted(cookie._rest.keys()):
        pairs.append((key, str(cookie._rest[key])))
    pairs.append(("version", str(cookie.version)))
    return join_header_words([pairs])
class LWPCookieJar(FileCookieJar):
    """
    The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
    "Set-Cookie3" is the format used by the libwww-perl library, not known
    to be compatible with any browser, but which is easy to read and
    doesn't lose information about RFC 2965 cookies.

    Additional methods

    as_lwp_str(ignore_discard=True, ignore_expired=True)
    """
    def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
        """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.

        ignore_discard and ignore_expires: see docstring for FileCookieJar.save
        """
        now = time.time()
        r = []
        for cookie in self:
            if not ignore_discard and cookie.discard:
                continue
            if not ignore_expires and cookie.is_expired(now):
                continue
            r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
        # trailing "" yields a final newline
        return "\n".join(r+[""])
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Write all cookies to *filename* in Set-Cookie3 format."""
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        with open(filename, "w") as f:
            # There really isn't an LWP Cookies 2.0 format, but this indicates
            # that there is extra information in here (domain_dot and
            # port_spec) while still being compatible with libwww-perl, I hope.
            f.write("#LWP-Cookies-2.0\n")
            f.write(self.as_lwp_str(ignore_discard, ignore_expires))
    def _really_load(self, f, filename, ignore_discard, ignore_expires):
        # Parse an open Set-Cookie3 file and add each surviving cookie.
        magic = f.readline()
        if not self.magic_re.search(magic):
            msg = ("%r does not look like a Set-Cookie3 (LWP) format "
                   "file" % filename)
            raise LoadError(msg)
        now = time.time()
        header = "Set-Cookie3:"
        boolean_attrs = ("port_spec", "path_spec", "domain_dot",
                         "secure", "discard")
        value_attrs = ("version",
                       "port", "path", "domain",
                       "expires",
                       "comment", "commenturl")
        try:
            while 1:
                line = f.readline()
                if line == "": break
                if not line.startswith(header):
                    continue
                line = line[len(header):].strip()
                for data in split_header_words([line]):
                    name, value = data[0]
                    standard = {}
                    rest = {}
                    # boolean flags default to False unless present
                    for k in boolean_attrs:
                        standard[k] = False
                    for k, v in data[1:]:
                        if k is not None:
                            lc = k.lower()
                        else:
                            lc = None
                        # don't lose case distinction for unknown fields
                        if (lc in value_attrs) or (lc in boolean_attrs):
                            k = lc
                        if k in boolean_attrs:
                            if v is None: v = True
                            standard[k] = v
                        elif k in value_attrs:
                            standard[k] = v
                        else:
                            rest[k] = v
                    h = standard.get
                    expires = h("expires")
                    discard = h("discard")
                    if expires is not None:
                        expires = iso2time(expires)
                    if expires is None:
                        # no (valid) expiry date means a session cookie
                        discard = True
                    domain = h("domain")
                    domain_specified = domain.startswith(".")
                    c = Cookie(h("version"), name, value,
                               h("port"), h("port_spec"),
                               domain, domain_specified, h("domain_dot"),
                               h("path"), h("path_spec"),
                               h("secure"),
                               expires,
                               discard,
                               h("comment"),
                               h("commenturl"),
                               rest)
                    if not ignore_discard and c.discard:
                        continue
                    if not ignore_expires and c.is_expired(now):
                        continue
                    self.set_cookie(c)
        except OSError:
            raise
        except Exception:
            _warn_unhandled_exception()
            raise LoadError("invalid Set-Cookie3 format file %r: %r" %
                            (filename, line))
class MozillaCookieJar(FileCookieJar):
    """
    WARNING: you may want to backup your browser's cookies file if you use
    this class to save cookies.  I *think* it works, but there have been
    bugs in the past!

    This class differs from CookieJar only in the format it uses to save and
    load cookies to and from a file.  This class uses the Mozilla/Netscape
    `cookies.txt' format.  lynx uses this file format, too.

    Don't expect cookies saved while the browser is running to be noticed by
    the browser (in fact, Mozilla on unix will overwrite your saved cookies if
    you change them on disk while it's running; on Windows, you probably can't
    save at all while the browser is running).

    Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
    Netscape cookies on saving.

    In particular, the cookie version and port number information is lost,
    together with information about whether or not Path, Port and Discard were
    specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
    domain as set in the HTTP header started with a dot (yes, I'm aware some
    domains in Netscape files start with a dot and some don't -- trust me, you
    really don't want to know any more about this).

    Note that though Mozilla and Netscape use the same format, they use
    slightly different headers.  The class saves cookies using the Netscape
    header by default (Mozilla can cope with that).
    """
    magic_re = re.compile("#( Netscape)? HTTP Cookie File")
    header = """\
# Netscape HTTP Cookie File
# http://curl.haxx.se/rfc/cookie_spec.html
# This is a generated file!  Do not edit.
"""
    def _really_load(self, f, filename, ignore_discard, ignore_expires):
        # Parse an open cookies.txt file: one tab-separated cookie per line.
        now = time.time()
        magic = f.readline()
        if not self.magic_re.search(magic):
            raise LoadError(
                "%r does not look like a Netscape format cookies file" %
                filename)
        try:
            while 1:
                line = f.readline()
                if line == "": break
                # last field may be absent, so keep any trailing tab
                if line.endswith("\n"): line = line[:-1]
                # skip comments and blank lines XXX what is $ for?
                if (line.strip().startswith(("#", "$")) or
                    line.strip() == ""):
                    continue
                domain, domain_specified, path, secure, expires, name, value = \
                        line.split("\t")
                secure = (secure == "TRUE")
                domain_specified = (domain_specified == "TRUE")
                if name == "":
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = value
                    value = None
                initial_dot = domain.startswith(".")
                # in this format, the domain-specified flag is redundant with
                # the leading dot on the domain field
                assert domain_specified == initial_dot
                discard = False
                if expires == "":
                    # empty expiry field means a session cookie
                    expires = None
                    discard = True
                # assume path_specified is false
                c = Cookie(0, name, value,
                           None, False,
                           domain, domain_specified, initial_dot,
                           path, False,
                           secure,
                           expires,
                           discard,
                           None,
                           None,
                           {})
                if not ignore_discard and c.discard:
                    continue
                if not ignore_expires and c.is_expired(now):
                    continue
                self.set_cookie(c)
        except OSError:
            raise
        except Exception:
            _warn_unhandled_exception()
            raise LoadError("invalid Netscape format cookies file %r: %r" %
                            (filename, line))
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Write all cookies to *filename* in Mozilla/Netscape cookies.txt
        format (RFC 2965 details are lost -- see the class docstring)."""
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        with open(filename, "w") as f:
            f.write(self.header)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure: secure = "TRUE"
                else: secure = "FALSE"
                if cookie.domain.startswith("."): initial_dot = "TRUE"
                else: initial_dot = "FALSE"
                if cookie.expires is not None:
                    expires = str(cookie.expires)
                else:
                    expires = ""
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ""
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    "\t".join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value])+
                    "\n")
| {
"content_hash": "470385b38aca25b663d791dce6c3e724",
"timestamp": "",
"source": "github",
"line_count": 2107,
"max_line_length": 83,
"avg_line_length": 36.4437588989084,
"alnum_prop": 0.5396616614791566,
"repo_name": "batermj/algorithm-challenger",
"id": "adc7ed62425d06f717deb2d899504b54d69a8285",
"size": "76787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/http/cookiejar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
} |
""" The generator creating the output HTML
APACHE LICENSE 2.0
Copyright 2013 Sebastian Dahlgren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
import datetime
import jinja2
import markdown
from markdowndocs.log_handler import LOGGER as logger
def generate_html(markdown_files):
    """ Generate HTML from a given markdown file

    :type markdown_files: [MarkdownFile]
    :param markdown_files: List of MarkdownFile object
    """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(
            os.path.join(os.path.dirname(__file__), 'templates')))
    template = env.get_template('markdown-template.html')
    for markdown_file in markdown_files:
        logger.debug(
            'Generating HTML for {}..'.format(markdown_file.source_file))
        # Ensure that the output directory exists
        try:
            os.makedirs(markdown_file.destination_dir)
        except OSError as error:
            # Portable replacement for the Python-2-only
            # "except OSError as (errno, errmsg)" tuple-unpacking syntax.
            # errno 17 == EEXIST: directory already exists, which is fine.
            if error.errno != 17:
                raise
        with open(markdown_file.source_file, 'r') as file_handle:
            try:
                text = file_handle.read().decode('utf-8')
            except UnicodeError:
                logger.warning('UnicodeError when reading {}. Skipping.'.format(
                    markdown_file.source_file))
                # BUG FIX: the original used `pass` here and fell through to
                # the rendering step with `text` undefined (or stale from the
                # previous iteration); skip this file as the log message says.
                continue
        with open(markdown_file.destination_file, 'w') as file_handle:
            markdown_object = markdown.Markdown(
                extensions=[
                    'meta',
                    'toc',
                    'tables',
                    'codehilite(linenums=False)'])
            markdown_html = markdown_object.convert(text)
            # Update the title, if the title attribute is in the parsed metadata
            if 'title' in markdown_object.Meta:
                markdown_file.set_metadata(
                    'title', markdown_object.Meta['title'][0])
            html = template.render(
                {
                    'title': markdown_file.get_metadata('title'),
                    'destination_root_dir': markdown_file.destination_root_dir,
                    'markdown_html': markdown_html,
                    'generation_timestamp': datetime.datetime.utcnow().strftime(
                        '%Y-%m-%d %H:%M')
                })
            file_handle.write(html.encode('utf-8'))
            logger.debug('Wrote {}'.format(markdown_file.destination_file))
def generate_index_page(markdown_files):
    """ Generate the index page

    Renders the index template with the metadata of every exported file and
    writes it to <destination_root_dir>/index.html.

    :type markdown_files: list
    :param markdown_files: List of MarkdownFile objects to print to the index
    """
    logger.debug('Generating index page..')

    # Fixed: the original referenced the loop variable after the loop, which
    # raised NameError when markdown_files was empty. With no files there is
    # also no destination_root_dir to write to, so just skip.
    if not markdown_files:
        logger.debug('No markdown files found. Skipping index page.')
        return

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(
            os.path.join(os.path.dirname(__file__), 'templates')))
    template = env.get_template('index.html')

    markdown_metadata = [markdown_file.metadata for markdown_file in markdown_files]

    # All files share the same destination root, so take it from the first
    # file instead of relying on the last loop iteration's variable.
    index_path = os.path.join(
        markdown_files[0].destination_root_dir, 'index.html')
    with open(index_path, 'w') as file_handle:
        file_handle.write(template.render(
            {
                'markdown_metadata': markdown_metadata,
                'generation_timestamp': datetime.datetime.utcnow().strftime(
                    '%Y-%m-%d %H:%M')
            }))
def import_static_files(destination_root_dir):
    """ Import all static files to the HTML output dir

    Replaces any previous _markdown-docs_static directory under the
    destination root with a fresh copy of the bundled static assets.

    :type destination_root_dir: str
    :param destination_root_dir: Destination folder for HTML pages
    """
    static_source = os.path.join(os.path.dirname(__file__), 'static')
    static_destination = os.path.join(
        destination_root_dir, '_markdown-docs_static')

    # copytree requires the target to be absent, so clear any old copy first
    if os.path.exists(static_destination):
        shutil.rmtree(static_destination, ignore_errors=True)

    shutil.copytree(static_source, static_destination)
| {
"content_hash": "9bf679139958888a630bb44a550a9ae1",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 83,
"avg_line_length": 35.346456692913385,
"alnum_prop": 0.6123858320338605,
"repo_name": "sebdah/markdown-docs",
"id": "b1163245f7c8722d4fcd26319db6d0e1d3aca76f",
"size": "4513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markdowndocs/generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21223"
}
],
"symlink_target": ""
} |
import math
import mathutils
import struct
import bpy
import json
# Toggle between a compact packed-binary mesh dump and a readable JSON dump.
writePackedBinary = True

# Output files live next to the .blend file, sharing its base name.
filepathPRFX = bpy.data.filepath.rsplit('.', 1)[0]
meshfile = open(filepathPRFX + ".mesh.bin", "wb") if writePackedBinary else open(filepathPRFX + ".mesh.json", "w")
matfile = open(filepathPRFX + ".mat.json", "w")
camfile = open(filepathPRFX + ".cam.json", "w")

# Snapshot of the scene's objects and materials to export.
objList = bpy.data.objects
matList = bpy.data.materials
scene = bpy.context.scene

# materialDict maps material name -> export index, so mesh faces can
# reference materials by index in the emitted files.
materialDict = {}
matIdx = 0
materialExport = []
# Classify each Blender material into one of the renderer's categories and
# collect the serializable description. Later checks override earlier ones,
# warning whenever more than one category matches.
for mat in matList:
    print('Exporting material: ' + mat.name)
    material = {}
    material["name"] = mat.name
    material["specHardness"] = mat.specular_hardness
    material["emits"] = mat.emit
    material["ior"] = mat.raytrace_transparency.ior
    # Every category below used the same diffuse color, so record it once.
    # small hack because there is no definition for absorbtion color for dielectrics
    material["color"] = [mat.diffuse_color[0], mat.diffuse_color[1], mat.diffuse_color[2]]

    # Fixed: the original compared strings with `is`/`is not`, which relies
    # on CPython string interning and emits a SyntaxWarning on 3.8+; use
    # value equality instead.
    materialType = "NULL"
    if mat.use_transparency is True:
        materialType = "GLASS"
    if mat.specular_intensity > 0 or mat.specular_hardness > 1:
        if materialType != "NULL":
            print("WARNING: Non-unique material definition! Was [" + materialType + "], gets [PLASTIC]!")
        materialType = "PLASTIC"
    if mat.raytrace_mirror.use is True:
        if materialType != "NULL":
            print("WARNING: Non-unique material definition! Was [" + materialType + "], gets [MIRROR]!")
        materialType = "MIRROR"
    if mat.emit > 0:
        if materialType != "NULL":
            # Fixed: the warning previously claimed "[PLASTIC]" although the
            # material actually becomes EMITTING.
            print("WARNING: Non-unique material definition! Was [" + materialType + "], gets [EMITTING]!")
        materialType = "EMITTING"
    if materialType == "NULL":
        # fallback to DIFFUSE
        materialType = "DIFFUSE"

    print("Identified " + mat.name + " as " + materialType + "\n")
    material["type"] = materialType
    materialExport.append(material)
    materialDict[mat.name] = matIdx
    matIdx += 1

matfile.write(json.dumps(materialExport))
# --------------------- Object Geometry export -----------------------------
if writePackedBinary is False:
    meshfile.write("[")  # manual json wrapper to save memory while exporting very large scenes
exportedMeshes = 0
polyCount = 0
for obj in objList:
    if obj.type == "CAMERA":
        cam = obj.data
        if cam.type != "PERSP":
            print('no support for camera models other than \'perspective\'. Ignoring ' + cam.name)
            continue
        else:
            print("Exporting PERSP Camera")
        # Focal length normalized to a 36mm-wide sensor.
        focalLength = (cam.lens/cam.sensor_width)*36.0
        objmatrix = obj.matrix_world
        eyeV = mathutils.Vector([0, 0, 0, 1])
        targetV = mathutils.Vector([0, 0, 1, 0])
        upV = mathutils.Vector([0, 1, 0, 0])
        # NOTE(review): `vector * matrix` ordering is the pre-2.8 Blender
        # mathutils API; newer Blender requires `matrix @ vector` — confirm
        # the targeted Blender version.
        eyeV = eyeV * objmatrix
        dirV = targetV * objmatrix
        upV = upV * objmatrix
        camExport = {}
        # NOTE(review): eyeV is computed but never used — position comes
        # from obj.location instead; confirm which is intended.
        camExport["position"] = [obj.location[0], obj.location[1], obj.location[2]]
        camExport["rotation"] = [obj.rotation_euler[0], obj.rotation_euler[1], obj.rotation_euler[2]]
        camExport["viewDirection"] = [dirV[0], dirV[1], dirV[2]]
        camExport["upVector"] = [upV[0], upV[1], upV[2]]
        camExport["focalLength"] = focalLength
        # NOTE(review): with multiple cameras this writes several JSON
        # objects back-to-back into one file — only valid for one camera.
        camfile.write(json.dumps(camExport))
    if obj.type == "MESH":
        print('Exporting a mesh object: ' + obj.name + '(' + obj.data.name + ')')
        # Evaluated mesh copy, transformed into world space.
        objMesh = obj.to_mesh(scene, True, 'RENDER')
        objMesh.transform(obj.matrix_world, True)
        if writePackedBinary:
            # Packed layout per face: 9 floats (3 vertices) + 1 byte material index.
            for face in objMesh.polygons:
                # NOTE(review): only the first three vertices are taken, so
                # quads/ngons are silently truncated — assumes triangulated
                # meshes; confirm upstream triangulation.
                p0 = objMesh.vertices[face.vertices[0]].co
                p1 = objMesh.vertices[face.vertices[1]].co
                p2 = objMesh.vertices[face.vertices[2]].co
                meshfile.write(struct.pack("fff", p0.x, p0.y, p0.z))
                meshfile.write(struct.pack("fff", p1.x, p1.y, p1.z))
                meshfile.write(struct.pack("fff", p2.x, p2.y, p2.z))
                meshfile.write(struct.pack("B", materialDict[objMesh.materials[face.material_index].name]))
                polyCount += 1
        else:
            # JSON layout: comma-separated mesh objects inside the manual
            # top-level "[" ... "]" wrapper.
            if exportedMeshes > 0:
                meshfile.write(", ")
            mesh = {}
            mesh["name"] = obj.name
            mesh["type"] = "TRIANGULAR_MESH"
            mesh["triangles"] = []
            for face in objMesh.polygons:
                p0 = objMesh.vertices[face.vertices[0]].co
                p1 = objMesh.vertices[face.vertices[1]].co
                p2 = objMesh.vertices[face.vertices[2]].co
                mesh["triangles"].append({"p0": [p0.x, p0.y, p0.z], "p1": [p1.x, p1.y, p1.z], "p2": [p2.x, p2.y, p2.z],
                                          "m": materialDict[objMesh.materials[face.material_index].name]})
                polyCount += 1
            meshfile.write(json.dumps(mesh))
        exportedMeshes += 1
if exportedMeshes > 0 and writePackedBinary is False:
    meshfile.write("]\n")
# Flush all three output files to disk, then report a short export summary.
for output_file in (meshfile, matfile, camfile):
    output_file.close()

print("---------Statistics---------")
print("Nr. of Materials: " + str(matIdx))
print("Nr. of Meshes: " + str(exportedMeshes))
print("Nr. of Polygons: " + str(polyCount))
print("Nr. of Cameras: 1")
print("----------------------------")
print("Have fun!")
| {
"content_hash": "0df6bfed1f3db904fb47a97d71b2c76b",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 119,
"avg_line_length": 37.60526315789474,
"alnum_prop": 0.5844996501049685,
"repo_name": "bensteinert/chromarenderer-java",
"id": "5230bf2a2f1971de70a91e69cbcc724818bdb513",
"size": "5789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chroma-java-core/src/main/resources/blenderToChroma.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "277374"
},
{
"name": "Python",
"bytes": "11595"
},
{
"name": "Scala",
"bytes": "6488"
}
],
"symlink_target": ""
} |
from django import forms
import datetime
class RecipeForm(forms.Form):
    """Form for entering a recipe: basic metadata plus free-text body fields.

    Field declaration order determines the rendered order in Django.
    """

    name = forms.CharField(label='name')
    # `datetime.date.today` is passed as a callable (no parentheses), so the
    # default is evaluated when the form is instantiated, per Django's
    # handling of callable `initial` values.
    created = forms.DateField(initial=datetime.date.today)
    servings = forms.IntegerField(label='servings')
    # Multi-line free-text fields rendered as <textarea>.
    description = forms.CharField(label='description', widget=forms.Textarea)
    ingredients = forms.CharField(label='ingredients', widget=forms.Textarea)
    instructions = forms.CharField(label='instructions', widget=forms.Textarea)
| {
"content_hash": "52550a517c4afd0a56abd0772242ff28",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 79,
"avg_line_length": 41.90909090909091,
"alnum_prop": 0.7613882863340564,
"repo_name": "sarasafavi/introdjango",
"id": "a911ec82fbe1e3ad646f2d33273b1fd1e851ba22",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipeminder/recipes/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6080"
}
],
"symlink_target": ""
} |
"""
PyRemote
Used by PyRemote files generated by
/thrift/compiler/generate/t_py_generator.cc
Remote.run is the interface used by the generated code.
Based on whether --host or --url is specified as a commandline option,
either a RemoteHostClient or RemoteHttpClient is instantiated to
handle the request.
Additional remote client types (subclasses of RemoteClient) can be
registered with the Remote class to define different ways of specifying a
host or communicating with the host. When registering a new client type,
you can specify the option used to select that type (i.e., url) with the
SELECTOR_OPTIONS attribute, and you can specify additional commandline options
with the CMDLINE_OPTIONS attribute. See the implementations of RemoteHostClient
and RemoteHttpClient for examples.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
import os
import pprint
import sys
import traceback
from typing import Union
from six import string_types
from six.moves.urllib.parse import urlparse
from thrift import Thrift
from thrift.protocol import (
TBinaryProtocol,
TCompactProtocol,
THeaderProtocol,
TJSONProtocol,
TSimpleJSONProtocol,
)
from thrift.transport import THttpClient, TSocket, TSSLSocket, TTransport
from thrift.transport.THeaderTransport import THeaderTransport
class Function(object):
    """Metadata describing a single method exposed by a thrift service."""

    def __init__(self, fn_name, svc_name, return_type, args):
        # Tuple-assign in one statement; attribute names are part of the
        # public interface and stay unchanged.
        self.fn_name, self.svc_name = fn_name, svc_name
        self.return_type, self.args = return_type, args
def print_functions(functions, service_names, out, local_only: bool = False) -> None:
    """Write a human-readable listing of the service's methods to *out*.

    When *local_only* is set, only the first (most-derived) service is
    listed; otherwise all services are listed in reverse declaration order.
    Oneway methods (return_type is None) are printed as "oneway void".
    """
    grouped = {name: {} for name in service_names}
    for fn in functions.values():
        grouped[fn.svc_name][fn.fn_name] = fn

    selected = service_names[0:1] if local_only else reversed(service_names)
    for svc_name in selected:
        out.write("Functions in %s:\n" % (svc_name,))
        for fn_name in sorted(grouped[svc_name]):
            fn = grouped[svc_name][fn_name]
            if fn.return_type is None:
                prefix = "  oneway void "
            else:
                prefix = "  %s " % (fn.return_type,)
            arg_list = ", ".join(
                "%s %s" % (arg_type, arg_name)
                for arg_type, arg_name, _true_type in fn.args
            )
            out.write("%s%s(%s)\n" % (prefix, fn_name, arg_list))
# Registries mapping format name -> handler function, populated by the
# @add_format decorator below. "input" handlers parse commandline/stdin
# values into thrift call arguments; "output" handlers render a call's
# return value.
format_to_helper = {
    "input": {},
    "output": {},
}

# Optional long-form help text per format, shown by `-ifmt help` / `-ofmt help`.
format_to_help_message = {
    "input": {},
    "output": {},
}
def add_format(name, format_type: str, help_msg=None):
    """Return a decorator registering a handler for *name* under *format_type*.

    All functions registered with the same format_type must share the same
    interface/signature; signatures may differ across format_types.
    Raises ValueError if *name* is already taken for that format_type.
    """
    registry = format_to_helper[format_type]

    def decorator(func):
        if name in registry:
            raise ValueError("Format name '{}' is used twice".format(name))
        registry[name] = func
        if help_msg is not None:
            format_to_help_message[format_type][name] = help_msg
        return func

    return decorator
def get_helper_for_format(name, format_type: str):
    """Resolve *name* to its registered handler for *format_type*.

    The special name "help" prints the list of formats (with any detailed
    help messages) to stderr and exits. An unknown name also exits with a
    usage error.
    """
    registry = format_to_helper[format_type]
    messages = format_to_help_message[format_type]

    if name == "help":
        detailed = "\nDetailed help messages:\n\n" + "\n\n".join(
            "[{}] {}".format(fmt_name, fmt_help)
            for fmt_name, fmt_help in sorted(
                messages.items(), key=lambda item: item[0]
            )
        )
        print(
            "List of all formats: {}".format(", ".join(registry.keys())),
            detailed if messages else "",
            file=sys.stderr,
        )
        sys.exit(os.EX_USAGE)

    helper = registry.get(name)
    if helper is None:
        sys.stderr.write("Invalid {} format: {}\n".format(format_type, name))
        sys.exit(os.EX_USAGE)
    return helper
@add_format("python", "output")
def __python_output_handler(ret: object) -> None:
if isinstance(ret, string_types):
print(ret)
else:
pprint.pprint(ret, indent=2)
def __thrift_to_json(x):
    """Serialize thrift struct *x* into plain Python data.

    Writes the struct through TSimpleJSONProtocol into a memory buffer and
    decodes the resulting JSON text.
    """
    buffer_transport = TTransport.TMemoryBuffer()
    x.write(TSimpleJSONProtocol.TSimpleJSONProtocol(buffer_transport))
    return json.loads(buffer_transport.getvalue())
@add_format("json", "output")
def __json_output_handler(ret) -> None:
"""
Python object
{
"foo": [
ThriftStructB(
x=2
),
],
"x": ["%set is nice", 9,8,7, set("blah % blah", 4, 5, 6)],
"bar": ThriftStructA(
x=1,
y="b",
z=[1,2,3]
),
}
<=>
JSON object
{
"foo": [
{"x": 2}
],
"x": ["%set is nice", 9,8,7, ["blah % blah", 4, 5, 6]],
"bar": {
"x": 1,
"y": "b",
"z": [1,2,3]
}
}
There is no need to handle the type ambiguity between Json dict and
thrift structs, because pyremote knows what type the services want,
and we simply try to convert them to that type.
Also, the exact form of dictionaries produced for Thrift structs may differ
based across different Thrift versions.
"""
print(json.dumps(ret, default=__thrift_to_json))
def __eval_arg(arg, thrift_types):
    """Evaluate a commandline argument within the scope of the IF types."""
    scope = dict(thrift_types)
    # Compile explicitly with dont_inherit=1 so the evaluated code does not
    # inherit this module's __future__ directives. In particular this
    # ensures string literals are not treated as unicode unless explicitly
    # qualified as such.
    code = compile(arg, "<command_line>", "eval", 0, 1)
    return eval(code, scope)
def __preprocess_input(fn, args, ctx):
    """Check argument arity and collect the service's custom thrift types.

    Exits with a usage error when the argument count does not match the
    function's declared parameters; otherwise returns a name -> type dict
    of everything defined in the generated ttypes module.
    """
    expected = len(fn.args)
    if len(args) != expected:
        sys.stderr.write(
            ('"%s" expects %d arguments (received %d)\n')
            % (fn.fn_name, expected, len(args))
        )
        sys.exit(os.EX_USAGE)
    # Expose all custom thrift types by name
    return {key: getattr(ctx.ttypes, key) for key in dir(ctx.ttypes)}
@add_format(
    "python",
    "input",
    (
        'Evaluate every string in "function_args" using eval() so '
        "that we can support any type of data, unless we already know "
        "the thrift function accepts that argument as a string. In that "
        "case, we simply pass your string without eval()."
    ),
)
def __python_natural_input_handler(fn, args, ctx):
    """Prepare args for eval: string-typed parameters are repr()-quoted first."""
    prepared = []
    for raw, (_type_name, _arg_name, true_type) in zip(args, fn.args):
        # A declared string parameter is passed through verbatim; repr()
        # turns it into a literal so the downstream eval() round-trips it.
        prepared.append(repr(raw) if true_type == "string" else raw)
    return __python_eval_input_handler(fn, prepared, ctx)
@add_format(
    "python_eval",
    "input",
    ('Similar to "python", but we evaluate everything, including strings.'),
)
def __python_eval_input_handler(fn, args, ctx):
    """eval() every commandline argument in the scope of the IF types."""
    thrift_types = __preprocess_input(fn, args, ctx)
    parsed = []
    for raw in args:
        try:
            parsed.append(__eval_arg(raw, thrift_types))
        except Exception:
            # Show the full traceback before exiting so the user can see
            # which expression failed and why.
            traceback.print_exc(file=sys.stderr)
            sys.stderr.write('error parsing argument "%s"' % (raw,))
            sys.exit(os.EX_DATAERR)
    return parsed
@add_format(
    "python_eval_stdin",
    "input",
    (
        'Disables the command line option "function_args", and requires '
        "you to pass parameters from stdin. The string you passed in will "
        "be sent to eval(). And it must produce a Python list of objects, "
        "which represents the input argument list to the thrift function."
    ),
)
def __python_stdin_input_handler(fn, args, ctx):
    """Read the argument list from stdin and defer to the eval handler."""
    # NOTE(review): the help text above says stdin is sent to eval(), but
    # the payload is actually parsed as JSON first; each element of the
    # resulting list is then eval()'d individually — confirm intent.
    stdin_args = json.load(sys.stdin)
    return __python_eval_input_handler(fn, stdin_args, ctx)
def __args_class_for_function(fn, service_class):
    """Fetch the generated ``<fn_name>_args`` class from the service module.

    Exits with a usage error if the generated class is missing (e.g. thrift
    stopped emitting it).
    """
    args_class = getattr(service_class, "%s_args" % fn.fn_name, None)
    if args_class is None:
        sys.stderr.write(
            "ERROR: <function name>_args class is unexpected missing. Thrift "
            "may have deprecated its usage. Please re-implement pyremote."
        )
        sys.exit(os.EX_USAGE)
    return args_class
@add_format(
    "json",
    "input",
    (
        'Please pass in only one string as "function_args". This string '
        "is a json. Its top level must be a dictionary mapping names of "
        "the thrift function's parameters to the value you want to pass "
        "in. Make sure to represent thrift objects using the same format "
        "as generated by pyremote (when using json output format). [Hint: "
        "use this option with a command line tool that can operate on JSONs]"
    ),
)
def __json_natural_input_handler(fn, args, ctx):
    """Decode a single JSON dict of named arguments into a positional list."""
    if len(args) != 1:
        sys.stderr.write(
            'Error: when using "json" input format, only one cmdline argument '
            "should be used to specify function call arguments. Store arguments "
            "as a json list."
        )
        sys.exit(os.EX_USAGE)

    decoded = json.loads(args[0])
    if not isinstance(decoded, dict):
        sys.stderr.write(
            "ERROR: Your json input must be a dictionary (of function arguments).\n"
        )
        sys.exit(os.EX_USAGE)

    # Let the generated <fn>_args class perform the JSON -> thrift decoding,
    # then pull the values back out in declared parameter order.
    args_obj = __args_class_for_function(fn, ctx.service_class)()
    args_obj.readFromJson(decoded, is_text=False)
    ordered = [getattr(args_obj, arg_name, None) for _, arg_name, _ in fn.args]
    if None in ordered:
        sys.stderr.write(
            "ERROR: <function name>_args class is unexpected missing. Thrift "
            "may have deprecated its usage. Please re-implement pyremote."
        )
        sys.exit(os.EX_USAGE)
    return ordered
@add_format(
    "json_stdin",
    "input",
    (
        'Similar to "json". But this disables the command line option "function_args" '
        "and accepts one json string from stdin."
    ),
)
def __json_stdin_input_handler(fn, args, ctx):
    """Read the whole JSON payload from stdin and defer to the json handler."""
    payload = sys.stdin.read()
    return __json_natural_input_handler(fn, [payload], ctx)
def __is_thrift_struct(obj) -> bool:
    """Heuristically detect thrift structs.

    Anything json.dumps cannot serialize natively is assumed to be a
    thrift struct.
    """
    try:
        json.dumps(obj)
        return False
    except Exception:
        # json.dumps raises TypeError for unserializable objects and
        # ValueError for circular references. Fixed: catching Exception
        # instead of BaseException lets KeyboardInterrupt/SystemExit
        # propagate instead of being misclassified as "is a struct".
        return True
def __get_template_for_struct(struct_type):
    """Recursively build a placeholder dict mirroring *struct_type*'s fields.

    Nested struct fields recurse; every other field type gets a generic
    placeholder string.
    """
    template = {}
    for spec in struct_type.thrift_spec:
        if spec is None:
            continue
        field_type, field_name, type_args = spec[1], spec[2], spec[3]
        if field_type == Thrift.TType.STRUCT:
            # type_args[0] holds the nested struct class in thrift_spec.
            template[field_name] = __get_template_for_struct(type_args[0])
        else:
            template[field_name] = "TEMPLATE [TYPE UNKNOWN]"
    return template
def get_json_template_obj(name, functions, service_class):
    """Resolve *name* as a function or struct and return a JSON template dict.

    Functions take precedence: the template describes their generated
    ``<name>_args`` class. Unknown names exit with a usage error.
    """
    fn = functions.get(name)
    struct = getattr(service_class, name, None)
    if fn is None and struct is None:
        sys.stderr.write("ERROR: unknown structure/function: {}\n".format(name))
        sys.exit(os.EX_USAGE)

    if fn is not None:
        print(
            "Treating",
            name,
            "as a function. Generating template for its arguments...",
            file=sys.stderr,
        )
        target_class = __args_class_for_function(fn, service_class)
    else:
        print(
            "Treating",
            name,
            "as a structure. Generating template for it...",
            file=sys.stderr,
        )
        target_class = struct
    return __get_template_for_struct(target_class)
class RemoteClient(object):
    """Base class for pyremote client backends.

    Subclasses define how a server is addressed (host, url, unix path) by
    overriding _get_client/_close_client/_validate_options and declaring
    SELECTOR_OPTIONS / CMDLINE_OPTIONS.
    """

    def __init__(
        self, functions, service_names, service_class, ttypes, print_usage, default_port
    ):
        # functions: dict of name -> Function metadata for the service.
        self.functions = functions
        self.service_names = service_names
        self.service_class = service_class
        self.ttypes = ttypes
        # Callable that writes the argparse usage text to a stream.
        self.print_usage = print_usage
        self.default_port = default_port

    def _exit(self, error_message=None, status=os.EX_USAGE, err_out=sys.stderr):
        """Report an error, show help information, and exit the program"""
        if error_message is not None:
            print("Error: %s" % error_message, file=err_out)
        # NOTE(review): identity compare on an int constant; works in
        # CPython for small ints but `==` would be the safe spelling.
        if status is os.EX_USAGE:
            self.print_usage(err_out)
        if self.functions is not None and status in {os.EX_USAGE, os.EX_CONFIG}:
            print_functions(self.functions, self.service_names, err_out)
        sys.exit(status)

    def _validate_options(self, options):
        """Check option validity and call _exit if there is an error"""
        pass

    def _get_client(self, options):
        """Get the thrift client that will be used to make method calls"""
        raise TypeError("_get_client should be called on " "a subclass of RemoteClient")

    def _close_client(self):
        """After making the method call, do any cleanup work"""
        pass

    def _process_args(self, cmdline_args):
        """Populate instance data using commandline arguments"""
        fn_name = cmdline_args.function_name
        if fn_name not in self.functions:
            # _exit never returns (sys.exit), so the else branch is safe.
            self._exit(
                error_message='Unknown function "%s"' % fn_name, status=os.EX_CONFIG
            )
        else:
            function = self.functions[fn_name]
        # input_format is the handler selected via -ifmt; it parses the raw
        # commandline strings into thrift argument values.
        function_args = cmdline_args.input_format(
            function, cmdline_args.function_args, self
        )
        self._validate_options(cmdline_args)
        return function.fn_name, function_args

    def _execute(self, fn_name, fn_args, cmdline_args):
        """Make the requested call.

        Assumes _parse_args() and _process_args() have already been called.
        """
        client = self._get_client(cmdline_args)
        # Call the function
        method = getattr(client, fn_name)
        try:
            ret = method(*fn_args)
        except Thrift.TException as e:
            # Thrift-level failures are rendered through the normal output
            # handler rather than crashing.
            ret = "Exception:\n" + str(e)
        cmdline_args.output_format(ret)
        # Reach into the client's input protocol to surface any response
        # headers when the header transport is in use.
        transport = client._iprot.trans
        if isinstance(transport, THeaderTransport):
            response_headers = transport.get_headers()
            if response_headers is not None and len(response_headers) > 0:
                print("Response headers:")
                pprint.pprint(transport.get_headers(), indent=2)
        self._close_client()

    def run(self, cmdline_args):
        """Parse args, execute the call, and exit with status 0."""
        fn_name, fn_args = self._process_args(cmdline_args)
        self._execute(fn_name, fn_args, cmdline_args)
        self._exit(status=0)
def ssl_parsed_bool(arg: Union[bool, str]) -> bool:
    """argparse ``type=`` callback parsing the --ssl flag value.

    Fixed annotation: argparse passes the raw commandline token as ``str``;
    the option's declared default/const may already be ``bool``, so both
    are accepted.

    :raises argparse.ArgumentTypeError: for any other value.
    """
    if isinstance(arg, bool):
        return arg
    if arg in ("true", "1"):
        return True
    elif arg in ("false", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("argument must be one of true, 1, false, or 0")
class RemoteTransportClient(RemoteClient):
    """Abstract class for clients with transport manually opened and closed"""

    # Shared transport/protocol flags inherited by every concrete backend.
    CMDLINE_OPTIONS = [
        (
            ("-f", "--framed"),
            {"action": "store_true", "default": False, "help": "Use framed transport"},
        ),
        (
            ("-s", "--ssl"),
            {
                "action": "store",
                "type": ssl_parsed_bool,
                "default": True,
                "const": True,
                "nargs": "?",
                "help": "Use SSL socket",
            },
        ),
        (
            ("-U", "--unframed"),
            {
                "action": "store_true",
                "default": False,
                "help": "Use unframed transport",
            },
        ),
        (
            ("-j", "--json"),
            {"action": "store_true", "default": False, "help": "Use TJSONProtocol"},
        ),
        (
            ("-c", "--compact"),
            {"action": "store_true", "default": False, "help": "Use TCompactProtocol"},
        ),
        (
            ("-H", "--headers"),
            {
                "action": "store",
                "metavar": "HEADERS_DICT",
                "help": "Python code to eval() into a dict of write headers",
            },
        ),
    ]

    def _get_client_by_transport(self, options, transport, socket=None):
        """Pick a protocol from the options, open the transport, build a client.

        Stores the opened transport on self._transport so _close_client can
        close it after the call.
        """
        # Create the protocol and client
        if options.json:
            protocol = TJSONProtocol.TJSONProtocol(transport)
        elif options.compact:
            protocol = TCompactProtocol.TCompactProtocol(transport)
        # No explicit option about protocol is specified. Try to infer.
        elif options.framed or options.unframed:
            protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        elif socket is not None:
            # If json, compact, framed, and unframed are not specified,
            # THeaderProtocol is the default.
            # Note: this deliberately REPLACES the buffered/framed transport
            # passed in — the header transport wraps the raw socket directly.
            transport = THeaderTransport(socket)
            if options.headers is not None:
                try:
                    # NOTE(review): eval of a commandline string; acceptable
                    # for a developer CLI but not for untrusted input.
                    parsed_headers = eval(options.headers)
                except Exception:
                    self._exit(
                        error_message="Request headers (--headers) argument"
                        " failed eval"
                    )
                if not isinstance(parsed_headers, dict):
                    self._exit(
                        error_message="Request headers (--headers) argument"
                        " must evaluate to a dict"
                    )
                for header_name, header_value in parsed_headers.items():
                    transport.set_header(header_name, header_value)
            protocol = THeaderProtocol.THeaderProtocol(transport)
        else:
            self._exit(
                error_message=("No valid protocol " "specified for %s" % (type(self))),
                status=os.EX_USAGE,
            )
        transport.open()
        # Remember the transport for _close_client.
        self._transport = transport
        client = self.service_class.Client(protocol)
        return client

    def _close_client(self):
        """Close the transport opened by _get_client_by_transport."""
        self._transport.close()

    def _validate_options(self, options):
        """Reject mutually exclusive transport flags."""
        super(RemoteTransportClient, self)._validate_options(options)
        if options.framed and options.unframed:
            self._exit(error_message="cannot specify both " "--framed and --unframed")

    def _parse_host_port(self, value, default_port):
        """Split "host[:port]" into (host, int port), defaulting the port.

        :raises ValueError: if the port part is not an integer.
        """
        # rsplit with maxsplit=1 so only the final colon separates the port.
        parts = value.rsplit(":", 1)
        if len(parts) == 1:
            return (parts[0], default_port)
        try:
            port = int(parts[1])
        except ValueError:
            raise ValueError("invalid port: " + parts[1])
        return (parts[0], port)
class RemoteHostClient(RemoteTransportClient):
    """Client selected by --host: plain or SSL TCP socket transport."""

    SELECTOR_OPTIONS = "host"

    CMDLINE_OPTIONS = list(RemoteTransportClient.CMDLINE_OPTIONS) + [
        (
            ("-h", "--host"),
            {
                "action": "store",
                "metavar": "HOST[:PORT]",
                "help": "The host and port to connect to",
            },
        )
    ]

    def _get_client(self, options):
        """Open a socket to --host (SSL unless disabled) and build the client."""
        host, port = self._parse_host_port(options.host, self.default_port)
        if options.ssl:
            sock = TSSLSocket.TSSLSocket(host, port)
        else:
            sock = TSocket.TSocket(host, port)
        # Framed vs buffered wrapper; header protocol may replace it later.
        wrapper = (
            TTransport.TFramedTransport
            if options.framed
            else TTransport.TBufferedTransport
        )
        return self._get_client_by_transport(options, wrapper(sock), socket=sock)
class RemoteHttpClient(RemoteTransportClient):
    """Client selected by --url: thrift-over-HTTP transport."""

    SELECTOR_OPTIONS = "url"

    CMDLINE_OPTIONS = list(RemoteTransportClient.CMDLINE_OPTIONS) + [
        (
            ("-u", "--url"),
            {"action": "store", "help": "The URL to connect to, for HTTP transport"},
        )
    ]

    def _get_client(self, options):
        """Build a client over an HTTP transport for --url.

        THttpClient parses the URL itself; the previous manual
        urlparse()/_parse_host_port() results were dead code and have been
        removed.
        """
        transport = THttpClient.THttpClient(options.url)
        return self._get_client_by_transport(options, transport)

    def _validate_options(self, options):
        """Check if there are any option inconsistencies, and exit if so"""
        super(RemoteHttpClient, self)._validate_options(options)
        # HTTP framing is incompatible with the default header protocol.
        if not any([options.unframed, options.json]):
            self._exit(
                error_message="can only specify --url with " "--unframed or --json"
            )
class RemoteUNIXDomainClient(RemoteTransportClient):
    """Client selected by --path: UNIX domain socket transport."""

    SELECTOR_OPTIONS = "path"

    CMDLINE_OPTIONS = list(RemoteTransportClient.CMDLINE_OPTIONS) + [
        (("-p", "--path"), {"action": "store", "help": "The path of the socket to use"})
    ]

    def _get_client(self, options):
        """Connect to the UNIX socket at --path and build the client."""
        sock = TSocket.TSocket(unix_socket=options.path)
        wrapper = (
            TTransport.TFramedTransport
            if options.framed
            else TTransport.TBufferedTransport
        )
        return self._get_client_by_transport(options, wrapper(sock), socket=sock)
class Namespace(object):
    """Mutable attribute bag that also supports dict-style item access."""

    def __init__(self, attrs=None):
        # Merge the seed mapping straight into the instance dict so every
        # key becomes an attribute.
        if attrs is not None:
            self.__dict__.update(attrs)

    def __getitem__(self, key):
        return self.__dict__[key]

    def __setitem__(self, key, value):
        self.__dict__[key] = value
class Remote(object):
    """Commandline front-end: registers client types and dispatches a call.

    Class-level state is shared across all registrations; register_client_type
    is called at import time (bottom of this module) for the built-in backends.
    """

    # Registered RemoteClient subclasses and the argparse flags they own.
    __client_types = set()
    __occupied_args = {}
    # -h is taken by --host, so argparse's automatic help is disabled and a
    # --help action is registered manually in _parse_cmdline_options.
    __parser = argparse.ArgumentParser(add_help=False)

    @classmethod
    def register_cmdline_options(cls, cmdline_options):
        """Add options to the shared parser, tolerating exact duplicates."""
        for args, kwargs in cmdline_options:
            is_repeated = False
            for arg in args:
                if arg in cls.__occupied_args:
                    # Re-registering the identical option is allowed (client
                    # types share their base-class options); a conflicting
                    # definition is a programming error.
                    if cls.__occupied_args[arg] != kwargs:
                        raise ValueError("Redefinition of {}".format(arg))
                    is_repeated = True
            if is_repeated:
                continue
            cls.__occupied_args.update({x: kwargs for x in args})
            cls.__parser.add_argument(*args, **kwargs)

    @classmethod
    def register_client_type(cls, client_type):
        """Register a RemoteClient subclass and its commandline options."""
        if not issubclass(client_type, RemoteClient):
            raise TypeError(
                (
                    "Remote client must be of type RemoteClient. "
                    "Got type %s." % client_type.__name__
                )
            )
        if client_type is RemoteClient:
            raise TypeError(
                ("Remote client must be a strict subclass " "of RemoteClient.")
            )
        if not hasattr(client_type, "SELECTOR_OPTIONS"):
            raise AttributeError(
                ("Remote client must have a " "SELECTOR_OPTIONS field.")
            )
        cls.__client_types.add(client_type)
        cls.register_cmdline_options(client_type.CMDLINE_OPTIONS)

    @classmethod
    def _exit_usage_error(cls, message):
        """Print an error plus full help to stderr and exit with EX_USAGE."""
        sys.stderr.write("ERROR: " + message + "\n")
        cls.__parser.print_help(sys.stderr)
        sys.exit(os.EX_USAGE)

    @classmethod
    def _get_client_type(cls, options):
        """Return the single client type whose selector option was supplied."""
        matching_types = [
            ct
            for ct in cls.__client_types
            if getattr(options, ct.SELECTOR_OPTIONS) is not None
        ]
        if len(matching_types) != 1:
            # _exit_usage_error never returns, so the else branch is safe.
            cls._exit_usage_error(
                "Must specify exactly one of [%s]"
                % (", ".join("--%s" % ct.SELECTOR_OPTIONS for ct in cls.__client_types))
            )
        else:
            return matching_types[0]

    @classmethod
    def _parse_cmdline_options(cls, argv):
        """Register the generic options and parse argv (excluding argv[0])."""
        cls.register_cmdline_options(
            (
                (
                    ("-ifmt", "--input-format"),
                    {
                        "action": "store",
                        "default": "python",
                        # The type callback resolves the format name to its
                        # handler function at parse time.
                        "type": lambda x: get_helper_for_format(x, "input"),
                        "help": (
                            "Change the format for function_args. Generally speaking, "
                            "there are two main formats: python_* and json_*. Defaults "
                            'to "python". Use -ifmt help for entire list of available '
                            "formats."
                        ),
                    },
                ),
                (
                    (
                        "-ofmt",
                        "--output-format",
                    ),
                    {
                        "action": "store",
                        "default": "python",
                        "type": lambda x: get_helper_for_format(x, "output"),
                        # NOTE(review): "direclty" typo in this user-facing
                        # help string (left untouched here).
                        "help": (
                            "Change the output format for the return value. The "
                            'default is "python", which direclty prints out strings '
                            "and pprint() other types. Available formats: {}."
                        ).format(",".join(format_to_helper["output"].keys())),
                    },
                ),
                (
                    ("--help",),
                    {"action": "help"},
                ),
                (
                    ("-la", "--list-all-functions"),
                    {"action": "store_true"},
                ),
                (
                    (
                        "-l",
                        "--list-functions",
                    ),
                    {"action": "store_true"},
                ),
                (
                    ("-g", "--generate-template"),
                    {
                        "action": "store",
                        "metavar": "THRIFT_STRUCT_OR_FUNCTION_NAME",
                        "help": (
                            "Generate a template for a thrift struct, OR, arguments of "
                            "a function call. Currently it supports only json format. "
                            "No need to specify function_name."
                        ),
                    },
                ),
                (
                    ("function_name",),
                    {"nargs": "?", "help": "Name of the remote function to call"},
                ),
                (
                    ("function_args",),
                    {
                        "nargs": "*",
                        "help": (
                            "Arguments for the remote function. Look at --input-format "
                            "for more details."
                        ),
                    },
                ),
            )
        )
        try:
            return cls.__parser.parse_args(argv[1:])
        except BaseException:
            # argparse raises SystemExit on its own errors; normalize every
            # failure to EX_USAGE.
            sys.exit(os.EX_USAGE)

    @classmethod
    def run(
        cls, functions, service_names, service_class, ttypes, argv, default_port=9090
    ):
        """Entry point used by generated pyremote scripts."""
        args = cls._parse_cmdline_options(argv)
        # The listing/template modes are mutually exclusive.
        conflicts = [
            x
            for x in [
                "list_all_functions",
                "list_functions",
                "generate_template",
            ]
            if getattr(args, x)
        ]
        if len(conflicts) > 1:
            cls._exit_usage_error(
                "Please do not specify all of {} at once.".format(",".join(conflicts))
            )
        if args.list_all_functions:
            print_functions(functions, service_names, sys.stdout, local_only=False)
            return
        if args.list_functions:
            print_functions(functions, service_names, sys.stdout, local_only=True)
            return
        if args.function_name is None:
            cls._exit_usage_error("Please specify function_name.")
        if args.generate_template:
            ans = get_json_template_obj(
                args.generate_template, functions, service_class
            )
            print(json.dumps(ans))
            return
        # Normal call path: pick the backend from --host/--url/--path.
        client_type = cls._get_client_type(args)
        client = client_type(
            functions,
            service_names,
            service_class,
            ttypes,
            cls.__parser.print_help,
            default_port,
        )
        client.run(args)
# Register the built-in transport selectors (--host, --url, --path) at import
# time so their commandline options are always available.
Remote.register_client_type(RemoteHostClient)
Remote.register_client_type(RemoteHttpClient)
Remote.register_client_type(RemoteUNIXDomainClient)
| {
"content_hash": "ddea308f11a8779e601a35bda1e16b23",
"timestamp": "",
"source": "github",
"line_count": 836,
"max_line_length": 88,
"avg_line_length": 33.44976076555024,
"alnum_prop": 0.5479902732084108,
"repo_name": "facebook/fbthrift",
"id": "c1d9d536ce5f042f8f9feae22ede001a5b0f888d",
"size": "28579",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "thrift/lib/py/util/remote.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15608"
},
{
"name": "C++",
"bytes": "10658844"
},
{
"name": "CMake",
"bytes": "147347"
},
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "Cython",
"bytes": "339005"
},
{
"name": "Emacs Lisp",
"bytes": "11229"
},
{
"name": "Go",
"bytes": "447092"
},
{
"name": "Hack",
"bytes": "313122"
},
{
"name": "Java",
"bytes": "1990062"
},
{
"name": "JavaScript",
"bytes": "38872"
},
{
"name": "Mustache",
"bytes": "1269560"
},
{
"name": "Python",
"bytes": "1623026"
},
{
"name": "Ruby",
"bytes": "6111"
},
{
"name": "Rust",
"bytes": "283392"
},
{
"name": "Shell",
"bytes": "6615"
},
{
"name": "Thrift",
"bytes": "1859041"
},
{
"name": "Vim Script",
"bytes": "2887"
}
],
"symlink_target": ""
} |
"""
WSGI config for DjangoTest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoTest.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "4a599982b94a9f2defac2605295e3e7a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.214285714285715,
"alnum_prop": 0.7772151898734178,
"repo_name": "walkskyer/DjangoTest",
"id": "9000219092d63127bebb179204d0c9da2e7ea3de",
"size": "395",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "DjangoTest/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3190"
}
],
"symlink_target": ""
} |
''' a package of helper methods to tackle common aws client interactions '''
__author__ = 'rcj1492'
__created__ = '2017.06'
__licence__ = 'MIT'
def construct_client_ec2(ec2_cred, region_name=''):
    """Build and smoke-test a labpack ec2Client from a credential mapping.

    :param ec2_cred: dict of keyword arguments for ec2Client (mutated in
        place when region_name is provided)
    :param region_name: optional AWS region overriding ec2_cred['region_name']
    :return: connected ec2Client instance
    :raises Exception: when the AWS credentials are rejected
    """
    from labpack.authentication.aws.iam import AWSConnectionError
    from labpack.platforms.aws.ec2 import ec2Client
    if region_name:
        ec2_cred['region_name'] = region_name
    try:
        client = ec2Client(**ec2_cred)
    except AWSConnectionError as err:
        # surface only the last (most specific) line of the connection error
        last_line = str(err).splitlines()[-1]
        raise Exception('AWS configuration has the following problem:\n%s' % last_line)
    # cheap API call to verify the credentials actually work
    client.list_keypairs()
    return client
def retrieve_instance_details(ec2_client, container_alias, environment_type, resource_tags):
    """Find the single ec2 instance matching a service and optional filters.

    :param ec2_client: labpack ec2Client used for the lookups
    :param container_alias: service name that must appear among the
        instance's tag values (empty means 'service in working directory')
    :param environment_type: optional value matched against the Env tag
    :param resource_tags: optional comma-separated tag values that must all
        be present on the instance
    :return: dict of details for the single matching instance
    :raises Exception: when zero or more than one instance matches
    """
    valid_instances = []
    # build the human-readable filter description used in error messages
    filter_insert = 'for service "%s"' % container_alias
    if not container_alias:
        filter_insert = 'for service in working directory'
    filter_insert += ' in AWS region %s' % (ec2_client.iam.region_name)
    tag_values = {}
    split_tags = []
    if environment_type:
        filter_insert += ' in the "%s" env' % environment_type
        tag_values['Env'] = environment_type
    if resource_tags:
        from labpack.parsing.grammar import join_words
        split_tags = resource_tags.split(',')
        filter_insert += ' with tags %s' % join_words(split_tags)
    # the Env filter is applied server-side; everything else is filtered below
    if tag_values:
        instance_list = ec2_client.list_instances(tag_values=tag_values)
    else:
        instance_list = ec2_client.list_instances()
    for instance_id in instance_list:
        instance_details = ec2_client.read_instance(instance_id)
        if instance_details['tags']:
            instance_tags = set()
            search_tags = set()
            for item in split_tags:
                search_tags.add(item)
            if container_alias:
                search_tags.add(container_alias)
            # tag values are themselves comma-separated lists of tokens
            for tag in instance_details['tags']:
                for item in tag['value'].split(','):
                    instance_tags.add(item)
            # keep the instance only when every requested tag is present
            if not search_tags - instance_tags:
                valid_instances.append(instance_details)
    # verify only one instance exists
    if not valid_instances:
        raise Exception('No instances found %s.\nTry: lab list instances ec2' % filter_insert)
    elif len(valid_instances) > 1:
        raise Exception('More than one instance was found %s.\nTry adding optional flags as filters.' % filter_insert)
    instance_details = valid_instances[0]
    return instance_details
def compile_instances(region_name='', service_list=None):
    """Collect normalized summary records for every visible ec2 instance.

    :param region_name: optional AWS region overriding the configured default
    :param service_list: optional list of service records; only the first
        entry's name/path/insert fields are used (defaults to the working
        directory)
    :return: list of dicts, one per instance, with flattened fields
    """
    instance_list = []
    # check for dependencies
    from pocketlab.methods.dependencies import import_boto3
    import_boto3('ec2 platform')
    # TODO support compilation of accounts and regions
    # define service fields
    service_name = ''
    service_root = './'
    service_insert = ' in working directory'
    if service_list and isinstance(service_list, list):
        service_name = service_list[0]['name']
        service_root = service_list[0]['path']
        service_insert = service_list[0]['insert']
    # retrieve aws config (validated against the bundled json schema)
    from pocketlab import __module__
    from jsonmodel.loader import jsonLoader
    from jsonmodel.validators import jsonModel
    from pocketlab.methods.validation import validate_platform
    aws_schema = jsonLoader(__module__, 'models/aws-config.json')
    aws_model = jsonModel(aws_schema)
    aws_config = validate_platform(aws_model, service_root, service_name, '.lab')
    # retrieve instance details from ec2 (logging silenced during API calls)
    from pocketlab.init import logger
    logger.disabled = True
    ec2_cred = {
        'access_id': aws_config['aws_access_key_id'],
        'secret_key': aws_config['aws_secret_access_key'],
        'region_name': aws_config['aws_default_region'],
        'owner_id': aws_config['aws_owner_id'],
        'user_name': aws_config['aws_user_name'],
        'verbose': False
    }
    ec2_client = construct_client_ec2(ec2_cred, region_name)
    ec2_list = ec2_client.list_instances()
    # construct instance details
    for instance_id in ec2_list:
        # start from an all-empty record so every key is always present
        instance_details = {
            'id': instance_id,
            'updated': '',
            'state': '',
            'name': '',
            'login': '',
            'services': '',
            'environment': '',
            'machine': '',
            'image': '',
            'ip_address': '',
            'region': '',
            'access_key': '',
            'tags': ''
        }
        ec2_details = ec2_client.read_instance(instance_id)
        if ec2_details['tags']:
            instance_tags = []
            # reserved tag keys map to dedicated fields; all others accumulate
            for tag in ec2_details['tags']:
                if tag['key'] == 'Services':
                    instance_details['services'] = tag['value'].strip()
                elif tag['key'] == 'Env':
                    instance_details['environment'] = tag['value'].strip()
                elif tag['key'] == 'Name':
                    instance_details['name'] = tag['value'].strip()
                elif tag['key'] == 'LoginName':
                    instance_details['login'] = tag['value'].strip()
                else:
                    instance_tags.extend(tag['value'].split(','))
            if instance_tags:
                instance_details['tags'] = ','.join(instance_tags)
        # copy optional fields only when the API actually returned them
        if 'instance_type' in ec2_details.keys():
            instance_details['machine'] = ec2_details['instance_type'].strip()
        if 'key_name' in ec2_details.keys():
            instance_details['access_key'] = ec2_details['key_name']
        if 'image_id' in ec2_details.keys():
            instance_details['image'] = ec2_details['image_id']
        if 'region' in ec2_details.keys():
            instance_details['region'] = ec2_details['region']
        if 'public_ip_address' in ec2_details.keys():
            instance_details['ip_address'] = ec2_details['public_ip_address']
        if 'state' in ec2_details.keys():
            instance_details['state'] = ec2_details['state']['name']
        instance_list.append(instance_details)
    logger.disabled = False
    return instance_list
def compile_schema(file_path='models/ec2-config.json'):
    """Load a pocketlab config schema and backfill missing component rules.

    Component fields already present in the config schema win; labpack's
    iam and ec2 rule files only supply fields that are absent.

    :param file_path: module-relative path to the base config schema
    :return: merged schema dict suitable for jsonModel
    """
    from pocketlab import __module__
    from jsonmodel.loader import jsonLoader
    config_schema = jsonLoader(__module__, file_path)
    # the same merge previously appeared twice, once per rule file
    rule_schemas = (
        jsonLoader('labpack', 'authentication/aws/iam-rules.json'),
        jsonLoader('labpack', 'platforms/aws/ec2-rules.json'),
    )
    components = config_schema['components']
    for rule_schema in rule_schemas:
        for key, value in rule_schema['components'].items():
            if key in components:
                for k, v in value.items():
                    # keep any field the config schema already defines
                    components[key].setdefault(k, v)
    return config_schema
def generate_ec2(serv_name):
    """Render the default ec2 config file text for a service.

    :param serv_name: service name substituted into the template
        (falls back to 'server' when empty)
    :return: yaml text with date/service placeholders filled in and
        optional keys commented out
    """
    from pocketlab.methods.config import compile_yaml
    from labpack.records.time import labDT
    name = serv_name if serv_name else 'server'
    # compile schema
    schema = compile_schema('models/ec2-config.json')
    # compile yaml and save
    text = compile_yaml(schema)
    # stamp today's date (YYYYMMDD) and the service name into the template
    today = str(labDT.new().date()).replace('-', '')
    text = text.replace('generate-date', today)
    text = text.replace('generate-service', name)
    # comment out optional keys so the generated file documents them inert
    for key_name in ('region_name', 'iam_profile', 'elastic_ip'):
        key_pattern = '\n%s:' % key_name
        if key_pattern in text:
            text = text.replace(key_pattern, "\n# %s:" % key_name)
    return text
def initialize_clients(aws_cred, service_name, service_insert, service_root, region_name, environment_type, resource_tags, verbose):
    """Resolve a service's ec2 instance and open ec2/ssh clients for it.

    :param aws_cred: validated aws config mapping (access keys, region, ...)
    :param service_name: service whose instance should be located
    :param service_insert: human-readable service phrase for error messages
    :param service_root: root folder of the service (holds .lab/<key>.pem)
    :param region_name: optional AWS region override
    :param environment_type: optional Env tag filter
    :param resource_tags: optional comma-separated tag filter
    :param verbose: toggle progress printing
    :return: (ec2_client, ssh_client, instance_details) tuple
    :raises Exception: when no/multiple instances match or the pem is missing
    """
    from pocketlab.init import logger
    logger.disabled = True
    try:
        # retrieve instance details from ec2
        if verbose:
            print('Retrieving ec2 instance details ... ', end='', flush=True)
        try:
            ec2_cred = {
                'access_id': aws_cred['aws_access_key_id'],
                'secret_key': aws_cred['aws_secret_access_key'],
                'region_name': aws_cred['aws_default_region'],
                'owner_id': aws_cred['aws_owner_id'],
                'user_name': aws_cred['aws_user_name'],
                'verbose': False
            }
            if region_name:
                ec2_cred['region_name'] = region_name
            ec2_client = construct_client_ec2(ec2_cred)
            instance_details = retrieve_instance_details(ec2_client, service_name, environment_type, resource_tags)
            if verbose:
                print('done.')
        except Exception:
            if verbose:
                print('ERROR.')
            raise
        # verify pem file exists
        from os import path
        pem_name = instance_details['key_name']
        pem_folder = path.join(service_root, '.lab')
        pem_file = path.join(pem_folder, '%s.pem' % pem_name)
        if not path.exists(pem_file):
            raise Exception('SSH requires %s.pem file in the .lab folder of service %s.' % (pem_name, service_insert))
        # construct ssh client and open terminal
        ssh_client = establish_connection(
            aws_cred=aws_cred,
            instance_id=instance_details['instance_id'],
            pem_file=pem_file,
            service_insert=service_insert,
            region_name=region_name,
            verbose=verbose
        )
    finally:
        # BUG FIX: the module logger used to stay disabled whenever any step
        # above raised (nothing on the error paths re-enabled it); always
        # restore it on the way out.
        logger.disabled = False
    return ec2_client, ssh_client, instance_details
def establish_connection(aws_cred, instance_id, pem_file, service_insert, region_name, verbose):
    """Open a labpack sshClient to an ec2 instance using the service pem file.

    :param aws_cred: validated aws config mapping
    :param instance_id: id of the target ec2 instance
    :param pem_file: path to the instance's private key file
    :param service_insert: human-readable service phrase for error messages
    :param region_name: optional AWS region override
    :param verbose: toggle progress printing (also set on the client's iam)
    :return: connected sshClient
    :raises Exception: when the ssh handshake fails (with a chmod hint when
        the pem file permissions are too open)
    """
    from pocketlab.init import logger
    logger.disabled = True
    try:
        ec2_cred = {
            'access_id': aws_cred['aws_access_key_id'],
            'secret_key': aws_cred['aws_secret_access_key'],
            'region_name': aws_cred['aws_default_region'],
            'owner_id': aws_cred['aws_owner_id'],
            'user_name': aws_cred['aws_user_name'],
            'verbose': False
        }
        if region_name:
            ec2_cred['region_name'] = region_name
        ssh_config = {
            'instance_id': instance_id,
            'pem_file': pem_file
        }
        ssh_config.update(ec2_cred)
        ssh_config['verbose'] = False
        from labpack.platforms.aws.ssh import sshClient
        if verbose:
            print('Establishing ssh connection ... ', end='', flush=True)
        try:
            ssh_client = sshClient(**ssh_config)
            if verbose:
                print('done.')
        except Exception as err:
            if verbose:
                print('ERROR.')
            error_msg = str(err)
            # paramiko refuses group/world-readable keys; point at the fix
            if error_msg.find('private key files are NOT accessible by others') > -1:
                from os import path
                pem_root, pem_node = path.split(pem_file)
                if pem_node:
                    pem_name, pem_ext = path.splitext(pem_node)
                else:
                    pem_name, pem_ext = path.splitext(pem_root)
                error_msg += '\nTry: "chmod 600 %s.pem" in the .lab folder of service %s.' % (pem_name, service_insert)
                raise Exception(error_msg)
            else:
                raise
    finally:
        # BUG FIX: previously the logger stayed disabled whenever sshClient
        # construction failed (both raise paths skipped the reset line);
        # always re-enable it on exit.
        logger.disabled = False
    ssh_client.ec2.iam.verbose = verbose
    return ssh_client
| {
"content_hash": "d8f7b23f70356f418a603cc3133dd093",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 132,
"avg_line_length": 37.939189189189186,
"alnum_prop": 0.598753339269813,
"repo_name": "collectiveacuity/pocketLab",
"id": "3f35f07b613008f3e7bad1bb28d5d0bff465ebca",
"size": "11230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pocketlab/methods/aws.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "242167"
}
],
"symlink_target": ""
} |
import sys
from django.utils import six
from django.db.models.query import QuerySet as DjangoQuerySet
from mongoengine.errors import NotUniqueError
from mongoengine import queryset as qs
class QueryWrapper(object):
    """Minimal stand-in for the Django Query object.

    Exposes just the attributes Django-facing code inspects on
    ``queryset.query``; the underlying mongoengine query is kept raw.
    """
    # XXX: copy funcs from django; now it's just wrapper
    select_related = False
    order_by = []
    def __init__(self, q, ordering):
        self.q = q
        # normalize a falsy ordering (None, (), []) to an empty list
        self.order_by = ordering if ordering else []
class BaseQuerySet(object):
    """
    A base queryset with django-required attributes.

    Mixes Django-expected API (model, query, get_or_create, ...) onto a
    mongoengine queryset so Django components can consume it.
    """
    @property
    def model(self):
        # Django expects .model; mongoengine stores the class as _document
        return self._document
    @property
    def query(self):
        # wrap the raw mongoengine query/ordering in a Django-shaped object
        return QueryWrapper(self._query, self._ordering)
    @property
    def _prefetch_related_lookups(self):
        # Originally used in django for prefetch_related(),
        # see https://docs.djangoproject.com/en/1.9/ref/models/querysets/#prefetch-related
        # returning empty list to presume that no query prefetch is required
        return []
    def iterator(self):
        # querysets are themselves iterable; no separate iterator object
        return self
    def get_queryset(self):
        return self
    def latest(self, field_name):
        # most recent entry by field_name (descending order, first hit)
        return self.order_by("-" + field_name).first()
    def earliest(self, field_name):
        # oldest entry by field_name (ascending order, first hit)
        return self.order_by(field_name).first()
    def exists(self):
        return bool(self)
    def _clone(self):
        # Django internals call _clone(); delegate to mongoengine's clone()
        return self.clone()
    @property
    def ordered(self):
        """
        Returns True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model.
        """
        if self._ordering:
            return True
        elif self._document._meta.ordering:
            return True
        else:
            return False
    # graft Django's own implementations straight from its QuerySet class;
    # they only rely on the API provided above
    get_or_create = DjangoQuerySet.__dict__["get_or_create"]
    _extract_model_params = DjangoQuerySet.__dict__["_extract_model_params"]
    def _create_object_from_params(self, lookup, params):
        """
        Tries to create an object using passed params.
        Used by get_or_create and update_or_create
        """
        try:
            obj = self.create(**params)
            return obj, True
        except NotUniqueError:
            # capture the original failure; if the racing get() below also
            # misses, re-raise the NotUniqueError with its original traceback
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                pass
            six.reraise(*exc_info)
class QuerySet(BaseQuerySet, qs.QuerySet):
    """Caching mongoengine QuerySet with the Django compatibility mixin."""
    pass
class QuerySetNoCache(BaseQuerySet, qs.QuerySetNoCache):
    """Non-caching mongoengine QuerySet with the Django compatibility mixin."""
    pass
class QuerySetManager(qs.QuerySetManager):
    # hand out the Django-compatible QuerySet by default
    default = QuerySet
| {
"content_hash": "9714d6c0d493a83e551a3725ad499345",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 90,
"avg_line_length": 24.784313725490197,
"alnum_prop": 0.6139240506329114,
"repo_name": "unixhot/opencmdb",
"id": "cff5bda49f9a533d286e33fe8f6d500d9115c586",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_mongoengine/queryset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "61198"
},
{
"name": "HTML",
"bytes": "44545"
},
{
"name": "JavaScript",
"bytes": "503133"
},
{
"name": "Python",
"bytes": "244232"
},
{
"name": "Vue",
"bytes": "95038"
}
],
"symlink_target": ""
} |
"""Defer to run_test_cases.py."""
import os
import optparse
import sys
# Repository root: three dirname() hops up from this file's absolute path
# (tools/sharding_supervisor/sharding_supervisor.py -> checkout root).
ROOT_DIR = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def pop_known_arguments(args):
  """Split run_test_cases-specific flags out of a raw argument list.

  Returns (run_test_cases_args, remaining_args).  Flags run_test_cases
  understands are collected (and in some cases synthesized from the target
  binary's name); everything else is passed through untouched.
  """
  passthrough = []
  extracted = []
  for arg in args:
    if arg.startswith(('--gtest_filter=', '--gtest_output=', '--clusters=')):
      extracted.append(arg)
    elif arg in ('--run-manual', '--verbose'):
      extracted.append(arg)
    elif arg == '--gtest_print_time':
      # Obsolete flag; drop it entirely.
      continue
    elif 'interactive_ui_tests' in arg:
      # Run this test in a single thread. It is useful to run it under
      # run_test_cases so automatic flaky test workaround is still used.
      extracted.append('-j1')
      passthrough.append(arg)
    elif 'browser_tests' in arg:
      # Test cases in this executable fire up *a lot* of child processes,
      # causing huge memory bottleneck. So use less than N-cpus jobs.
      extracted.append('--use-less-jobs')
      passthrough.append(arg)
    else:
      passthrough.append(arg)
  # Honor an explicit --jobs flag if one was supplied (first one wins).
  jobs_flags = [arg for arg in args if arg.startswith('--jobs=')]
  if jobs_flags:
    extracted.append(jobs_flags[0])
  return extracted, passthrough
def main():
  """Translate legacy sharding_supervisor flags and defer to run_test_cases.

  Returns run_test_cases.main()'s exit code.
  """
  parser = optparse.OptionParser()
  group = optparse.OptionGroup(
      parser, 'Compatibility flag with the old sharding_supervisor')
  group.add_option(
      '--no-color', action='store_true', help='Ignored')
  group.add_option(
      '--retry-failed', action='store_true', help='Ignored')
  group.add_option(
      '-t', '--timeout', type='int', help='Kept as --timeout')
  # BUG FIX: these two help strings were swapped; per the cmd list below,
  # --total-slaves maps to --shards and --slave-index maps to --index.
  group.add_option(
      '--total-slaves', type='int', default=1, help='Converted to --shards')
  group.add_option(
      '--slave-index', type='int', default=0, help='Converted to --index')
  parser.add_option_group(group)
  group = optparse.OptionGroup(
      parser, 'Options of run_test_cases.py passed through')
  group.add_option(
      '--retries', type='int', help='Kept as --retries')
  group.add_option(
      '--verbose', action='count', default=0, help='Kept as --verbose')
  parser.add_option_group(group)
  parser.disable_interspersed_args()
  options, args = parser.parse_args()
  # make run_test_cases importable from the vendored swarm_client checkout
  swarm_client_dir = os.path.join(
      ROOT_DIR, 'tools', 'swarm_client', 'googletest')
  sys.path.insert(0, swarm_client_dir)
  cmd = [
      '--shards', str(options.total_slaves),
      '--index', str(options.slave_index),
      '--no-dump',
      '--no-cr',
  ]
  if options.timeout is not None:
    cmd.extend(['--timeout', str(options.timeout)])
  if options.retries is not None:
    cmd.extend(['--retries', str(options.retries)])
  if options.verbose is not None:
    cmd.extend(['--verbose'] * options.verbose)
  run_test_cases_extra_args, rest = pop_known_arguments(args)
  import run_test_cases  # pylint: disable=F0401
  return run_test_cases.main(cmd + run_test_cases_extra_args + ['--'] + rest)
# Script entry point: exit with run_test_cases' status code.
if __name__ == '__main__':
  sys.exit(main())
| {
"content_hash": "53a55c2c655546d094f7fd5cf1d1afab",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 77,
"avg_line_length": 31.608247422680414,
"alnum_prop": 0.642204827136334,
"repo_name": "mogoweb/chromium-crosswalk",
"id": "99670718a32952849aba9dc97857e8366cdd9852",
"size": "3255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/sharding_supervisor/sharding_supervisor.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "54831"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40940503"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "182703853"
},
{
"name": "CSS",
"bytes": "799795"
},
{
"name": "DOT",
"bytes": "1873"
},
{
"name": "Java",
"bytes": "4807735"
},
{
"name": "JavaScript",
"bytes": "20714038"
},
{
"name": "Mercury",
"bytes": "10299"
},
{
"name": "Objective-C",
"bytes": "985558"
},
{
"name": "Objective-C++",
"bytes": "6205987"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1213389"
},
{
"name": "Python",
"bytes": "9735121"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1305641"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
} |
"""
The manager class for the blog models
"""
from django.db import models
from django.db.models.query import QuerySet
from django.db.models.query_utils import Q
from django.utils.timezone import now
class NewsItemQuerySet(QuerySet):
    """QuerySet with publication-state helpers for news items."""
    def published(self):
        """Return only entries that are currently published.

        An entry counts as published when its status is ``published``,
        its publication date is unset or in the past, and its end date
        is unset or in the future.
        """
        from .models import NewsItem
        status_filter = self.filter(status=NewsItem.PostStatus.published)
        started = status_filter.filter(
            Q(publication_date__isnull=True) | Q(publication_date__lte=now()))
        not_ended = started.filter(
            Q(publication_end_date__isnull=True) |
            Q(publication_end_date__gte=now()))
        return not_ended
class NewsItemManager(models.Manager):
    """Manager exposing the custom NewsItemQuerySet helpers on the model."""
    def get_queryset(self):
        """Build the custom queryset bound to this manager's model and db."""
        queryset = NewsItemQuerySet(self.model, using=self._db)
        return queryset
    def published(self):
        """Return only entries that are currently published."""
        return self.get_queryset().published()
| {
"content_hash": "a175586026f466ea1d2e46475528f70e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 26.473684210526315,
"alnum_prop": 0.6312127236580517,
"repo_name": "jfterpstra/bluebottle",
"id": "9960a399da43773a0827c7c1d9d8614f416ac470",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bluebottle/news/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16556"
},
{
"name": "HTML",
"bytes": "173443"
},
{
"name": "JavaScript",
"bytes": "434"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "1694079"
},
{
"name": "Shell",
"bytes": "2951"
},
{
"name": "Smarty",
"bytes": "4317"
}
],
"symlink_target": ""
} |
import numpy as np
from threeML.plugins.XYLike import XYLike
from threeML.utils.histogram import Histogram
from threeML.plugins.OGIP.likelihood_functions import poisson_log_likelihood_ideal_bkg
from threeML.plugins.OGIP.likelihood_functions import half_chi2
__instrument_name = "n.a."
class HistLike(XYLike):
    def __init__(self,name, histogram):
        """
        Fit a 3ML Histogram such that the model is evaluated via its integral
        over the histogram bins.

        :param name: plugin name
        :param histogram: 3ML histogram instance
        """
        # reject anything that is not a 3ML Histogram up front
        assert isinstance(histogram,Histogram), "input must be a 3ML histogram"
        self._histogram = histogram #type: Histogram
        # XYLike is initialized on the bin centers; is_poisson selects which
        # likelihood get_log_like() evaluates later
        super(HistLike, self).__init__(name=name,
                                       x=self._histogram.mid_points,
                                       y=self._histogram.contents,
                                       yerr=self._histogram.errors,
                                       poisson_data=self._histogram.is_poisson)
    def _get_diff_flux_and_integral(self):
        """Build closures evaluating the summed point-source model.

        :return: (differential_flux, integral) pair; the first evaluates the
            stacked differential flux at given energies, the second
            approximates its integral over [e1, e2] via Simpson's rule.
        """
        n_point_sources = self._likelihood_model.get_number_of_point_sources()
        # Make a function which will stack all point sources (HISTLike does not support spatial dimension)
        def differential_flux(energies):
            fluxes = self._likelihood_model.get_point_source_fluxes(0, energies)
            # If we have only one point source, this will never be executed
            for i in range(1, n_point_sources):
                fluxes += self._likelihood_model.get_point_source_fluxes(i, energies)
            return fluxes
        # The following integrates the diffFlux function using Simpson's rule
        # This assume that the intervals e1,e2 are all small, which is guaranteed
        # for any reasonable response matrix, given that e1 and e2 are Monte-Carlo
        # energies. It also assumes that the function is smooth in the interval
        # e1 - e2 and twice-differentiable, again reasonable on small intervals for
        # decent models. It might fail for models with too sharp features, smaller
        # than the size of the monte carlo interval.
        def integral(e1, e2):
            # Simpson's rule
            return (e2 - e1) / 6.0 * (differential_flux(e1)
                                      + 4 * differential_flux((e1 + e2) / 2.0)
                                      + differential_flux(e2))
        return differential_flux, integral
    def get_log_like(self):
        """
        Return the value of the log-likelihood with the current values for the
        parameters
        """
        # expected value per bin from the current point-source model
        expectation = self.get_model_flux()
        if self._is_poisson:
            # Poisson log-likelihood against an ideal (zero) background
            return np.sum(poisson_log_likelihood_ideal_bkg(self._y, np.zeros_like(self._y), expectation))
        else:
            # Chi squared: negative sum of the per-bin terms from half_chi2
            chi2_ = half_chi2(self._y, self._yerr, expectation)
            # non-finite terms would silently corrupt the fit; fail loudly
            assert np.all(np.isfinite(chi2_))
            return np.sum(chi2_) * (-1)
def get_model_flux(self):
_, integral_function = self._get_diff_flux_and_integral()
model = np.array([integral_function(xmin, xmax) for xmin, xmax in self._histogram.bin_stack])
return model
@property
def histogram(self):
return self._histogram | {
"content_hash": "44bb9ec07cc3c3272ee634bdc762a504",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 106,
"avg_line_length": 32.39622641509434,
"alnum_prop": 0.6086196854979615,
"repo_name": "volodymyrss/3ML",
"id": "884f53f159ef6e1c61c6e007cbeb0b6685461be0",
"size": "3434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threeML/plugins/HistLike.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1896"
},
{
"name": "Python",
"bytes": "1237912"
},
{
"name": "Shell",
"bytes": "6442"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import string
import libxml2
import libxslt
import re
import math
# Some globals
# Reference resolution: lengths are normalized to pixels at 96 dpi.
pixelsPerInch = 96.0
# Pixels-per-unit conversion table used by convertLength().
unitHash = { 'in': pixelsPerInch,
             'cm': pixelsPerInch / 2.54,
             'mm': pixelsPerInch / 25.4,
             'pc': (pixelsPerInch / 72.0) * 12,
             'pt': pixelsPerInch / 72.0,
             'px': 1 }
# ======================================================================
def adjustColumnWidths(ctx, nodeset):
    """Compute final column widths for a table and write them back in place.

    Column specs may be relative ("N*"), absolute ("Ncm", "Npt", ...) or a
    mixture.  All-absolute specs are emitted as-is, all-relative specs become
    percentages, and mixtures are resolved against the (nominal or explicit)
    table width.  The col / fo:table-column nodes in *nodeset* are modified
    directly.
    """
    #
    # Small check to verify the context is correctly accessed
    #
    try:
        pctxt = libxslt.xpathParserContext(_obj=ctx)
        ctxt = pctxt.context()
        tctxt = ctxt.transformContext()
    except:
        # NOTE(review): if this fails, tctxt is unbound and the lookups
        # below raise NameError; kept as-is from the original.
        pass
    # Get the nominal table width
    varString = lookupVariable(tctxt, "nominal.table.width", None)
    if varString == None:
        nominalWidth = 6 * pixelsPerInch
    else:
        nominalWidth = convertLength(varString)
    # Get the requested table width
    tableWidth = lookupVariable(tctxt, "table.width", "100%")
    foStylesheet = (tctxt.variableLookup("stylesheet.result.type", None) == "fo")
    relTotal = 0
    relParts = []
    absTotal = 0
    absParts = []
    colgroup = libxml2.xmlNode(_obj = nodeset[0])
    # If this is an foStylesheet, we've been passed a list of fo:table-columns.
    # Otherwise we've been passed a colgroup that contains a list of cols.
    if foStylesheet:
        colChildren = colgroup
    else:
        colChildren = colgroup.children
    col = colChildren
    while col != None:
        if foStylesheet:
            width = col.prop("column-width")
        else:
            width = col.prop("width")
        if width == None:
            width = "1*"
        relPart = 0.0
        absPart = 0.0
        # BUG FIX: string.find()/string.split() were removed from the Python 3
        # string module; the str methods have identical semantics.
        starPos = width.find("*")
        if starPos >= 0:
            relPart, absPart = width.split("*", 2)
            relPart = float(relPart)
            relTotal = relTotal + float(relPart)
        else:
            absPart = width
        pixels = convertLength(absPart)
        absTotal = absTotal + pixels
        relParts.append(relPart)
        absParts.append(pixels)
        # BUG FIX: 2to3 mangled the sibling traversal into ``col.__next__``,
        # which libxml2's node bindings do not define; ``next`` is the
        # sibling property.
        col = col.next
    # Ok, now we have the relative widths and absolute widths in
    # two parallel arrays.
    #
    # - If there are no relative widths, output the absolute widths
    # - If there are no absolute widths, output the relative widths
    # - If there are a mixture of relative and absolute widths,
    #   - If the table width is absolute, turn these all into absolute
    #     widths.
    #   - If the table width is relative, turn these all into absolute
    #     widths in the nominalWidth and then turn them back into
    #     percentages.
    widths = []
    if relTotal == 0:
        for absPart in absParts:
            if foStylesheet:
                inches = absPart / pixelsPerInch
                widths.append("%4.2fin" % inches)
            else:
                widths.append("%d" % absPart)
    elif absTotal == 0:
        for relPart in relParts:
            rel = relPart / relTotal * 100
            widths.append(rel)
        widths = correctRoundingError(widths)
    else:
        pixelWidth = nominalWidth
        if tableWidth.find("%") < 0:
            pixelWidth = convertLength(tableWidth)
        if pixelWidth <= absTotal:
            print("Table is wider than table width")
        else:
            pixelWidth = pixelWidth - absTotal
        absTotal = 0
        for count in range(len(relParts)):
            rel = relParts[count] / relTotal * pixelWidth
            relParts[count] = rel + absParts[count]
            absTotal = absTotal + rel + absParts[count]
        if tableWidth.find("%") < 0:
            for count in range(len(relParts)):
                if foStylesheet:
                    pixels = relParts[count]
                    inches = pixels / pixelsPerInch
                    widths.append("%4.2fin" % inches)
                else:
                    widths.append(relParts[count])
        else:
            for count in range(len(relParts)):
                rel = relParts[count] / absTotal * 100
                widths.append(rel)
            widths = correctRoundingError(widths)
    # Danger, Will Robinson! In-place modification of the result tree!
    # Side-effect free? We don' need no steenkin' side-effect free!
    count = 0
    col = colChildren
    while col != None:
        if foStylesheet:
            col.setProp("column-width", widths[count])
        else:
            col.setProp("width", widths[count])
        count = count + 1
        col = col.next
    return nodeset
def convertLength(length):
    """Convert a CSS-style length string (e.g. "3.4in", "12pt") into pixels.

    Unrecognized unit suffixes fall back to pixelsPerInch (with a warning
    printed); a string containing no parseable number yields 0.

    :param length: length string with numeric magnitude and unit suffix
    :return: length in pixels (0 on parse failure)
    """
    global pixelsPerInch
    global unitHash
    # BUG FIX: raw string -- "\d" and "\S" are invalid escape sequences in a
    # plain string literal and are a hard error on current Python versions.
    m = re.search(r'([+-]?[\d\.]+)(\S+)', length)
    if m != None and m.lastindex > 1:
        unit = pixelsPerInch
        if m.group(2) in unitHash:
            unit = unitHash[m.group(2)]
        else:
            print("Unrecognized length: " + m.group(2))
        pixels = unit * float(m.group(1))
    else:
        pixels = 0
    return pixels
def correctRoundingError(floatWidths):
    """Truncate percentage widths to integers and spread the rounding error.

    Each float is floored, then the deficit from 100% is handed out one
    whole percent at a time as the per-column error share accumulates, so
    the returned "<n>%" strings sum (close) to 100%.
    """
    truncated = [math.floor(width) for width in floatWidths]
    totalError = 100 - sum(truncated)
    columnError = totalError / len(truncated)
    widths = []
    error = 0
    for width in truncated:
        error = error + columnError
        if error >= 1.0:
            # a whole percent has accumulated; give it to this column
            adj = math.floor(error)
            error = error - adj
            widths.append("%d%%" % (width + adj))
        else:
            widths.append("%d%%" % width)
    return widths
def lookupVariable(tctxt, varName, default):
    """Look up an XSLT stylesheet variable and coerce it to a plain string.

    :param tctxt: libxslt transform context (anything with variableLookup)
    :param varName: name of the stylesheet variable
    :param default: value returned when the variable is not defined
    :return: the variable's string value, or default when unset
    """
    varString = tctxt.variableLookup(varName, None)
    if varString is None:
        return default
    # If it's a list (a node set), take the first element
    if isinstance(varString, list):
        varString = varString[0]
    # If it's not already a string it must be a node; take its text content
    if not isinstance(varString, str):
        varString = varString.content
    return varString
# ======================================================================
# Random notes...
#once you have a node which is a libxml2 python xmlNode wrapper all common
#operations are possible:
# .children .last .parent .next .prev .doc for navigation
# .content .type for introspection
# .prop("attribute_name") to lookup attribute values
# # Now make a nodeset to return
# # Danger, Will Robinson! This creates a memory leak!
# newDoc = libxml2.newDoc("1.0")
# newColGroup = newDoc.newDocNode(None, "colgroup", None)
# newDoc.addChild(newColGroup)
# col = colgroup.children
# while col != None:
# newCol = newDoc.newDocNode(None, "col", None)
# newCol.copyPropList(col);
# newCol.setProp("width", "4")
# newColGroup.addChild(newCol)
# col = col.next
| {
"content_hash": "e0ab8aed95c43c8946f1ec51b5be2f1c",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 81,
"avg_line_length": 30.252100840336134,
"alnum_prop": 0.575,
"repo_name": "timj/scons",
"id": "e7b8cfa9f71c35b217ad495170132f44487b6753",
"size": "7282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/docbook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "593"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7393581"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52480"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import logging
from functools import wraps
import numpy as np
from matplotlib.figure import Figure
from ..external.modest_image import extract_matched_slices
from ..core.exceptions import IncompatibleAttribute
from ..core.data import Data
from ..core.util import lookup_class
from ..core.subset import Subset, RoiSubsetState
from ..core.roi import PolygonalROI
from ..core.callback_property import (
callback_property, CallbackProperty)
from ..core.edit_subset_mode import EditSubsetMode
from .viz_client import VizClient, init_mpl
from .layer_artist import (ScatterLayerArtist, LayerArtistContainer,
ImageLayerArtist, SubsetImageLayerArtist,
RGBImageLayerArtist)
def requires_data(func):
    """Decorator that turns a method into a no-op unless its ImageClient
    (the first positional argument) has a non-None display_data."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        client = args[0]
        if client.display_data is None:
            return None
        return func(*args, **kwargs)
    return wrapper
class ImageClient(VizClient):
display_data = CallbackProperty(None)
display_attribute = CallbackProperty(None)
    def __init__(self, data, figure=None, axes=None, artist_container=None):
        """Create an image client rendering *data* into a matplotlib figure.

        :param data: data object handed through to VizClient
        :param figure: optional matplotlib figure (one is created when None)
        :param axes: must be None; ImageClient builds its own WCS-aware axes
        :param artist_container: optional LayerArtistContainer to reuse
        :raises ValueError: if an axes instance is passed in
        """
        if axes is not None:
            raise ValueError("ImageClient does not accept an axes")
        figure, axes = init_mpl(figure, axes, wcs=True)
        VizClient.__init__(self, data)
        self.artists = artist_container
        if self.artists is None:
            self.artists = LayerArtistContainer()
        # display state: current slice, view window/slices, cached image data
        self._slice = None
        self._view_window = None
        self._view = None
        self._image = None
        self._override_image = None
        self._ax = axes
        # images carry their own coordinate labels; hide the mpl tick marks
        self._ax.get_xaxis().set_ticks([])
        self._ax.get_yaxis().set_ticks([])
        self._figure = figure
        self._norm_cache = {}
        # custom axes formatter
        def format_coord(x, y):
            # shows world coordinates in the mpl status bar when data is loaded
            data = self.display_data
            if data is None:
                # MPL default method
                return type(self._ax).format_coord(self._ax, x, y)
            info = self.point_details(x, y)
            return ' '.join(info['labels'])
        self._ax.format_coord = format_coord
        # refresh after interactive pan/zoom gestures finish
        self._cid = self._ax.figure.canvas.mpl_connect('button_release_event',
                                                       self.check_update)
        if hasattr(self._ax.figure.canvas, 'homeButton'):
            # test code doesn't always use Glue's custom FigureCanvas
            self._ax.figure.canvas.homeButton.connect(self.check_update)
    def point_details(self, x, y):
        """Describe the data under figure position (x, y).

        :return: dict with pixel coordinates, world coordinates, formatted
            axis labels, and the data value at that point (None when the
            point falls outside the data).
        """
        data = self.display_data
        pix = self._pixel_coords(x, y)
        world = data.coords.pixel2world(*pix[::-1])
        world = world[::-1]  # reverse for numpy convention
        labels = ['%s=%s' % (data.get_world_component_id(i).label, w)
                  for i, w in enumerate(world)]
        view = []
        for p, s in zip(pix, data.shape):
            p = int(p)
            if not (0 <= p < s):
                # out of bounds along this axis -> no value to report
                value = None
                break
            view.append(slice(p, p + 1))
        else:
            # for-else: the loop completed without break, point is in bounds
            if self._override_image is None:
                value = self.display_data[self.display_attribute, view]
            else:
                value = self._override_image[int(y), int(x)]
            value = value.ravel()[0]
        return dict(pix=pix, world=world, labels=labels, value=value)
@callback_property
def slice(self):
"""
Returns a tuple describing the current slice through the data
The tuple has length equal to the dimensionality of the display
data. Each entry is either:
'x' if the dimension is mapped to the X image axis
'y' if the dimension is mapped to the Y image axis
a number, indicating which fixed slice the dimension is restricted to
"""
if self._slice is not None:
return self._slice
if self.display_data is None:
return tuple()
ndim = self.display_data.ndim
if ndim == 1:
self._slice = ('x',)
elif ndim == 2:
self._slice = ('y', 'x')
else:
self._slice = (0,) * (ndim - 2) + ('y', 'x')
return self._slice
@slice.setter
def slice(self, value):
if self.slice == tuple(value):
return
relim = value.index('x') != self._slice.index('x') or \
value.index('y') != self._slice.index('y')
self._slice = tuple(value)
self._clear_override()
self._update_axis_labels()
self._update_data_plot(relim=relim)
self._update_subset_plots()
self._redraw()
@property
def axes(self):
return self._ax
@property
def is_3D(self):
"""
Returns True if the display data has 3 dimensions """
if not self.display_data:
return False
return len(self.display_data.shape) == 3
@property
def slice_ind(self):
"""
For 3D data, returns the pixel index of the current slice.
Otherwise, returns None
"""
if self.is_3D:
for s in self.slice:
if s not in ['x', 'y']:
return s
return None
@property
def image(self):
return self._image
@requires_data
def override_image(self, image):
"""Temporarily override the current slice view with another
image (i.e., an aggregate)
"""
self._override_image = image
for a in self.artists[self.display_data]:
if isinstance(a, ImageLayerArtist):
a.override_image(image)
self._update_data_plot()
self._redraw()
def _clear_override(self):
self._override_image = None
for a in self.artists[self.display_data]:
if isinstance(a, ImageLayerArtist):
a.clear_override()
@slice_ind.setter
def slice_ind(self, value):
if self.is_3D:
slc = [s if s in ['x', 'y'] else value for s in self.slice]
self.slice = slc
self._update_data_plot()
self._update_subset_plots()
self._redraw()
else:
raise IndexError("Can only set slice_ind for 3D images")
def can_image_data(self, data):
return data.ndim > 1
def _ensure_data_present(self, data):
if data not in self.artists:
self.add_layer(data)
def check_update(self, *args):
logging.getLogger(__name__).debug("check update")
vw = _view_window(self._ax)
if vw != self._view_window:
logging.getLogger(__name__).debug("updating")
self._update_data_plot()
self._update_subset_plots()
self._redraw()
self._view_window = vw
def set_data(self, data, attribute=None):
if not self.can_image_data(data):
return
self._ensure_data_present(data)
self._slice = None
attribute = attribute or _default_component(data)
self.display_data = data
self.display_attribute = attribute
self._update_axis_labels()
self._update_data_plot(relim=True)
self._update_subset_plots()
self._redraw()
def _update_wcs_axes(self, data, slc):
wcs = getattr(data.coords, 'wcs', None)
if wcs is not None and hasattr(self.axes, 'reset_wcs'):
self.axes.reset_wcs(wcs, slices=slc[::-1])
@requires_data
def _update_axis_labels(self):
labels = _axis_labels(self.display_data, self.slice)
self._update_wcs_axes(self.display_data, self.slice)
self._ax.set_xlabel(labels[1])
self._ax.set_ylabel(labels[0])
def set_attribute(self, attribute):
if not self.display_data or \
attribute not in self.display_data.component_ids():
raise IncompatibleAttribute(
"Attribute not in data's attributes: %s" % attribute)
if self.display_attribute is not None:
self._norm_cache[self.display_attribute] = self.get_norm()
self.display_attribute = attribute
if attribute in self._norm_cache:
self.set_norm(norm=self._norm_cache[attribute])
else:
self.clear_norm()
self._update_data_plot()
self._redraw()
def _redraw(self):
"""
Re-render the screen
"""
self._ax.figure.canvas.draw()
@requires_data
def set_norm(self, **kwargs):
for a in self.artists[self.display_data]:
a.set_norm(**kwargs)
self._update_data_plot()
self._redraw()
@requires_data
def clear_norm(self):
for a in self.artists[self.display_data]:
a.clear_norm()
@requires_data
def get_norm(self):
a = self.artists[self.display_data][0]
return a.norm
@requires_data
def set_cmap(self, cmap):
for a in self.artists[self.display_data]:
a.cmap = cmap
a.redraw()
def _build_view(self, matched=False):
att = self.display_attribute
shp = self.display_data.shape
shp_2d = _2d_shape(shp, self.slice)
x, y = np.s_[:], np.s_[:]
if matched:
v = extract_matched_slices(self._ax, shp_2d)
x = slice(v[0], v[1], v[2])
y = slice(v[3], v[4], v[5])
slc = list(self.slice)
slc[slc.index('x')] = x
slc[slc.index('y')] = y
return (att,) + tuple(slc)
@requires_data
def _update_data_plot(self, relim=False):
"""
Re-sync the main image and its subsets
"""
if relim:
self.relim()
view = self._build_view(matched=True)
self._image = self.display_data[view]
transpose = self.slice.index('x') < self.slice.index('y')
self._view = view
for a in list(self.artists):
if (not isinstance(a, ScatterLayerArtist)) and \
a.layer.data is not self.display_data:
self.artists.remove(a)
else:
a.update(view, transpose)
for a in self.artists[self.display_data]:
a.update(view, transpose=transpose)
def relim(self):
shp = _2d_shape(self.display_data.shape, self.slice)
self._ax.set_xlim(0, shp[1])
self._ax.set_ylim(0, shp[0])
def _update_subset_single(self, s, redraw=False):
"""
Update the location and visual properties
of each point in a single subset
Parameters:
----------
s: A subset instance
The subset to refresh.
"""
logging.getLogger(__name__).debug("update subset single: %s", s)
self._update_scatter_layer(s)
if s not in self.artists:
return
if s.data is not self.display_data:
return
view = self._build_view(matched=True)
transpose = self.slice.index('x') < self.slice.index('y')
for a in self.artists[s]:
a.update(view, transpose)
if redraw:
self._redraw()
@property
def _slice_ori(self):
if not self.is_3D:
return None
for i, s in enumerate(self.slice):
if s not in ['x', 'y']:
return i
@requires_data
def apply_roi(self, roi):
subset_state = RoiSubsetState()
xroi, yroi = roi.to_polygon()
x, y = self._get_plot_attributes()
subset_state.xatt = x
subset_state.yatt = y
subset_state.roi = PolygonalROI(xroi, yroi)
mode = EditSubsetMode()
mode.update(self.data, subset_state, focus_data=self.display_data)
def _remove_subset(self, message):
self.delete_layer(message.sender)
def delete_layer(self, layer):
if layer not in self.artists:
return
for a in self.artists.pop(layer):
a.clear()
if layer is self.display_data:
self.display_data = None
if isinstance(layer, Data):
for subset in layer.subsets:
self.delete_layer(subset)
self._redraw()
def _remove_data(self, message):
self.delete_layer(message.data)
for s in message.data.subsets:
self.delete_layer(s)
def init_layer(self, layer):
# only auto-add subsets if they are of the main image
if isinstance(layer, Subset) and layer.data is not self.display_data:
return
self.add_layer(layer)
def rgb_mode(self, enable=None):
""" Query whether RGB mode is enabled, or toggle RGB mode
:param enable: bool, or None
If True or False, explicitly enable/disable RGB mode.
If None, check if RGB mode is enabled
:rtype: LayerArtist or None
If RGB mode is enabled, returns an RGBImageLayerArtist
If enable=False, return the new ImageLayerArtist
"""
# XXX need to better handle case where two RGBImageLayerArtists
# are created
if enable is None:
for a in self.artists:
if isinstance(a, RGBImageLayerArtist):
return a
return None
result = None
layer = self.display_data
if enable:
layer = self.display_data
v = self._view or self._build_view(matched=True)
a = RGBImageLayerArtist(layer, self._ax, last_view=v)
for artist in self.artists.pop(layer):
artist.clear()
self.artists.append(a)
result = a
else:
for artist in list(self.artists):
if isinstance(artist, RGBImageLayerArtist):
artist.clear()
self.artists.remove(artist)
result = self.add_layer(layer)
self._update_data_plot()
self._redraw()
return result
def add_layer(self, layer):
if layer in self.artists:
return self.artists[layer][0]
if layer.data not in self.data:
raise TypeError("Data not managed by client's data collection")
if not self.can_image_data(layer.data):
# if data is 1D, try to scatter plot
if len(layer.data.shape) == 1:
return self.add_scatter_layer(layer)
logging.getLogger(__name__).warning(
"Cannot visualize %s. Aborting", layer.label)
return
if isinstance(layer, Data):
result = ImageLayerArtist(layer, self._ax)
self.artists.append(result)
for s in layer.subsets:
self.add_layer(s)
elif isinstance(layer, Subset):
result = SubsetImageLayerArtist(layer, self._ax)
self.artists.append(result)
self._update_subset_single(layer)
else:
raise TypeError("Unrecognized layer type: %s" % type(layer))
return result
def add_scatter_layer(self, layer):
logging.getLogger(
__name__).debug('Adding scatter layer for %s' % layer)
if layer in self.artists:
logging.getLogger(__name__).debug('Layer already present')
return
result = ScatterLayerArtist(layer, self._ax)
self.artists.append(result)
self._update_scatter_layer(layer)
return result
@requires_data
def _update_scatter_layer(self, layer):
xatt, yatt = self._get_plot_attributes()
for a in self.artists[layer]:
if not isinstance(a, ScatterLayerArtist):
continue
a.xatt = xatt
a.yatt = yatt
if self.is_3D:
zatt = self.display_data.get_pixel_component_id(
self._slice_ori)
subset = (
zatt > self.slice_ind) & (zatt <= self.slice_ind + 1)
a.emphasis = subset
else:
a.emphasis = None
a.update()
a.redraw()
self._redraw()
@requires_data
def _get_plot_attributes(self):
x, y = _slice_axis(self.display_data.shape, self.slice)
ids = self.display_data.pixel_component_ids
return ids[x], ids[y]
def _pixel_coords(self, x, y):
"""From a slice coordinate (x,y), return the full (possibly
>2D) numpy index into the full data
*Note*
The inputs to this function are the reverse of numpy convention
(horizontal axis first, then vertical)
*Returns*
Either (x,y) or (x,y,z)
"""
result = list(self.slice)
result[result.index('x')] = x
result[result.index('y')] = y
return result
def is_visible(self, layer):
return all(a.visible for a in self.artists[layer])
def set_visible(self, layer, state):
for a in self.artists[layer]:
a.visible = state
def set_slice_ori(self, ori):
if not self.is_3D:
raise IndexError("Can only set slice_ori for 3D images")
if ori == 0:
self.slice = (0, 'y', 'x')
elif ori == 1:
self.slice = ('y', 0, 'x')
elif ori == 2:
self.slice = ('y', 'x', 0)
else:
raise ValueError("Orientation must be 0, 1, or 2")
def restore_layers(self, layers, context):
""" Restore a list of glue-serialized layer dicts """
for layer in layers:
c = lookup_class(layer.pop('_type'))
props = dict((k, v if k == 'stretch' else context.object(v))
for k, v in layer.items())
l = props['layer']
if c == ScatterLayerArtist:
l = self.add_scatter_layer(l)
elif c == ImageLayerArtist or c == SubsetImageLayerArtist:
if isinstance(l, Data):
self.set_data(l)
l = self.add_layer(l)
elif c == RGBImageLayerArtist:
r = props.pop('r')
g = props.pop('g')
b = props.pop('b')
self.display_data = l
self.display_attribute = r
l = self.rgb_mode(True)
l.r = r
l.g = g
l.b = b
else:
raise ValueError("Cannot restore layer of type %s" % l)
l.properties = props
def _2d_shape(shape, slc):
"""Return the shape of the 2D slice through a 2 or 3D image
"""
# - numpy ordering here
return shape[slc.index('y')], shape[slc.index('x')]
def _slice_axis(shape, slc):
"""
Return a 2-tuple of which axes in a dataset lie along the
x and y axes of the image
:param shape: Shape of original data. tuple of ints
:param slc: Slice through the data, tuple of ints, 'x', and 'y'
"""
return slc.index('x'), slc.index('y')
def _axis_labels(data, slc):
shape = data.shape
names = [data.get_world_component_id(i).label
for i in range(len(shape))]
return names[slc.index('y')], names[slc.index('x')]
def _view_window(ax):
""" Return a tuple describing the view window of an axes object.
The contents should not be used directly, Rather, several
return values should be compared with == to determine if the
window has been panned/zoomed
"""
ext = ax.transAxes.transform([1, 1]) - ax.transAxes.transform([0, 0])
xlim, ylim = ax.get_xlim(), ax.get_ylim()
result = xlim[0], ylim[0], xlim[1], ylim[1], ext[0], ext[1]
logging.getLogger(__name__).debug("view window: %s", result)
return result
def _default_component(data):
"""Choose a default ComponentID to display for data
Returns PRIMARY if present
"""
cid = data.find_component_id('PRIMARY')
if cid is not None:
return cid
return data.component_ids()[0]
| {
"content_hash": "edf8ad4e0fa0d72d330f02b67f2402dc",
"timestamp": "",
"source": "github",
"line_count": 638,
"max_line_length": 78,
"avg_line_length": 31.365203761755485,
"alnum_prop": 0.5567937634301134,
"repo_name": "bsipocz/glue",
"id": "871400233b51748d931c6493a3d4b373a3738e4a",
"size": "20011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/clients/image_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Click Command definition for the subscription command group which includes
commands for managing subscriptions to indications on WBEM servers including
adding, displaying, and removing destinations, filters, and subscriptions from
a WBEM server.
This command group is based on the pywbem WBEMSubscriptionManager class.
"""
from __future__ import absolute_import
import re
import click
from pywbem import CIMError, Error, WBEMSubscriptionManager, \
CIM_ERR_ALREADY_EXISTS
from pywbem._subscription_manager import SUBSCRIPTION_CLASSNAME, \
DESTINATION_CLASSNAME, FILTER_CLASSNAME
from .pywbemcli import cli
from .._options import add_options, help_option
from ._display_cimobjects import display_cim_objects, fold_strings
from .._click_extensions import PywbemtoolsGroup, PywbemtoolsCommand, \
CMD_OPTS_TXT, GENERAL_OPTS_TXT, SUBCMD_HELP_TXT, MutuallyExclusiveOption
from ._common import pick_one_index_from_list, sort_cimobjects, \
verify_operation
from .._output_formatting import validate_output_format, \
output_format_is_table, output_format_is_cimobject, format_table
# Query language assumed for indication filters when none is specified
DEFAULT_QUERY_LANGUAGE = 'WQL'

# Subscription manager ID used by pywbemcli for owned instances
DEFAULT_SUB_MGR_ID = "defaultpywbemcliSubMgr"

# NOTE(review): despite the _STR suffix this is a bool, unlike ALL_STR
# below -- confirm whether 'owned' was intended here
OWNED_STR = True
ALL_STR = 'all'
# --owned/--permanent flag for the add-* commands
ownedadd_flag_option = [  # pylint: disable=invalid-name
    click.option("--owned/--permanent", default=True,
                 help=u"Defines whether an owned or permanent filter, "
                      u"destination, or subscription is to be added. "
                      u"Default: owned"),
]

# --owned/--permanent flag for the remove-* commands
ownedremove_flag_option = [  # pylint: disable=invalid-name
    click.option("--owned/--permanent", default=True,
                 help=u"Defines whether an owned or permanent filter, "
                      u"destination, or subscription is to be removed. "
                      u"Default: owned"),
]

# --type choice (owned/permanent/all) for the list-* commands
ownedlist_choice_option = [  # pylint: disable=invalid-name
    click.option("--type", default='all',
                 type=click.Choice(['owned', 'permanent', 'all'],
                                   case_sensitive=False),
                 help=u"Defines whether the command filters owned, "
                      u" permanent, or all objects for the response. "
                      u"Default: all"),
]
# --names-only flag shared by the list-* commands; mutually exclusive with
# --summary and --detail.  Fixes unbalanced parenthesis in the help text
# ("(ex. mof." -> "(ex. mof).").
names_only_option = [  # pylint: disable=invalid-name
    click.option('--names-only', '--no', is_flag=True, required=False,
                 cls=MutuallyExclusiveOption,
                 mutually_exclusive=["summary", 'detail'],
                 show_mutually_exclusive=True,
                 help=u'Show the CIMInstanceName elements of the instances. '
                      u'This only applies when the --output-format is one '
                      u'of the CIM object options (ex. mof).')
]
# -d/--detail flag for list/list-destinations/list-filters commands
detail_option = [  # pylint: disable=invalid-name
    click.option('-d', '--detail', is_flag=True, required=False,
                 cls=MutuallyExclusiveOption,
                 mutually_exclusive=["summary", 'names-only'],
                 show_mutually_exclusive=True,
                 help=u"Show more detailed information. Otherwise only "
                      u"non-null or predefined property values are displayed. It "
                      u"applies to both MOF and TABLE output formats.")
]

# -d/--detail flag variant for list-subscriptions (adds referenced objects)
detail_subscription_option = [  # pylint: disable=invalid-name
    click.option('-d', '--detail', is_flag=True, required=False,
                 cls=MutuallyExclusiveOption,
                 mutually_exclusive=["summary", 'names-only'],
                 show_mutually_exclusive=True,
                 help=u"Show more detailed information including MOF of "
                      "referenced listeners and filters. Otherwise only "
                      u"non-null or predefined property values are displayed. The "
                      u"extra properties applies to both MOF and TABLE output "
                      u"formats.")
]

# -s/--summary flag; mutually exclusive with --detail and --names-only
summary_option = [  # pylint: disable=invalid-name
    click.option('-s', '--summary', is_flag=True, required=False,
                 cls=MutuallyExclusiveOption,
                 mutually_exclusive=["detail", 'names-only'],
                 show_mutually_exclusive=True,
                 help=u'Show only summary count of instances.')
]

# -v/--verify flag for the remove-* commands
verify_remove_option = [  # pylint: disable=invalid-name
    click.option('-v', '--verify', is_flag=True, default=False,
                 help=u'Prompt user to verify instances to be removed before '
                      u'request is sent to WBEM server.')
]

# --select flag to interactively disambiguate multiple IDENTITY matches
select_option = [  # pylint: disable=invalid-name
    click.option('--select', is_flag=True, default=False,
                 help=u'Prompt user to select from multiple objects '
                      u'that match the IDENTITY. Otherwise, if the command '
                      u'finds multiple instance that match the IDENTITY, the '
                      u'operation fails.')
]
##################################################################
#
# Subcommand Click definitions
#
###################################################################
@cli.group('subscription', cls=PywbemtoolsGroup,
           options_metavar=GENERAL_OPTS_TXT, subcommand_metavar=SUBCMD_HELP_TXT)
@add_options(help_option)
def subscription_group():
    """
    Command group to manage WBEM indication subscriptions.

    This group uses the pywbem subscription manager to create, view, and remove
    CIM Indication subscriptions for a WBEM Server.

    In addition to the command-specific options shown in this help text, the
    general options (see 'pywbemcli --help') can also be specified before the
    command. These are NOT retained after the command is executed.
    """
    # Group entry point: click dispatches to the subcommands below
    pass
@subscription_group.command('add-destination', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@click.argument('identity', type=str, metavar='IDENTITY', required=True,)
@click.option('-l', '--listener-url', type=str, metavar='URL',
              help=u'Defines the URL of the target listener in the format: '
                   u'[SCHEME://]HOST:PORT. SCHEME must be "https" (default) '
                   u'or "http". HOST is a short or long hostname or literal '
                   u'IPV4/v6 address. PORT is a positive integer and is '
                   u'required')
@add_options(ownedadd_flag_option)
@add_options(help_option)
@click.pass_obj
def subscription_add_destination(context, identity, **options):
    """
    Add new listener destination.

    This command adds a listener destination to be the target of indications
    sent by a WBEM server, by adding an instance of the CIM class
    "CIM_ListenerDestinationCIMXML" in the Interop namespace of the WBEM
    server.

    A listener destination defines the target of a WBEM indication
    listener, including the URI of the listener and the listener port.

    The required IDENTITY argument along with the --owned/--permanent option
    define the ``Name`` key property of the new instance. If the instance is to
    be owned by the current SubscriptionManager, pywbemcli creates a 'Name'
    property value with the format: "pywbemdestination:"
    <SubscriptionManagerID> ":" <IDENTITY>. If the destination instance is to
    be permanent, the value of the IDENTITY argument becomes the value of the
    'Name' property.

    Owned destinations are added or updated conditionally: If the destination
    instance to be added is already registered with this subscription manager
    and has the same property values, it is not created or modified. If an
    instance with this path and properties does not exist yet (the normal
    case), it is created on the WBEM server.

    Permanent listener destinations are created unconditionally, and it is up to
    the user to ensure that such an instance does not already exist.

    If the --verbose general option is set, the created instance is displayed.
    """
    # Defer the real work so it runs inside the context's standard
    # connection/error-handling wrapper
    context.execute_cmd(
        lambda: cmd_subscription_add_destination(context, identity, options))
@subscription_group.command('add-filter', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@click.argument('identity', type=str, metavar='IDENTITY', required=True,)
@click.option('-q', '--query', type=str, metavar='FILTER',
              required=True,
              help=u'Filter query definition. This is a SELECT '
                   u'statement in the query language defined in the '
                   u'filter-query-language parameter')
@click.option('--query-language', type=str, metavar='TEXT',
              required=False,
              default=DEFAULT_QUERY_LANGUAGE,
              help=u"Filter query language for this subscription The query "
                   u"languages normally implemented are 'DMTF:CQL' and 'WQL' . "
                   u" Default: {0}".format(DEFAULT_QUERY_LANGUAGE))
@click.option('--source-namespaces', type=str, metavar='TEXT',
              required=False, default=None,
              multiple=True,
              help=u"The namespace(s) for which the query is defined. Multiple "
                   u"values may be defined with a single comma-separated "
                   u"string of namespaces or multiple options. If defined the "
                   u"namespaces will be inserted into the SourceNamespaces "
                   u"property. Otherwise the property will not be created and "
                   u"the WBEM server typically use the Interop namespace for "
                   u"the indication filter.")
@add_options(ownedadd_flag_option)
@add_options(help_option)
@click.pass_obj
def subscription_add_filter(context, identity, **options):
    """
    Add new indication filter.

    This command adds an indication filter to a WBEM server, by
    creating an indication filter instance (CIM class
    "CIM_IndicationFilter") in the Interop namespace of the server.

    An indication filter defines the query and query language to be used
    by the WBEM server to create indications for an indication subscription.

    The required IDENTITY argument of the command and the --owned/--permanent
    option defines the 'Name' key property of the the created instance. If the
    instance is to be owned by the current SubscriptionManager, pywbemcli
    indirectly specifies the 'Name' property value with the format:
    "pywbemfilter:" "<SubscriptionManagerID>" ":" <identity>``. If the
    destination instance is to be permanent, the value of the IDENTITY argument
    directly becomes the value of the Name property.

    Owned indication filters are added or updated conditionally: If the
    indication filter instance to be added is already registered with
    this subscription manager and has the same property values, it is not
    created or modified. If it has the same path but different property
    values, it is modified to get the desired property values. If an
    instance with this path does not exist yet (the normal case), it is
    created.

    Permanent indication filters are created unconditionally; it is
    up to the user to ensure that such an instance does not exist yet.

    If the --verbose general option is set, the created instance is displayed.
    """
    # Defer the real work so it runs inside the context's standard
    # connection/error-handling wrapper
    context.execute_cmd(
        lambda: cmd_subscription_add_filter(context, identity, options))
@subscription_group.command('add-subscription', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@click.argument('destination-identity', type=str, metavar='DESTINATIONID',
                required=True,)
@click.argument('filter-identity', type=str, metavar='FILTERID', required=True,)
@add_options(ownedadd_flag_option)
@add_options(select_option)
@add_options(help_option)
@click.pass_obj
def subscription_add_subscription(context, destination_identity,
                                  filter_identity, **options):
    """
    Add new indication subscription.

    Adds an indication subscription to the current WBEM server for a particular
    DESTINATIONID and FILTERID. The command creates an instance of CIM
    association class "CIM_IndicationSubscription" in the Interop namespace of
    the server.

    The destination and filter instances to be used in the subscription is
    based on the DESTINATIONID and FILTERID arguments which define the
    the 'Handler' and 'Filter' reference properties of the subscription
    instance to be created.

    The required DESTINATIONID argument defines the existing destination
    instance that will be attached to the 'Handler' reference of the
    association class. This argument may consist of either the value of the
    'Name' property of the target destination instance or the identity of that
    instance. The identity is the full value of the 'Name' property for
    permanent destinations and is a component of the 'Name' property for owned
    instances. If just the identity is used, this will result in multiple
    destinations being found if the same string is defined as the identity of
    an owned and permanent destination.

    The required FILTERID argument defines the existing filter instance that
    will be attached to the 'Filter' reference of the association class. This
    argument may consist of either the value of the 'Name' property of the
    target filter instance or the identity of that instance. The identity is
    the full value of the 'Name' property for permanent filters and is a
    component of the 'Name' property for owned instances. If just the identity
    is used, this will result in multiple filters being found if the same
    string is defined as the identity of an owned and permanent filter.

    When creating permanent subscriptions, the indication filter and the
    listener destinations must not be owned. For owned subscriptions,
    indication filter and listener destination may be either owned or permanent.

    Owned subscriptions are added or updated conditionally: If the
    subscription instance to be added is already registered with
    this subscription manager and has the same path, it is not
    created.

    Permanent subscriptions are created unconditionally, and it is up to
    the user to ensure that such an instance does not already exist.

    Upon successful return of this method, the added subscription is active on
    the WBEM server, so that the specified WBEM listeners may immediately
    receive indications.

    If the --verbose general option is set, the created instance is displayed.
    """
    # pylint: disable=line-too-long
    context.execute_cmd(
        lambda: cmd_subscription_add_subscription(context, destination_identity, filter_identity, options))  # noqa: E501
@subscription_group.command('list', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@add_options(ownedlist_choice_option)
@add_options(summary_option)
@add_options(detail_option)
@add_options(help_option)
@click.pass_obj
def subscription_list(context, **options):
    """
    Display indication subscriptions overview.

    This command provides an overview of the count of subscriptions, filters,
    and destinations retrieved from the WBEM server. The level of detail
    depends on the --summary and --detail options. '--summary' displays only
    a single count for each; --detail displays a table for the instances
    of each. The default is to display a table of the count of owned and
    permanent for each.
    """
    context.execute_cmd(lambda: cmd_subscription_list(context, options))
@subscription_group.command('list-destinations', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@add_options(ownedlist_choice_option)
@add_options(detail_option)
@add_options(names_only_option)
@add_options(summary_option)
@add_options(help_option)
@click.pass_obj
def subscription_list_destinations(context, **options):
    """
    Display indication listeners on the WBEM server.

    Display existing CIM indication listener destinations on the current
    connection. The listener destinations to be displayed can be filtered
    by the owned choice option (owned, permanent, all).

    The data display is determined by the --detail, --names-only, and --summary
    options and can be displayed as either a table or CIM objects (ex. mof)
    format using the --output general option (ex. --output mof).
    """
    context.execute_cmd(
        lambda: cmd_subscription_list_destinations(context, options))
@subscription_group.command('list-filters', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@add_options(ownedlist_choice_option)
@add_options(detail_option)
@add_options(names_only_option)
@add_options(summary_option)
@add_options(help_option)
@click.pass_obj
def subscription_list_filters(context, **options):
    """
    Display indication filters on the WBEM server.

    Display existing CIM indication filters (CIM_IndicationFilter class) on the
    current connection. The indication filters to be displayed can be filtered
    by the owned choice option (owned, permanent, all).

    The data display is determined by the --detail, --names-only, and --summary
    options and can be displayed as either a table or CIM objects (ex. mof)
    format using the --output general option (ex. --output mof).
    """
    context.execute_cmd(lambda: cmd_subscription_list_filters(context, options))
@subscription_group.command('list-subscriptions', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@add_options(ownedlist_choice_option)
@add_options(detail_subscription_option)
@add_options(names_only_option)
@add_options(summary_option)
@add_options(help_option)
@click.pass_obj
def subscription_list_subscriptions(context, **options):
    """
    Display indication subscriptions on the WBEM server.

    Displays information on indication subscriptions on the WBEM server,
    filtering the subscriptions to be displayed can be filtered by the owned
    choice option (owned, permanent, all).

    The default display is a table of information from the associated
    Filter and Handler instances

    The data display is determined by the --detail, --names-only, and --summary
    options and can be displayed as either a table or CIM objects (ex. mof)
    format using the --output general option (ex. --output mof).
    """
    context.execute_cmd(
        lambda: cmd_subscription_list_subscriptions(context, options))
@subscription_group.command('remove-destination', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@click.argument('identity', type=str, metavar='IDENTITY', required=True)
@add_options(ownedremove_flag_option)
@add_options(select_option)
@add_options(help_option)
@add_options(verify_remove_option)
@click.pass_obj
def subscription_remove_destination(context, identity, **options):
    """
    Remove a listener destination from the WBEM server.

    Removes a listener destination instance (CIM_ListenerDestinationCIMXML)
    from the WBEM server where the instance to be removed is identified by the
    IDENTITY argument and optional owned option of the command.

    The required IDENTITY argument may be the value of the IDENTITY used to
    create the destination or may be the full value of the destination 'Name'
    property. This is the value of the 'Name' property for permanent
    destinations and a component of the 'Name' property for owned destinations.

    If the instance is owned by the current pywbem SubscriptionManager,
    pywbemcli indirectly specifies the Name property value with the format:
    "pywbemdestination:" "<SubscriptionManagerID>" ":" <IDENTITY>``. If the
    destination instance is permanent, the value of the IDENTITY argument
    is the value of the Name property.

    Some listener_destination instances on a server may be static in which
    case the server should generate an exception. Pywbemcli has no way to
    identify these static destinations and they will appear as permanent
    destination instances.

    The --select option can be used if, for some reason, the IDENTITY and
    ownership returns multiple instances. This should only occur in rare cases
    where destination instances have been created by other tools. If the
    --select option is not used pywbemcli displays the paths of the instances
    and terminates the command.
    """
    context.execute_cmd(
        lambda: cmd_subscription_remove_destination(context, identity, options))
@subscription_group.command('remove-filter', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@click.argument('identity', type=str, metavar='IDENTITY', required=True)
@add_options(ownedremove_flag_option)
@add_options(select_option)
@add_options(verify_remove_option)
@add_options(help_option)
@click.pass_obj
def subscription_remove_filter(context, identity, **options):
    """
    Remove an indication filter from the WBEM server.

    Removes a single indication filter instance (CIM_IndicationFilter class)
    from the WBEM server where the instance to be removed is identified by the
    IDENTITY argument and optional --owned option of the command.

    The required IDENTITY argument may be the value of the IDENTITY
    used to create the filter or may be the full value of the filter Name
    property. For permanent filters the value of the Name property is required;
    for owned destinations the IDENTITY component of the Name property is
    sufficient.

    If the instance is owned by the current pywbem SubscriptionManager,
    pywbemcli indirectly specifies the Name property value with the format:
    "pywbemfilter:" "<SubscriptionManagerID>" ":" <IDENTITY>``. If the
    destination instance is permanent, the value of the IDENTITY argument
    is the value of the Name property.

    The --select option can be used if the IDENTITY and ownership returns
    multiple instances. This should only occur in rare cases where filter
    instances have been created by other tools. If the --select option is not
    used pywbemcli displays the paths of the instances and terminates the
    command.
    """
    context.execute_cmd(
        lambda: cmd_subscription_remove_filter(context, identity, options))
@subscription_group.command('remove-subscription', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@click.argument('destination-identity', type=str, metavar='DESTINATIONID',
                required=True,)
@click.argument('filter-identity', type=str, metavar='FILTERID', required=True,)
@add_options(verify_remove_option)
@click.option('--remove-associated-instances', is_flag=True,
              default=False,
              help=u'Attempt to remove the instances associated with this '
                   u'subscription. They will only be removed if they do not '
                   u'participate in any other associations.')
@add_options(select_option)
@add_options(help_option)
@click.pass_obj
def subscription_remove_subscription(context, destination_identity,
                                     filter_identity, **options):
    """
    Remove indication subscription from the WBEM server.

    This command removes an indication subscription instance from the
    WBEM server.

    The selection of subscription to be removed is defined by the
    DESTINATIONID and FILTERID arguments which define the Name property of the
    destination and filter associations of the subscription to be
    removed.

    The required DESTINATIONID argument defines the existing destination
    instance that will be attached to the Filter reference of the association
    class. This argument may consist of either the value of the Name property
    of the target destination instance or the identity of that instance. The
    identity is the full value of the Name property for permanent destinations
    and is a component of the Name property for owned instances. If just the
    identity is used, this will result in multiple destinations being found if
    the same string is defined as the identity of an owned and permanent
    destination.

    The required FILTERID argument defines the existing filter instance that
    will be attached to the 'Filter' reference of the association class. This
    argument may consist of either the value of the 'Name' property of the
    target filter instance or the identity of that instance. The identity is
    the full value of the 'Name' property for permanent filters and is a
    component of the 'Name' property for owned instances. If just the identity
    is used, this may result in multiple filters being found if the same string
    is defined as the identity of an owned and permanent filter.

    This operation does not remove associated filter or destination instances
    unless the option --remove-associated-instances is included in the command
    and the associated instances are not used in any other association.
    """
    # Defer execution into the pywbemcli command-processing environment.
    context.execute_cmd(
        # pylint: disable=line-too-long
        lambda: cmd_subscription_remove_subscription(context, destination_identity, filter_identity, options))  # noqa: E501
@subscription_group.command('remove-server', cls=PywbemtoolsCommand,
                            options_metavar=CMD_OPTS_TXT)
@add_options(help_option)
@click.pass_obj
def subscription_remove_server(context, **options):
    """
    Remove current WBEM server from the SubscriptionManager.

    This command unregisters owned listeners from the WBEM server and removes
    all owned indication subscriptions, owned indication filters, and owned
    listener destinations for this server-id from the WBEM server.
    """
    # Defer execution into the pywbemcli command-processing environment.
    context.execute_cmd(
        lambda: cmd_subscription_remove_server(context, options))
#####################################################################
#
# Class to communicate with WBEMSubscriptionManager
#
#####################################################################
def owned_flag_str(owned_flag):
    """Map a boolean ownership flag to its display string.

    Returns "owned" for a truthy flag, "permanent" otherwise.
    """
    if owned_flag:
        return "owned"
    return "permanent"
class CmdSubscriptionManager(object):
    """
    Encapsulate the initial parsing and common variables of subscriptions in
    a single class and provide a set of methods that mirror the
    SubscriptionManager class but for the defined subscription manager id and
    server id.

    All communication with the WBEMSubscriptionManager pass through this class
    and exceptions from WBEMSubscriptionManager are captured in these
    methods to simplify the command action functions.

    Some of the WBEMSubscriptionManager methods have been compressed from
    multiple (owned/all) methods into a single method with an owned parameter.
    """

    def __init__(self, context, options):
        """
        Initialize the CmdSubscriptionManager instance and the
        WBEMSubscriptionManager instance with the subscription_manager_id
        defined. This retrieves any owned objects from the WBEMServer defined
        by the server_id.

        Parameters:

          context(:class:`'~pywbemtools._context_obj.ContextObj`):
            The pywbemcli context object for the current command passed to
            the action function.

          options :class:`py:dict`:
            The options variable passes to the current command action function.

        Raises:
          click.ClickException: if no WBEM server is defined in the context.
        """
        if not context.pywbem_server_exists():
            raise click.ClickException("No WBEM server defined.")

        self._context = context

        # Use the submgr_id option if present; otherwise fall back to the
        # pywbemcli default subscription manager id.
        if 'submgr_id' in options:
            self._submgr_id = options['submgr_id']
        else:
            self._submgr_id = DEFAULT_SUB_MGR_ID

        # ValueError and TypeError fall through. They are real programming
        # errors in this code.
        self._submgr = WBEMSubscriptionManager(
            subscription_manager_id=self._submgr_id)

        # Register the server in the subscription manager and get
        # owned subscriptions, filter, and destinations from server
        # Do not catch Value and Key exceptions. They fall through
        self._server_id = self._submgr.add_server(
            context.pywbem_server.wbem_server)

        # Define the Name property prefix and search pattern for owned filters,
        # etc. This is determined by a constant and the submgr_id string.
        self.owned_destination_prefix = \
            "pywbemdestination:{0}".format(self.submgr_id)
        self.owned_filter_prefix = "pywbemfilter:{0}".format(self.submgr_id)
        self.filter_name_pattern = re.compile(
            r'^pywbemfilter:{0}:([^:]*)$'.format(self.submgr_id))
        self.destination_name_pattern = re.compile(
            r'^pywbemdestination:{0}:([^:]*)$'.format(self.submgr_id))

    @property
    def server_id(self):
        """
        Return the server_id init variable
        """
        return self._server_id

    @property
    def submgr(self):
        """
        Return the submgr (WBEMSubscriptionManager) object instance
        """
        return self._submgr

    @property
    def submgr_id(self):
        """
        Return the subscription manager id string
        """
        return self._submgr_id

    @property
    def context(self):
        """
        Return the Click context object supplied to command action function
        """
        return self._context

    # The following methods interface to the pywbem SubscriptionManager.
    # They all catch pywbem Error and some catch other exceptions.

    def get_destinations(self, owned_flag):
        """
        Get the owned indication destinations or all indication
        destinations from WBEMSubscriptionManager. This method uses
        pywbem.SubscriptionManager APIs to return either owned or all
        destination instances based on the owned_flag parameter.

        Parameters:

          owned_flag (:class:`py:bool`):
            If True, return only owned destination instances. Otherwise return
            all destination instances.

        Returns:
            List of CIM_ListenerDestinationCIMXML objects

        Raises:
            click.ClickException if the request encounters an error.
        """
        try:
            if owned_flag:
                return self.submgr.get_owned_destinations(self.server_id)
            return self.submgr.get_all_destinations(self.server_id)
        except Error as er:
            raise click.ClickException(
                self.err_msg("Get {0} destinations failed".
                             format(owned_flag_str(owned_flag)), er))

    def get_filters(self, owned_flag):
        """
        Get either the owned indication filters or all indication filters from
        WBEMSubscriptionManager. This method uses pywbem.SubscriptionManager
        APIs to return either owned filters or all filters based on the
        owned_flag parameter.

        Parameters:

          owned_flag (:class:`py:bool`):
            If True, return only owned filters. Otherwise return all filters.

        Returns:
            List of CIM_IndicationFilter objects

        Raises:
            click.ClickException if the request encounters an error.
        """
        try:
            if owned_flag:
                return self.submgr.get_owned_filters(self.server_id)
            return self.submgr.get_all_filters(self.server_id)
        except Error as er:
            raise click.ClickException(
                self.err_msg("Get {0} filters failed".
                             format(owned_flag_str(owned_flag)), er))

    def get_subscriptions(self, owned_flag):
        """
        Get subscriptions from the server. This method uses
        pywbem.SubscriptionManager APIs to return either owned subscriptions
        or all subscriptions based on the owned_flag parameter.

        Parameters:

          owned_flag (:class:`py:bool`):
            True is owned, False is all subscriptions

        Returns:
            list of subscriptions either owned or all subscriptions

        Raises:
            click.ClickException if the request encounters an error.
        """
        try:
            if owned_flag:
                return self.submgr.get_owned_subscriptions(self.server_id)
            return self.submgr.get_all_subscriptions(self.server_id)
        except Error as er:
            raise click.ClickException(
                self.err_msg("Get {0} subscriptions failed".
                             format(owned_flag_str(owned_flag)), er))

    def add_destination(self, listener_url, owned_flag, destination_id,
                        destination_name, persistence_type=None):
        """
        Add listener destination URLs. Adds one listener destination to
        the current WBEM server target using the Subscription Manager APIs.
        Returns exception if destination already exists.

        See pywbem.WBEMSubscriptionManager.add_destination for parameter
        details.

        Returns:
            Instance created by pywbem SubscriptionManager.

        Raises:
            click.ClickException for all errors
        """
        try:
            dest = self.submgr.add_destination(
                self.server_id, listener_url, owned_flag,
                destination_id, destination_name,
                persistence_type=persistence_type)
            return dest
        # Invalid input parameters exception
        except ValueError as ve:
            raise click.ClickException(
                "add-destination failed: {0}".format(ve))
        except Error as er:
            raise click.ClickException(
                self.err_msg("add-destination {0} failed".
                             format(owned_flag_str(owned_flag)), er))

    def add_filter(self, source_namespaces, query,
                   query_language=DEFAULT_QUERY_LANGUAGE, owned_flag=True,
                   filter_id=None, name=None, source_namespace=None):
        """
        Add an indication filter. Calls WBEMSubscriptionManager.add_filter and
        captures exceptions. See WBEMSubscriptionManager for details of
        parameters.

        See pywbem.SubscriptionManager.add_filter for information on
        parameters.

        Returns:
            The created indication filter instance created by
            pywbem SubscriptionManager.

        Raises:
            click.ClickException for all exceptions received from pywbem
        """
        try:
            return self.submgr.add_filter(
                self.server_id, source_namespaces, query,
                query_language=query_language, owned=owned_flag,
                filter_id=filter_id, name=name,
                source_namespace=source_namespace)
        except (TypeError, ValueError) as exc:
            raise click.ClickException(
                self.err_msg("add-filter failed. Pywbem parameter error", exc))
        except CIMError as ce:
            # A duplicate filter Name gets a specific, shorter message.
            if ce.status_code == CIM_ERR_ALREADY_EXISTS:
                name_value = filter_id or name
                raise click.ClickException(
                    "add-filter Failed. Filter name='{0}' already exists".
                    format(name_value))
            raise click.ClickException(
                self.err_msg("add-filter failed with server exception", ce))
        except Error as er:
            raise click.ClickException(
                self.err_msg("add-filter failed with server exception", er))

    def add_subscriptions(self, filter_path, destination_paths=None,
                          owned_flag=True):
        """
        Add the subscription defined by filter_path and dest_path. Note that
        if the path of the subscription already exists it is not added.
        The ownership is defined by the ownership of the filter and
        destination and they must match.

        Raises:
            click.ClickException for all errors.
        """
        try:
            return self.submgr.add_subscriptions(
                self.server_id, filter_path,
                destination_paths=destination_paths,
                owned=owned_flag)
        except ValueError as ex:
            # NOTE(review): "pybwem" typo retained in the message text below;
            # fixing it is a code (runtime string) change.
            raise click.ClickException(
                "add-subscription failed. pybwem SubscriptionManager "
                "exception: {0}.".format(ex))
        except Error as er:
            raise click.ClickException(
                self.err_msg("add-subscription failed", er))

    def remove_destinations(self, destination_paths):
        """
        Remove the destination instances defined by the destination_paths
        parameter.

        Parameters:
            See pywbem.SubscriptionManager.remove_destinations

        Raises:
            Exceptions raised by :class:`~pywbem.WBEMConnection`.
            CIMError: CIM_ERR_FAILED, if there are referencing subscriptions.
        """  # noqa: E501
        try:
            return self.submgr.remove_destinations(self.server_id,
                                                   destination_paths)
        except Error as er:
            raise click.ClickException(
                self.err_msg("remove-destination failed", er))

    def remove_filter(self, filter_path):
        """
        Remove the filter defined by filter_path from the WBEM Server.

        Raises:
            click.ClickException if SubscriptionManager returns exception
        """
        try:
            return self.submgr.remove_filter(self.server_id, filter_path)
        except Error as er:
            raise click.ClickException(
                self.err_msg("remove-filter failed: {0} path: {1} failed".
                             format(er, filter_path), er))

    def remove_subscriptions(self, subscription_paths):
        """
        Remove subscriptions. Calls the SubscriptionManager
        remove_subscriptions.

        Raises:
            click.ClickException if SubscriptionManager returns exception
        """
        try:
            return self.submgr.remove_subscriptions(self.server_id,
                                                    subscription_paths)
        except Error as er:
            raise click.ClickException(
                self.err_msg("remove-subscriptions failed", er))

    def remove_server(self):
        """
        Remove the WBEM server defined by the server_id from the subscription
        manager and also unregister destinations and remove owned
        destinations, filters, and subscriptions from the server.

        Raises:
            click.ClickException if SubscriptionManager returns exception
        """
        try:
            self.submgr.remove_server(self.server_id)
        except Error as er:
            raise click.ClickException(self.err_msg("remove-Server failed", er))

    # The following are local methods and only call pywbem
    # SubscriptionManager through one of the above methods

    def is_owned_destination(self, instance):
        """
        Test Destination instance/path Name property/key and return True
        if it has the owned-Name prefix for this subscription manager id.
        """
        return instance['Name'].startswith(self.owned_destination_prefix)

    def is_owned_filter(self, instance):
        """
        Test Filter instance Name property or instance_path Name key and return
        True if it has the owned-Name prefix for this subscription manager id.
        """
        return instance['Name'].startswith(self.owned_filter_prefix)

    def is_owned_subscription(self, instance):
        """
        Test if subscription instance and instance path are owned instances.

        A subscription is treated as owned when either its Handler or its
        Filter reference is an owned instance.
        """
        hdlr = self.is_owned_destination(instance.path['Handler'])
        fltr = self.is_owned_filter(instance.path['Filter'])
        return fltr or hdlr

    def _get_permanent_destinations(self):
        """
        Get just the permanent destinations (all destinations whose Name
        does not carry the owned prefix).
        """
        all_dests = self.get_destinations(False)
        return [d for d in all_dests if not
                d['Name'].startswith(self.owned_destination_prefix)]

    def _get_permanent_filters(self):
        """
        Get just the permanent filters (all filters whose Name does not
        carry the owned prefix).
        """
        all_filters = self.get_filters(False)
        return [d for d in all_filters if not
                d['Name'].startswith(self.owned_filter_prefix)]

    def _get_permanent_subscriptions(self):
        """
        Get just the permanent subscriptions (all - owned)
        """
        all_subscriptions = self.get_subscriptions(False)
        owned_subscriptions = self.get_subscriptions(True)
        # Future: The test here is excessive. Should test only paths
        return [s for s in all_subscriptions if s not in owned_subscriptions]

    def get_destinations_for_owned_choice(self, choice):
        """
        Get list of destinations based on choice where choice is all,
        permanent, or owned.
        """
        assert choice in ('owned', 'all', 'permanent')
        if choice == 'owned':
            return self.get_destinations(True)
        if choice == 'all':
            return self.get_destinations(False)
        return self._get_permanent_destinations()

    def get_filters_for_owned_choice(self, choice):
        """
        Get list of filters based on choice where choice is all, permanent,
        or owned.
        """
        assert choice in ('owned', 'all', 'permanent')
        if choice == 'owned':
            return self.get_filters(True)
        if choice == 'all':
            return self.get_filters(False)
        return self._get_permanent_filters()

    def get_subscriptions_for_owned_choice(self, choice):
        """
        Get list of subscriptions based on choice where choice is all,
        permanent, or owned
        """
        assert choice in ('owned', 'all', 'permanent')
        if choice == 'owned':
            return self.get_subscriptions(True)
        if choice == 'all':
            return self.get_subscriptions(False)
        return self._get_permanent_subscriptions()

    def find_destinations_for_name(self, destination_name):
        """
        Find destination instances that match the identity provided
        where the identity is the value of the Name property.
        The identity prefix determines whether the owned or
        all destination list is processed.

        Parameters:

          destination_name (:term:`string`):
            Value of the Name property to match.

        Returns:
            destination instances found that match destination_name or an
            empty list if no destinations with the destination_name exist
        """
        owned_destination_flag = destination_name.startswith(
            self.owned_destination_prefix)
        destinations = self.get_destinations(owned_destination_flag)
        return [f for f in destinations if f['Name'] == destination_name]

    def find_filters_for_name(self, filter_name):
        """
        Find filter instances that match the identity provided
        where the identity is the value of the Name property.
        The name prefix determines whether the owned or
        all filters lists are searched.

        Parameters:

          filter_name (:term:`string`):
            Value of the Name property to match.

        Returns:
            filter instances found that match filter_name or an empty list
            if no filters with the filter_name exist
        """
        owned_filter_flag = filter_name.startswith(self.owned_filter_prefix)
        filters = self.get_filters(owned_filter_flag)
        return [f for f in filters if f['Name'] == filter_name]

    def err_msg(self, text, er):
        """
        Create a text message from the inputs and return it to the caller.
        """
        # NOTE(review): the format places the exception before the text,
        # formats the submgr object where the submgr id appears intended,
        # and ends with a dangling ", " -- confirm before changing since
        # this is user-visible runtime text.
        return "{0}; {1}: Exception :{2}. Subscription mgr id: '{3}', " \
               "server id: '{4}', " .format(er, text, er.__class__.__name__,
                                            self.submgr, self.server_id)
##########################################################################
#
# Common functions/classes for this command group
#
##########################################################################
class BaseIndicationObjectInstance(object):
    """
    Common base for the wrapper classes around a single indication
    destination, filter, or subscription instance.

    Holds the CmdSubscriptionManager and the wrapped instance, and provides
    the shared helpers used by subclasses: ownership display, property
    access, and the hook point for building select-ids.
    """

    def __init__(self, csm, instance):
        """Save the subscription manager wrapper and the wrapped instance."""
        self.csm = csm
        self.instance = instance
        # Set by each subclass from its parse of the Name property.
        self._owned_flag = None

    @property
    def owned_flag_str(self):
        """
        Return the ownership display string for this instance: "owned" if
        the instance is owned, otherwise "permanent".
        """
        return owned_flag_str(self._owned_flag)

    def instance_property(self, property_name):
        """
        Return the value of property_name from the wrapped instance, or an
        empty string if that property does not exist.
        """
        return self.instance.get(property_name, "")
class IndicationDestination(BaseIndicationObjectInstance):
    """
    Wrapper for a single listener destination instance that parses the Name
    key into ownership and identity components.
    """

    def __init__(self, csm, destination_instance):
        """Parse the Name key of destination_instance into identity parts."""
        super(IndicationDestination, self).__init__(csm, destination_instance)
        name_key = self.instance.path.keybindings['Name']
        match = re.match(csm.destination_name_pattern, name_key)
        if match:
            # Owned destination: identity is the suffix after the owned
            # Name prefix.
            self._owned_flag = True
            self.destination_id = match.group(1)
            self.destination_name = ""
            self.identity = self.destination_id
        else:
            # Permanent destination: identity is the complete Name value.
            self._owned_flag = False
            self.destination_id = ""
            self.destination_name = name_key
            self.identity = self.destination_name

    def select_id_str(self):
        """
        Return a string that identifies this destination for interactive
        selection: the instance path with the namespace removed.
        """
        select_path = self.instance.path.copy()
        select_path.namespace = None
        return str(select_path)
class IndicationFilter(BaseIndicationObjectInstance):
    """
    Wrapper for a single indication filter instance that parses the Name
    key into ownership and identity components; common methods for
    processing indication filter instances.
    """

    def __init__(self, csm, filter_instance):
        """Parse the Name key of filter_instance into identity parts."""
        super(IndicationFilter, self).__init__(csm, filter_instance)
        name_key = filter_instance.path.keybindings['Name']
        match = re.match(csm.filter_name_pattern, name_key)
        if match:
            # Owned filter: identity is the suffix after the owned prefix.
            self._owned_flag = True
            self.filter_id = match.group(1)
            self.filter_name = ""
            self.identity = self.filter_id
        else:
            # Permanent filter: identity is the complete Name value.
            self._owned_flag = False
            self.filter_id = ""
            self.filter_name = name_key
            self.identity = self.filter_name

    def select_id_str(self):
        """
        Return a string that identifies this filter for interactive
        selection: the instance path with the namespace removed.
        """
        select_path = self.instance.path.copy()
        select_path.namespace = None
        return str(select_path)
class IndicationSubscription(BaseIndicationObjectInstance):
    """
    Wrapper for a single indication subscription instance. Ownership is
    derived from the ownership of the referenced handler and filter.
    """

    def __init__(self, csm, subscription_instance):
        """Determine ownership from the Handler/Filter references."""
        super(IndicationSubscription, self).__init__(csm,
                                                     subscription_instance)
        self._owned_flag = csm.is_owned_subscription(subscription_instance)

    def select_id_str(self):
        """
        Return a short identification string for interactive selection,
        combining ownership with the select strings of the associated
        destination and filter instances (retrieved from the server).
        """
        conn = self.csm.context.pywbem_server.conn
        filter_instance = conn.GetInstance(self.instance.path['Filter'])
        handler_instance = conn.GetInstance(self.instance.path['Handler'])

        # Build the per-association select strings via the wrapper classes.
        filter_str = IndicationFilter(
            self.csm, filter_instance).select_id_str()
        dest_str = IndicationDestination(
            self.csm, handler_instance).select_id_str()

        return '{0} {1} {2}'.format(self._owned_flag, dest_str, filter_str)
def display_inst_nonnull_props(context, options, instances, output_format):
    """
    Display the instances defined in instances after removing any properties
    that are Null in every instance.

    Parameters:
      context: pywbemcli context object for the current command.
      options (dict): command options; 'summary' is passed through.
      instances (list): instances to display.
      output_format: output format passed to display_cim_objects.
    """
    # Collect, in first-seen order, the names of properties that have a
    # non-Null value in at least one instance (a dict preserves order).
    nonnull = {}
    for inst in instances:
        for name, prop in inst.properties.items():
            # Bug fix: compare against None explicitly. The previous
            # truthiness test (`if prop.value:`) incorrectly treated
            # legitimate values such as 0, False, and "" as Null and
            # hid those properties from the display.
            if prop.value is not None:
                nonnull[name] = True
    display_cim_objects(context, instances, output_format,
                        summary=options['summary'],
                        property_list=list(nonnull))
def pick_one_inst_from_instances_list(csm, instances, pick_msg):
    """
    Prompt the user to pick one instance from instances and return it.

    The selection is done by path; the single instance whose path equals
    the picked path is returned.
    """
    picked_path = pick_one_path_from_instances_list(csm, instances, pick_msg)
    matches = [inst for inst in instances if inst.path == picked_path]
    assert len(matches) == 1
    return matches[0]
def pick_one_path_from_instances_list(csm, instances, pick_msg):
    """
    Present a selection list built from instances and return the path of
    the instance the user picks.

    The displayed text for each instance is its wrapper class's
    select_id_str() when the class is one of the subscription manager
    classes; otherwise the raw paths are displayed.

    Returns:
        Instance path of instance selected.

    Raises:
        click.ClickException for any error.
    """
    assert instances is not None
    instances = sort_cimobjects(instances)
    paths = [inst.path for inst in instances]

    # Map each known subscription-manager class to the wrapper class whose
    # select_id_str() produces the per-instance display text.
    wrapper_classes = {FILTER_CLASSNAME: IndicationFilter,
                       DESTINATION_CLASSNAME: IndicationDestination,
                       SUBSCRIPTION_CLASSNAME: IndicationSubscription}
    classname = paths[0].classname
    if classname in wrapper_classes:
        wrapper = wrapper_classes[classname]
        display_list = [wrapper(csm, inst).select_id_str()
                        for inst in instances]
    else:
        click.echo("Class {0} is not one of pywbem subscription mgr classes".
                   format(classname))
        display_list = paths

    try:
        index = pick_one_index_from_list(csm.context, display_list, pick_msg)
        return paths[index]
    except ValueError as exc:
        raise click.ClickException(str(exc))
def resolve_instances(csm, instances, identity, obj_type_name,
                      select_opt, cmd_str):
    """
    Reduce a candidate instance list to exactly one instance or fail.

    If the list has exactly one entry it is returned. With multiple
    entries and select_opt True, the user is prompted to pick one;
    otherwise an exception listing the candidates terminates the command.

    Parameters:

      csm (CmdSubscriptionManager): subscription manager wrapper.

      instances (list): candidate instances; exactly one may be used.

      identity (:term:`string`): the identity that selected the
        instances; used in prompts and messages.

      obj_type_name (:term:`string`): 'destination' or 'filter'; used in
        messages.

      select_opt (:class:`py:bool`): if True, prompt the user to pick one
        of multiple candidates instead of failing.

      cmd_str (:term:`string`): command name ('add' or 'remove') used in
        the displays.

    Returns:
        The single resolved instance.

    Raises:
        click.ClickException: no candidates, or multiple candidates
        without --select. This terminates the current command.
    """
    # At least one candidate must exist.
    if not instances:
        raise click.ClickException("No {0} found for identity: {1}".
                                   format(obj_type_name, identity))

    if len(instances) == 1:
        return instances[0]

    # Multiple candidates: let the user pick when --select was given.
    if select_opt:
        # FUTURE: Need better than path for prompt info.
        return pick_one_inst_from_instances_list(
            csm, instances,
            "Pick one {0} to use for {1}:".format(obj_type_name, cmd_str))

    # Otherwise fail with a summary of the candidate paths.
    path_list = "\n *".join(str(inst.path) for inst in instances)
    raise click.ClickException(
        "The identity: '{0}' returned multiple {1} instances. "
        "Use --select option to pick one instance from:\n * {2}".
        format(identity, obj_type_name, path_list))
def get_insts_for_subscription_identities(csm, destination_identity,
                                          filter_identity, cmd_str,
                                          select_opt):
    """
    Identity resolution for add and remove subscriptions where two identities
    are provided as arguments for the command.

    Each identity may be either the full Name property value of the target
    instance or only the id suffix of an owned element; when the identity
    does not carry the owned Name prefix, both the plain value and the
    owned-prefixed value are searched.

    Returns:
        Tuple of (destination instance, filter instance) when both
        identities resolve to exactly one instance each.

    Raises:
        click.ClickException: no match, or an unresolvable multiple match
        (via resolve_instances).
    """
    # Bug fix: the ownership test previously used csm.owned_filter_prefix,
    # which can never match a destination Name; a full owned destination
    # Name was therefore also searched a second time with the prefix
    # doubled. The destination prefix is the correct test here.
    destination_id_owned = destination_identity.startswith(
        csm.owned_destination_prefix)

    # Search for a match to the identity value as provided.
    destination_instances = csm.find_destinations_for_name(
        destination_identity)

    # If the identity does not include the owned prefix, also search for
    # the full owned name built from the prefix and the identity.
    if not destination_id_owned:
        owned_matches = csm.find_destinations_for_name(
            "{0}:{1}".format(csm.owned_destination_prefix,
                             destination_identity))
        if owned_matches:
            destination_instances.extend(owned_matches)

    # Resolve to a single instance, prompting if --select was given.
    sub_destination_inst = resolve_instances(
        csm, destination_instances, destination_identity,
        'destination', select_opt, cmd_str)

    filter_id_owned = filter_identity.startswith(csm.owned_filter_prefix)
    filter_instances = csm.find_filters_for_name(filter_identity)

    # If not specifically owned, also look for the owned-prefixed name.
    # This may result in multiples that must be resolved.
    if not filter_id_owned:
        owned_matches = csm.find_filters_for_name("{0}:{1}".format(
            csm.owned_filter_prefix, filter_identity))
        if owned_matches:
            filter_instances.extend(owned_matches)

    sub_filter_inst = resolve_instances(csm, filter_instances, filter_identity,
                                        'filter', select_opt, cmd_str)

    return sub_destination_inst, sub_filter_inst
#####################################################################
#
# Command action functions for each subcommands in the subscription group
#
#####################################################################
def get_CmdSubscriptionManager(context, options):
    """
    Return the CmdSubscriptionManager cached on the context's PywbemServer,
    creating and caching a new instance on first use.

    All subscription action functions should call this method so that a
    single subscription manager instance is shared per server.

    Raises:
        click.ClickException: if no WBEM server is defined.
    """
    if not context.pywbem_server_exists():
        raise click.ClickException("No WBEM server defined.")

    csm = context.pywbem_server.subscription_manager
    if not csm:
        # First use: create and cache the manager on the server object.
        csm = CmdSubscriptionManager(context, options)
        context.pywbem_server.subscription_manager = csm
    return csm
def cmd_subscription_add_destination(context, identity, options):
    """
    Add an indication listener destination instance to the WBEM server.

    For an owned destination the identity becomes the destination_id
    component of the Name property; for a permanent destination it becomes
    the complete Name property value.

    Raises:
        click.ClickException: if the add fails or would duplicate an
        existing destination.
    """
    csm = get_CmdSubscriptionManager(context, options)
    owned_flag_opt = options['owned']

    destination_id = identity if owned_flag_opt else None
    destination_name = None if owned_flag_opt else identity

    # FUTURE: Should we make this an input parameter under some circumstances.
    # ex. if name, we could set transient or permanent. Always transient
    # if filter_id has value.
    persistence_type = "transient" if destination_id else "permanent"

    # For permanent destinations, test if a destination with this Name
    # already exists before making the request. Permanent destinations with
    # the same Name property are rejected (independent of the complete path
    # being equal) to keep Name properties unique.
    if not owned_flag_opt:
        dests = csm.find_destinations_for_name(destination_name)
        if dests:
            dests_str = ", ".join([str(d.path) for d in dests])
            # Fixed message: this check detects a duplicate Name property
            # (not a duplicate URL) and the bracket is now closed.
            raise click.ClickException(
                "{0} destination: Name=[{1}] add failed. Duplicates Name of "
                "existing destination(s): [{2}]. Pywbemcli does not allow "
                "permanent destinations with same Name property to keep Name "
                "properties unique.".
                format(owned_flag_str(owned_flag_opt), destination_name,
                       dests_str))

    destination_result = csm.add_destination(
        options['listener_url'], owned_flag_opt, destination_id,
        destination_name, persistence_type=persistence_type)

    # Success: Show resulting name and conditionally, details
    context.spinner_stop()

    # If the URL, etc. of this owned add matches an existing owned destination
    # pywbem returns the existing destination, which has a different name.
    if owned_flag_opt and not destination_result['Name'].endswith(
            ":{}".format(destination_id)):
        rslt_info = IndicationDestination(csm, destination_result)
        raise click.ClickException(
            "{0} destination: Name={1} Not added. Duplicates URL of "
            "existing {2} destination: {3} URL: {4} PersistenceType: {5}.".
            format(owned_flag_str(owned_flag_opt),
                   destination_id,
                   rslt_info.owned_flag_str,
                   destination_result['Name'],
                   destination_result['Destination'],
                   destination_result['PersistenceType']))

    click.echo(
        "Added {0} destination: Name={1}".
        format(owned_flag_str(owned_flag_opt), destination_result['Name']))
    if context.verbose:
        click.echo("\npath={0}\n\n{1}".
                   format(str(destination_result.path),
                          destination_result.tomof()))
def cmd_subscription_add_filter(context, identity, options):
    """
    Add a filter defined by the input argument to the target server.
    """
    csm = get_CmdSubscriptionManager(context, options)
    owned = options['owned']
    # Owned filters are identified by an id; permanent filters by the
    # Name property value supplied directly by the user.
    if owned:
        filter_id, filter_name = identity, None
    else:
        filter_id, filter_name = None, identity
    # Build the source-namespace list.  Each tuple element may itself
    # contain several comma-separated namespace names, and the
    # SubscriptionManager API requires a list rather than a tuple.
    raw_namespaces = options['source_namespaces'] or \
        [context.pywbem_server.conn.default_namespace]
    source_namespaces = [part for item in raw_namespaces
                         for part in item.split(',')]
    # Permanent filters must keep the Name property unique; reject the
    # add when a filter with the same Name already exists.
    if not owned:
        existing = csm.find_filters_for_name(filter_name)
        if existing:
            filters_str = ", ".join([str(f.path) for f in existing])
            raise click.ClickException(
                "{0} filter: Name=[{1}] add failed. Duplicates URL of "
                "existing filters(s): [{2}. Pywbemcli does not allow "
                "permanent filters with same Name property to keep Name "
                "properties unique.".
                format(owned_flag_str(owned), filter_name,
                       filters_str))
    result_inst = csm.add_filter(source_namespaces, options['query'],
                                 options['query_language'], owned,
                                 filter_id, filter_name)
    # Success: show the resulting Name and, when verbose, the details.
    context.spinner_stop()
    click.echo("Added {0} filter: Name={1}".
               format(owned_flag_str(owned), result_inst['Name']))
    if context.verbose:
        click.echo("\npath={0}\n\n{1}".
                   format(str(result_inst.path), result_inst.tomof()))
def cmd_subscription_add_subscription(context, destination_identity,
                                      filter_identity, options):
    """
    Add a subscription based on selected a filter and destination.

    The owned option defines the ownership of the resulting indication
    subscription.

    If the owned option is True, either owned or permanent filters and
    listeners may be attached.

    If the owned option is False (--permanent) only permanent filters and
    listeners may be attached
    """
    csm = get_CmdSubscriptionManager(context, options)
    owned_flag_opt = options['owned']
    select_opt = options['select']
    # Search the existing filters and destinations to find instances
    # that match the destination_identity and filter_identity
    sub_dest_inst, sub_filter_inst = get_insts_for_subscription_identities(
        csm, destination_identity, filter_identity, 'add-subscription',
        select_opt)
    # Duplicates test in SubscriptionManager but with message for parameters of
    # the command rather than the pywbem API.
    # A permanent subscription may only reference permanent filters and
    # destinations; reject the mix before calling the pywbem API.
    if (csm.is_owned_filter(sub_filter_inst) or
            csm.is_owned_destination(sub_dest_inst)) and not owned_flag_opt:
        raise click.ClickException(
            "Permanent subscriptions cannot be created with owned filters "
            "or destinations. Create an owned subscription or use a "
            "permanent filter and destination. Destination Name={0}, "
            "Filter Name={1}".format(sub_dest_inst['Name'],
                                     sub_filter_inst['Name']))
    rslt = csm.add_subscriptions(sub_filter_inst.path,
                                 sub_dest_inst.path, owned_flag_opt)
    context.spinner_stop()
    click.echo("Added {0} subscription: DestinationName={1}, FilterName={2}".
               format(owned_flag_str(owned_flag_opt),
                      sub_dest_inst.path['Name'],
                      sub_filter_inst.path["Name"]))
    if context.verbose:
        # NOTE(review): add_subscriptions apparently returns a list; only
        # the first element is shown -- confirm a single instance is added.
        click.echo("\n\n{0}".format(rslt[0].tomof()))
def cmd_subscription_list(context, options):
    """
    Display overview information on the subscriptions, filters and indications
    """
    # If --detail set, execute call to list all of the tables but
    # with some options set to False
    if options['detail']:
        options['names_only'] = False
        options['detail'] = False
        cmd_subscription_list_destinations(context, options)
        click.echo("\n")
        cmd_subscription_list_filters(context, options)
        click.echo("\n")
        cmd_subscription_list_subscriptions(context, options)
        return
    output_format = validate_output_format(context.output_format,
                                           ['TEXT', 'TABLE'],
                                           default_format="table")
    csm = get_CmdSubscriptionManager(context, options)
    summary_opt = options['summary']
    # Gather both the complete and the owned-only object lists so the
    # permanent counts can be derived as (all - owned).
    all_subscriptions = csm.get_subscriptions_for_owned_choice(ALL_STR)
    all_destinations = csm.get_destinations_for_owned_choice(ALL_STR)
    all_filters = csm.get_filters_for_owned_choice(ALL_STR)
    owned_subscriptions = csm.get_subscriptions(OWNED_STR)
    owned_destinations = csm.get_destinations(OWNED_STR)
    owned_filters = csm.get_filters(OWNED_STR)
    if summary_opt:
        # Summary: a single row of totals.
        headers = ['subscriptions', 'filters', 'destinations']
        rows = [[len(all_subscriptions), len(all_filters),
                 len(all_destinations)]]
    else:
        # One row per CIM class plus a totals row.
        headers = ['CIM_class', 'owned', 'permanent', 'all']
        rows = []
        rows.append([SUBSCRIPTION_CLASSNAME,
                     len(owned_subscriptions),
                     len(all_subscriptions) - len(owned_subscriptions),
                     len(all_subscriptions)])
        rows.append([FILTER_CLASSNAME,
                     len(owned_filters),
                     len(all_filters) - len(owned_filters),
                     len(all_filters)])
        rows.append([DESTINATION_CLASSNAME,
                     len(owned_destinations),
                     len(all_destinations) - len(owned_destinations),
                     len(all_destinations)])
        # pylint: disable=consider-using-generator
        rows.append(["TOTAL INSTANCES",
                     sum([r[1] for r in rows]),
                     sum([r[2] for r in rows]),
                     sum([r[3] for r in rows])])
    summary_str = "summary" if summary_opt else ""
    title = "Subscription instance {0} counts: submgr-id={1}, svr-id={2}". \
        format(summary_str, csm.submgr_id, csm.server_id)
    context.spinner_stop()
    if output_format_is_table(output_format):
        click.echo(format_table(rows, headers, title=title,
                                table_format=output_format))
    else:  # output in TEXT format
        if summary_opt:
            click.echo("{0} subscriptions, {1} filters, {2} destinations".
                       format(len(all_subscriptions), len(all_filters),
                              len(all_destinations)))
        else:
            for row in rows:
                click.echo("{0}: {1}, {2}, {3}".format(row[0], row[1], row[2],
                                                       row[3]))
def get_reference_count(subscription_paths, inst_name, role):
    """
    Count the CIM_IndicationSubscription paths whose reference property
    named by ``role`` (e.g. 'Filter' or 'Handler') equals ``inst_name``.

    Parameters:
      subscription_paths: iterable of subscription instance paths.
      inst_name: instance name compared against the role property value.
      role: name of the reference property to inspect.

    Returns: int
      Count of subscription paths that reference inst_name in role.
    """
    matches = 0
    for path in subscription_paths:
        if path[role] == inst_name:
            matches += 1
    return matches
def cmd_subscription_list_destinations(context, options):
    """
    List the subscription destinations objects found on the current connection.

    Since these are complex objects there are a variety of display options
    including table, CIM objects, etc.
    """
    output_format = validate_output_format(context.output_format,
                                           ['CIM', 'TABLE'],
                                           default_format="table")
    csm = get_CmdSubscriptionManager(context, options)
    ownedchoice_opt = (options['type']).lower()
    destinations = csm.get_destinations_for_owned_choice(ownedchoice_opt)
    if output_format_is_cimobject(output_format):
        # CIM object display: paths only, non-null properties, or full MOF.
        if options['names_only']:
            paths = [inst.path for inst in destinations]
            display_cim_objects(context, paths, output_format)
        elif options['detail']:
            display_inst_nonnull_props(context, options, destinations,
                                       output_format)
        else:
            display_cim_objects(context, destinations, output_format,
                                summary=options['summary'])
    elif output_format_is_table(output_format):
        if options['names_only']:
            paths = [inst.path for inst in destinations]
            display_cim_objects(context, paths, output_format)
            return
        headers = ['Ownership', 'Identity', 'Name\nProperty', 'Destination',
                   'Persistence\nType', 'Protocol', 'Subscription\nCount']
        if options['detail']:
            headers.extend([
                'CreationclassName', 'SystemCreationClassName', 'SystemName'])
        rows = []
        # FUTURE: summary with table not covered.
        # subscription_paths = [s.path for s in csm.get_subscriptions(False)]
        # All subscription paths are retrieved once so each destination row
        # can show how many subscriptions reference it.
        subscription_paths = [s.path for s in
                              csm.get_subscriptions_for_owned_choice("all")]
        for dest in destinations:
            ref_cnt = get_reference_count(subscription_paths,
                                          dest.path, 'Handler')
            d = IndicationDestination(csm, dest)
            row = [d.owned_flag_str,
                   d.identity,
                   fold_strings(d.instance_property('Name'), 30,
                                break_long_words=True),
                   d.instance_property('Destination'),
                   d.instance_property('PersistenceType'),
                   d.instance_property('Protocol'),
                   ref_cnt]
            if options['detail']:
                row.extend([d.instance_property('CreationClassName'),
                            d.instance_property('SystemCreationClassName'),
                            d.instance_property('SystemName')])
            rows.append(row)
        title = "Indication Destinations: submgr-id={0}, svr-id={1}, " \
            "type={2}". \
            format(csm.submgr_id, csm.server_id, ownedchoice_opt)
        context.spinner_stop()
        click.echo(format_table(rows, headers, title=title,
                                table_format=output_format))
    else:
        # Unreachable: validate_output_format limits formats to CIM/TABLE.
        assert False, "{0} Invalid output format for this command". \
            format(output_format)
def cmd_subscription_list_filters(context, options):
    """
    List the indication filters found in the current SubscriptionManager
    object
    """
    output_format = validate_output_format(context.output_format,
                                           ['CIM', 'TABLE'],
                                           default_format="table")
    csm = get_CmdSubscriptionManager(context, options)
    # NOTE(review): unlike list_destinations, the type choice is not
    # lower-cased here -- presumably the click choice is already lower
    # case; confirm.
    filterchoice_opt = options['type']
    details_opt = options['detail']
    filters = csm.get_filters_for_owned_choice(filterchoice_opt)
    if output_format_is_cimobject(output_format):
        # CIM object display: paths only, non-null properties, or full MOF.
        if options['names_only']:
            paths = [inst.path for inst in filters]
            display_cim_objects(context, paths, output_format,
                                options['summary'])
        elif options['detail']:
            display_inst_nonnull_props(context, options, filters,
                                       output_format)
        else:
            display_cim_objects(context, filters, output_format,
                                summary=options['summary'])
    elif output_format_is_table(output_format):
        if options['names_only']:
            paths = [inst.path for inst in filters]
            display_cim_objects(context, paths, output_format)
            return
        headers = ['Ownership', 'Identity', 'Name\nProperty', 'Query',
                   'Query\nLanguage', 'Source\nNamespaces',
                   'Subscription\nCount']
        if options['detail']:
            headers.extend(
                ['CreationclassName', 'SystemCreationClassName',
                 'SystemName'])
        rows = []
        # All subscription paths are retrieved once so each filter row can
        # show how many subscriptions reference it.
        subscription_paths = [s.path for s in
                              csm.get_subscriptions_for_owned_choice("all")]
        for filter_ in filters:
            ref_cnt = get_reference_count(subscription_paths,
                                          filter_.path, 'Filter')
            f = IndicationFilter(csm, filter_)
            row = [f.owned_flag_str,
                   f.identity,
                   fold_strings(f.instance_property('Name'), 30,
                                break_long_words=True),
                   fold_strings(f.instance_property('Query'), 25),
                   f.instance_property('QueryLanguage'),
                   "\n".join(f.instance_property('SourceNamespaces')),
                   ref_cnt]
            if details_opt:
                row.extend([
                    f.instance_property('CreationClassName'),
                    f.instance_property('SystemCreationClassName'),
                    f.instance_property('SystemName')])
            rows.append(row)
        title = "Indication Filters: submgr-id={0}, svr-id={1} type={2}". \
            format(csm.submgr_id, csm.server_id, filterchoice_opt)
        context.spinner_stop()
        click.echo(format_table(rows, headers, title=title,
                                table_format=output_format))
    else:
        # Unreachable: validate_output_format limits formats to CIM/TABLE.
        assert False, "{0} Invalid output format for this command". \
            format(output_format)
def cmd_subscription_list_subscriptions(context, options):
    """
    Display the list of indication subscriptions on the defined server.
    """
    output_format = validate_output_format(context.output_format,
                                           ['CIM', 'TABLE'],
                                           default_format="table")
    csm = get_CmdSubscriptionManager(context, options)
    svr_subscriptions = csm.get_subscriptions_for_owned_choice(options['type'])
    # Get all destinations and filters
    svr_destinations = csm.get_destinations(False)
    svr_filters = csm.get_filters(False)
    details_opt = options['detail']
    # Otherwise display subscriptions, indications, filters.
    # For each subscription, display the subscription, filter,
    # and destination
    inst_list = []
    if output_format_is_cimobject(output_format):
        for subscription in svr_subscriptions:
            inst_list.append(subscription)
            # Only show handler and filter instances if detail option
            if details_opt:
                for filter_ in svr_filters:
                    if subscription.path['Filter'] == filter_.path:
                        inst_list.append(filter_)
                for dest in svr_destinations:
                    if subscription.path['Handler'] == dest.path:
                        inst_list.append(dest)
        if options['names_only']:
            paths = [inst.path for inst in svr_subscriptions]
            display_cim_objects(context, paths, output_format,
                                options['summary'])
        elif options['summary'] or not details_opt:
            display_cim_objects(context, inst_list,
                                output_format='mof', summary=options['summary'])
        elif details_opt:
            display_inst_nonnull_props(context, options, inst_list,
                                       output_format)
        else:
            # NOTE(review): this branch looks unreachable -- when the
            # previous elif is False, details_opt must be True.
            display_cim_objects(context, inst_list, output_format,
                                summary=options['summary'])
    elif output_format_is_table(output_format):
        if options['names_only']:
            paths = [inst.path for inst in svr_subscriptions]
            display_cim_objects(context, paths, output_format)
            return
        headers = ['Ownership', 'Handler\nIdentity', 'Filter\nIdentity',
                   'Handler\nDestination', 'Filter\nQuery',
                   'Filter Query\nlanguage', 'Subscription\nStartTime']
        if details_opt:
            # NOTE(review): these detail headers do not match the detail
            # row values appended below (CreationClassName /
            # SystemCreationClassName) -- confirm which is intended.
            headers.extend(
                ['TimeOfLast\nStateChange', 'Subscription\nState'])
        rows = []
        conn = context.pywbem_server.conn
        for subscription in svr_subscriptions:
            is_ = IndicationSubscription(csm, subscription)
            # Fetch the referenced filter and handler instances to fill
            # in the row details.
            try:
                filter_inst = conn.GetInstance(subscription.path['Filter'])
                dest_inst = conn.GetInstance(subscription.path['Handler'])
            except Error as er:
                raise click.ClickException("GetInstance Failed {0}".format(er))
            id_ = IndicationDestination(csm, dest_inst)
            if_ = IndicationFilter(csm, filter_inst)
            start_time = is_.instance_property('SubscriptionStartTime')
            start_time = start_time.datetime.strftime("%x %X") if start_time \
                else ""
            row = [is_.owned_flag_str,
                   "{0}({1})".format(id_.identity, id_.owned_flag_str),
                   "{0}({1})".format(if_.identity, if_.owned_flag_str),
                   dest_inst['Destination'],
                   fold_strings(if_.instance_property('query'), 30),
                   filter_inst['QueryLanguage'],
                   start_time]
            if details_opt:
                row.extend([
                    is_.instance_property('CreationClassName'),
                    is_.instance_property('SystemCreationClassName')])
            rows.append(row)
        title = "Indication Subscriptions: submgr-id={0}, svr-id={1}, " \
            "type={2}".format(csm.submgr_id, csm.server_id, options['type'])
        context.spinner_stop()
        click.echo(format_table(rows, headers, title=title,
                                table_format=output_format))
    else:
        # Unreachable: validate_output_format limits formats to CIM/TABLE.
        assert False, "{0} Invalid output format for this command". \
            format(output_format)
def verify_instances_removal(instance_names, instance_type):
    """
    Prompt the user to confirm removal of the instance path(s) in
    instance_names (a single path or a list of paths).  Raises
    click.ClickException when the user does not confirm.
    """
    if not isinstance(instance_names, list):
        paths_text = instance_names
    else:
        paths_text = "\n".join(map(str, instance_names))
    prompt = "Verify {0} instance(s) to be removed:\n {1}".format(
        instance_type, paths_text)
    if not verify_operation(prompt):
        raise click.ClickException("Instances not removed.")
def cmd_subscription_remove_destination(context, identity, options):
    """
    Remove multiple destination objects from the WBEM Server.
    """
    csm = get_CmdSubscriptionManager(context, options)
    owned_flag_opt = options['owned']
    # Owned destinations carry the owned prefix in the Name property;
    # prepend it when the user supplied only the bare identity.
    if owned_flag_opt and not identity.startswith(csm.owned_destination_prefix):
        target_name = "{0}:{1}".format(csm.owned_destination_prefix, identity)
    else:
        target_name = identity
    dest_insts = csm.get_destinations(owned_flag_opt)
    matching_destinations = [d for d in dest_insts if d['Name'] == target_name]
    if not matching_destinations:
        raise click.ClickException(
            "No {0} destination found for identity={1}, Name-property={2}".
            format(owned_flag_str(owned_flag_opt), identity, target_name))
    if len(matching_destinations) == 1:
        destination_path = matching_destinations[0].path
    else:  # Multiple instances returned
        context.spinner_stop()
        click.echo('{0} "{1}" multiple matching destinations'.
                   format(owned_flag_str(owned_flag_opt), identity))
        if options['select']:
            destination_path = pick_one_path_from_instances_list(
                csm, matching_destinations,
                "Pick indication destination to remove")
        else:
            inst_display = [IndicationDestination(csm, d).select_id_str() for
                            d in matching_destinations]
            raise click.ClickException(
                "Remove failed. Multiple destinations meet criteria "
                "identity={0}, owned={1}. Use --select option to pick one "
                "destination:\n * {2}".
                format(identity, owned_flag_str(owned_flag_opt),
                       "\n * ".join(inst_display)))
    if options['verify']:
        verify_instances_removal(destination_path, 'destination')
    # Capture the Name before removal so it can be reported afterwards.
    name_property = destination_path['Name']
    csm.remove_destinations(destination_path)
    context.spinner_stop()
    click.echo("Removed {0} indication destination: identity={1}, Name={2}.".
               format(owned_flag_str(owned_flag_opt), identity, name_property))
    if context.verbose:
        click.echo("indication destination path: {0}.".format(destination_path))
    return
def cmd_subscription_remove_filter(context, identity, options):
    """
    Remove a single indication filter found by the get_all_filters
    method.
    """
    csm = get_CmdSubscriptionManager(context, options)
    owned_flag_opt = options['owned']
    # Determine if name should include owned identity components.
    # Search depends on correct definition of owned option.
    if owned_flag_opt and not identity.startswith(csm.owned_filter_prefix):
        target_name = "{0}:{1}".format(csm.owned_filter_prefix, identity)
    else:
        target_name = identity
    filter_insts = csm.get_filters(owned_flag_opt)
    matching_filters = [f for f in filter_insts if f['Name'] == target_name]
    if not matching_filters:
        raise click.ClickException(
            "No {0} filter found for identity={1}, Name-property={2}, ".
            format(owned_flag_str(owned_flag_opt), identity, target_name))
    # Multiples can only occur if outside client has added filters that match
    # name but not other components of path. Owned flag on cmd eliminates
    # multiples if permanent and owned ids are the same.
    if len(matching_filters) > 1:
        context.spinner_stop()
        click.echo('{0} "{1}" multiple matching filters.'.
                   format(owned_flag_str(owned_flag_opt), identity))
        if options['select']:
            filter_path = pick_one_path_from_instances_list(
                csm, matching_filters, "Pick indication filter to remove")
        else:
            inst_disp = [IndicationFilter(csm, f).select_id_str()
                         for f in matching_filters]
            raise click.ClickException(
                "Remove failed. Multiple filters meet criteria identity={0}, "
                "owned={1}. Use --select option to pick one filter:\n * {2}".
                format(identity, owned_flag_str(owned_flag_opt),
                       "\n * ".join(inst_disp)))
    else:  # one filter returned
        filter_path = matching_filters[0].path
    if options['verify']:
        verify_instances_removal(filter_path, 'filter')
    # Capture the Name before removal so it can be reported afterwards.
    name_property = filter_path['Name']
    csm.remove_filter(filter_path)
    context.spinner_stop()
    click.echo("Removed {0} indication filter: identity={1}, Name={2}.".
               format(owned_flag_str(owned_flag_opt), identity, name_property))
    if context.verbose:
        click.echo("Indication filter path: {0}.".format(filter_path))
    return
def cmd_subscription_remove_subscription(context, destination_identity,
                                         filter_identity, options):
    """
    Remove an indication subscription from the WBEM server. Removal is based
    on the same parameter set as create, a destination and filter because
    there is no identifying name on subscriptions.
    """
    csm = get_CmdSubscriptionManager(context, options)
    # find instances for the associations using the input identity parameters
    dest_inst, filter_inst = get_insts_for_subscription_identities(
        csm, destination_identity, filter_identity, 'remove-subscription',
        options['select'])
    # FUTURE: account for multiples subscription cases.
    # FUTURE: account for owned/not-owned from the dest and filters when that
    # works.
    subscriptions = csm.get_subscriptions(False)
    # Find the subscription defined by destination_identity and filter_identity
    remove_list = []
    for subscription in subscriptions:
        if subscription.path['Filter'] == filter_inst.path and \
                subscription.path['Handler'] == dest_inst.path:
            remove_list.append(subscription)
    if not remove_list:
        raise click.ClickException(
            "Arguments destination_id={0} and filter_id={1} did not locate "
            "any subscriptions to remove."
            .format(destination_identity, filter_identity))
    # NOTE(review): remove_list is always non-empty here because of the
    # guard above, so this test is redundant.
    if remove_list:
        remove_paths = [i.path for i in remove_list]
        if options['verify']:
            verify_instances_removal(remove_paths, 'subscription')
        # Get the list of destination paths to possibly remove these
        # associations.
        destination_paths = [i.path['Handler'] for i in remove_list]
        filter_paths = [i.path['Filter'] for i in remove_list]
        csm.remove_subscriptions(remove_paths)
        context.spinner_stop()
        click.echo("Removed {0} subscription(s) for destination-id: {1}, "
                   "filter-id: {2}.".
                   format(len(remove_paths), destination_identity,
                          filter_identity))
        if context.verbose:
            subscription_paths_str = '\n'.join([str(x) for x in remove_paths])
            click.echo("Removed subscription(s) paths: {0}".
                       format(subscription_paths_str))
        # If option set, remove filter and destination if not used in other
        # associations:
        # FUTURE: should we only remove owned instances???
        if options['remove_associated_instances']:
            conn = context.pywbem_server.conn
            for dest_path in destination_paths:
                dest_refs = conn.ReferenceNames(
                    dest_path, ResultClass=SUBSCRIPTION_CLASSNAME,
                    Role='Handler')
                if not dest_refs:
                    csm.remove_destinations(dest_path)
                    click.echo("Removed destination: {0}".
                               format(dest_path))
            for filter_path in filter_paths:
                filter_refs = conn.ReferenceNames(
                    filter_path, ResultClass=SUBSCRIPTION_CLASSNAME,
                    Role='Filter')
                if not filter_refs:
                    csm.remove_filter(filter_path)
                    click.echo("Removed filter: {0}".format(filter_path))
def cmd_subscription_remove_server(context, options):
    """
    Remove the current server_id which also un-registers listener destinations
    and removes all owned destinations, filters, and subscriptions.
    """
    csm = get_CmdSubscriptionManager(context, options)
    # Collect the owned objects first so their counts can be reported
    # before they are removed.
    owned_dests = csm.get_destinations(True)
    owned_filters = csm.get_filters(True)
    owned_subs = csm.get_subscriptions(True)
    context.spinner_stop()
    click.echo("Removing owned destinations, filters, and subscriptions "
               "for server-id {0}. Remove counts: destinations={1}, "
               "filters={2}, subscriptions={3}".
               format(csm.server_id, len(owned_dests), len(owned_filters),
                      len(owned_subs)))
    csm.remove_server()
    # Drop the cached subscription manager; a later command recreates it.
    context.pywbem_server.subscription_manager = None
| {
"content_hash": "b72338fb53b20a486dad65f82c47f5dc",
"timestamp": "",
"source": "github",
"line_count": 2120,
"max_line_length": 124,
"avg_line_length": 40.9566037735849,
"alnum_prop": 0.6339660017505874,
"repo_name": "pywbem/pywbemtools",
"id": "cc7dd10bffe270d37010224243e3b5fa772a104d",
"size": "87470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywbemtools/pywbemcli/_cmd_subscription.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "595"
},
{
"name": "Makefile",
"bytes": "32698"
},
{
"name": "Python",
"bytes": "1960612"
},
{
"name": "Shell",
"bytes": "18849"
}
],
"symlink_target": ""
} |
"""Shared utilities for the build recipe module."""
# This lists the products we want to isolate as outputs for future steps.
# Entries are glob patterns resolved against the build output directory;
# Windows executables (*.exe), Mac app bundles (*.app) and shared libraries
# (*.so/*.dll/*.dylib) are covered alongside the bare binaries.
DEFAULT_BUILD_PRODUCTS = [
    'dm',
    'dm.exe',
    'dm.app',
    'fm',
    'fm.exe',
    'nanobench.app',
    'get_images_from_skps',
    'get_images_from_skps.exe',
    'nanobench',
    'nanobench.exe',
    'skpbench',
    'skpbench.exe',
    '*.so',
    '*.dll',
    '*.dylib',
    'skia_launcher',
    'skottie_tool',
    'lib/*.so',
    'run_testlab',
]
def py_to_gn(val):
"""Convert val to a string that can be used as GN args."""
if isinstance(val, bool):
return 'true' if val else 'false'
elif '%s' % val == val:
# TODO(dogben): Handle quoting "$\
return '"%s"' % val
elif isinstance(val, (list, tuple)):
return '[%s]' % (','.join(py_to_gn(x) for x in val))
elif isinstance(val, dict):
gn = ' '.join(
'%s=%s' % (k, py_to_gn(v)) for (k, v) in sorted(val.items()))
return gn
else: # pragma: nocover
raise Exception('Converting %s to gn is not implemented.' % type(val))
def copy_listed_files(api, src, dst, product_list):
    """Copy listed files src to dst.

    Runs an inline Python step that globs each pattern in product_list
    under src and moves the matches into dst, preserving relative paths.
    The %% escapes inside the program text survive the outer %-format
    that injects the product list.
    """
    api.python.inline(
        name='copy build products',
        program='''import errno
import glob
import os
import shutil
import sys

src = sys.argv[1]
dst = sys.argv[2]
build_products = %s

try:
  os.makedirs(dst)
except OSError as e:
  if e.errno != errno.EEXIST:
    raise

for pattern in build_products:
  path = os.path.join(src, pattern)
  for f in glob.glob(path):
    dst_path = os.path.join(dst, os.path.relpath(f, src))
    if not os.path.isdir(os.path.dirname(dst_path)):
      os.makedirs(os.path.dirname(dst_path))
    print('Copying build product %%s to %%s' %% (f, dst_path))
    shutil.move(f, dst_path)
''' % str(product_list),
        args=[src, dst],
        infra_step=True)
| {
"content_hash": "a6eae3cf940bcbac04c43c347acac403",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 74,
"avg_line_length": 24.33783783783784,
"alnum_prop": 0.6141032759578012,
"repo_name": "aosp-mirror/platform_external_skia",
"id": "060c711506b6e68bd77e9981fa2d65045663eb89",
"size": "1965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "infra/bots/recipe_modules/build/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "12716940"
},
{
"name": "Batchfile",
"bytes": "904"
},
{
"name": "C",
"bytes": "620774"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "27394853"
},
{
"name": "GLSL",
"bytes": "67013"
},
{
"name": "Go",
"bytes": "80137"
},
{
"name": "HTML",
"bytes": "1002516"
},
{
"name": "Java",
"bytes": "32794"
},
{
"name": "JavaScript",
"bytes": "51666"
},
{
"name": "Lex",
"bytes": "4372"
},
{
"name": "Lua",
"bytes": "70974"
},
{
"name": "Makefile",
"bytes": "2295"
},
{
"name": "Objective-C",
"bytes": "35223"
},
{
"name": "Objective-C++",
"bytes": "34410"
},
{
"name": "PHP",
"bytes": "120845"
},
{
"name": "Python",
"bytes": "1002226"
},
{
"name": "Shell",
"bytes": "49974"
}
],
"symlink_target": ""
} |
from rpython.rlib.objectmodel import specialize, we_are_translated, compute_hash
from rpython.tool.pairtype import extendabletype
# Union-Object to represent a json structure in a static way
class JsonBase(object):
    """Abstract base of the static (RPython-friendly) JSON object model.

    The class-level is_* flags allow cheap type tests without isinstance;
    each concrete subclass sets exactly one of them to True.  The value_*
    accessors raise TypeError unless overridden by the matching subclass.
    """
    __metaclass__ = extendabletype

    is_string = is_int = is_float = is_bool = is_object = is_array = is_null = False

    def __init__(self):
        raise NotImplementedError("abstract base class")

    def tostring(self):
        # Serialize back to JSON text; implemented by each subclass.
        raise NotImplementedError("abstract base class")

    def is_primitive(self):
        return False

    def _unpack_deep(self):
        "NON_RPYTHON"

    def value_array(self):
        raise TypeError

    def value_object(self):
        raise TypeError

    def value_string(self):
        raise TypeError

    def value_float(self):
        raise TypeError

    def value_int(self):
        raise TypeError
class JsonPrimitive(JsonBase):
    """Base for scalar JSON values (null, booleans, numbers, strings)."""

    def __init__(self):
        pass

    def is_primitive(self):
        return True
class JsonNull(JsonPrimitive):
    """The JSON ``null`` value; shared via the json_null singleton."""

    is_null = True

    def tostring(self):
        return "null"

    def _unpack_deep(self):
        return None
class JsonFalse(JsonPrimitive):
    """The JSON ``false`` value; shared via the json_false singleton."""

    is_bool = True

    def tostring(self):
        return "false"

    def _unpack_deep(self):
        return False
class JsonTrue(JsonPrimitive):
    """The JSON ``true`` value; shared via the json_true singleton."""

    is_bool = True

    def tostring(self):
        return "true"

    def _unpack_deep(self):
        return True
class JsonInt(JsonPrimitive):
    """A JSON integer value."""

    is_int = True

    def __init__(self, value):
        self.value = value

    def tostring(self):
        return str(self.value)

    def _unpack_deep(self):
        return self.value

    def value_int(self):
        return self.value
class JsonFloat(JsonPrimitive):
    """A JSON floating-point value."""

    is_float = True

    def __init__(self, value):
        self.value = value

    def tostring(self):
        return str(self.value)

    def value_float(self):
        return self.value

    def _unpack_deep(self):
        return self.value
class JsonString(JsonPrimitive):
    """A JSON string, stored as a utf-8 encoded byte string.

    hash_w/eq_w allow instances to act as dict keys in RPython-level
    hash tables.
    """

    is_string = True

    def __init__(self, value):
        self.value = value

    def tostring(self):
        # this function should really live in a slightly more accessible place
        from pypy.objspace.std.bytesobject import string_escape_encode
        return string_escape_encode(self.value, '"')

    def _unpack_deep(self):
        return self.value

    def value_string(self):
        return self.value

    def hash_w(self):
        x = compute_hash(self.value)
        # -1 is reserved as an error marker; shift it to -2.
        x -= (x == -1)
        return x

    def eq_w(self, w_other):
        assert isinstance(w_other, JsonString)
        return self.value == w_other.value
class JsonObject(JsonBase):
    """A JSON object: wraps a dict mapping str keys to JsonBase values."""

    is_object = True

    def __init__(self, dct):
        self.value = dct

    def tostring(self):
        return "{%s}" % ", ".join(["\"%s\": %s" % (key, self.value[key].tostring()) for key in self.value])

    def _unpack_deep(self):
        result = {}
        for key, value in self.value.iteritems():
            result[key] = value._unpack_deep()
        return result

    def value_object(self):
        return self.value
class JsonArray(JsonBase):
    """A JSON array: wraps a list of JsonBase values."""

    is_array = True

    def __init__(self, lst):
        self.value = lst

    def tostring(self):
        return "[%s]" % ", ".join([e.tostring() for e in self.value])

    def _unpack_deep(self):
        return [e._unpack_deep() for e in self.value]

    def value_array(self):
        return self.value
# Shared singletons for the three valueless JSON primitives.
json_null = JsonNull()
json_true = JsonTrue()
json_false = JsonFalse()
class FakeSpace(object):
    """Minimal stand-in for PyPy's object space.

    Implements just the operations PyPy's JSON decoder invokes, producing
    JsonBase objects instead of wrapped interpreter-level objects.
    """

    w_None = json_null
    w_True = json_true
    w_False = json_false
    w_ValueError = ValueError
    w_UnicodeDecodeError = UnicodeDecodeError
    w_UnicodeEncodeError = UnicodeEncodeError
    w_int = JsonInt
    w_float = JsonFloat

    def newtuple(self, items):
        # Tuples are never consumed by this decoder's callers.
        return None

    def newdict(self):
        return JsonObject({})

    def newlist(self, items):
        # NOTE(review): the items argument is ignored -- presumably the
        # decoder always passes [] and fills the array via call_method.
        return JsonArray([])

    def call_method(self, obj, name, arg):
        # The decoder only ever calls list.append this way.
        assert name == 'append'
        assert isinstance(obj, JsonArray)
        obj.value.append(arg)
    call_method._dont_inline_ = True

    def call_function(self, w_func, *args_w):
        assert 0

    def setitem(self, d, key, value):
        assert isinstance(d, JsonObject)
        assert isinstance(key, JsonString)
        d.value[key.value_string()] = value

    def newutf8(self, x, ln):
        return JsonString(x)

    def newtext(self, x):
        return JsonString(x)
    newbytes = newtext

    def unicode_w(self, s):
        assert isinstance(s, JsonString)
        string = s.value_string()
        return string.decode('utf-8')

    def newint(self, x):
        return JsonInt(x)

    def newfloat(self, x):
        return JsonFloat(x)
# Single shared fake space instance driving the decoder below.
fakespace = FakeSpace()

from pypy.module._pypyjson.interp_decoder import JSONDecoder
class OwnJSONDecoder(JSONDecoder):
    """PyPy JSONDecoder subclass wired to FakeSpace.

    Errors are reported as plain ValueError, and objects are created as
    JsonObject instances with unwrapped str keys.
    """

    def __init__(self, s):
        self.space = fakespace
        JSONDecoder.__init__(self, fakespace, s)

    @specialize.arg(1)
    def _raise(self, msg, *args):
        raise ValueError(msg % args)

    def decode_float(self, i):
        # Scan over every character that can occur in a float literal,
        # then convert the slice in one step.
        start = i
        while self.ll_chars[i] in "+-0123456789.eE":
            i += 1
        self.pos = i
        return self.space.newfloat(float(self.getslice(start, i)))

    def _create_dict(self, dct):
        # Keys arrive as JsonString wrappers; unwrap them to plain str
        # keys for the JsonObject payload.
        d = {}
        for key, value in dct.iteritems():
            d[key.value_string()] = value
        return JsonObject(d)
def loads(s):
    """Parse JSON text s and return the corresponding JsonBase tree.

    Untranslated, the stdlib json module is used and its output converted
    via _convert (faster to run, and avoids the RPython decoder).  When
    translated, PyPy's RPython JSON decoder is driven through FakeSpace.

    Raises ValueError on malformed input or trailing data.
    """
    if not we_are_translated():
        import json
        from pycket.util import PerfRegion
        data = json.loads(s)
        with PerfRegion("json_convert"):
            return _convert(data)
    decoder = OwnJSONDecoder(s)
    try:
        w_res = decoder.decode_any(0)
        # Anything but whitespace after the parsed value is an error.
        i = decoder.skip_whitespace(decoder.pos)
        if i < len(s):
            start = i
            end = len(s) - 1
            raise ValueError("Extra data: char %d - %d" % (start, end))
        return w_res
    finally:
        decoder.close()
def _convert(data):
    """NOT_RPYTHON: recursively wrap a value produced by the stdlib json
    module into the JsonBase object tree.  Only used before translation
    (see loads).
    """
    # The identity checks must precede the int test: True/False are ints.
    if data is None:
        return json_null
    if data is False:
        return json_false
    if data is True:
        return json_true
    if isinstance(data, int):
        return JsonInt(data)
    if isinstance(data, float):
        return JsonFloat(data)
    if isinstance(data, unicode):
        return JsonString(data.encode("utf-8"))
    if isinstance(data, list):
        return JsonArray([_convert(x) for x in data])
    if isinstance(data, dict):
        return JsonObject({key.encode("utf-8"): _convert(value)
                           for (key, value) in data.iteritems()})
    # NOTE(review): unsupported types fall through to an implicit None
    # (e.g. long on Python 2) -- confirm this is intended.
| {
"content_hash": "e0b1bbd12492f2085f820d3f59aff5f6",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 107,
"avg_line_length": 23.13684210526316,
"alnum_prop": 0.5991810737033667,
"repo_name": "samth/pycket",
"id": "0bd25f6f85edc9c56d290153b6f4555375a000c5",
"size": "6594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycket/pycket_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654"
},
{
"name": "Eagle",
"bytes": "1986"
},
{
"name": "KiCad",
"bytes": "76411"
},
{
"name": "Makefile",
"bytes": "2680"
},
{
"name": "Python",
"bytes": "1050245"
},
{
"name": "Racket",
"bytes": "694687"
},
{
"name": "Scheme",
"bytes": "215"
},
{
"name": "Shell",
"bytes": "8656"
}
],
"symlink_target": ""
} |
"""
rohmu - azure object store interface
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
import dateutil.parser
import time
from azure.storage import BlobService # pylint: disable=no-name-in-module, import-error
from .base import BaseTransfer
class AzureTransfer(BaseTransfer):
def __init__(self, account_name, account_key, container_name, prefix=None):
    """Create a transfer bound to one Azure blob container, creating the
    container if it does not exist yet."""
    # NOTE: Azure wants all paths to start with a slash
    prefix = "/{}".format(prefix.lstrip("/") if prefix else "")
    super().__init__(prefix=prefix)
    self.account_name = account_name
    self.account_key = account_key
    self.container_name = container_name
    self.conn = BlobService(account_name=self.account_name, account_key=self.account_key)
    self.container = self.get_or_create_container(self.container_name)
    self.log.debug("AzureTransfer initialized")
    # XXX: AzureTransfer isn't actively tested and its error handling is probably lacking
    self.log.warning("AzureTransfer is experimental and has not been thoroughly tested")
def get_metadata_for_key(self, key):
key = self.format_key_for_backend(key)
return self._list_blobs(key)[0]["metadata"]
def _metadata_for_key(self, key):
return self._list_blobs(key)[0]["metadata"]
def list_path(self, key):
path = self.format_key_for_backend(key, trailing_slash=True)
return self._list_blobs(path)
def _list_blobs(self, path):
self.log.debug("Listing path %r", path)
items = self.conn.list_blobs(self.container_name, prefix=path, delimiter="/", include="metadata")
result = []
for item in items:
result.append({
"last_modified": dateutil.parser.parse(item.properties.last_modified),
"metadata": item.metadata,
"name": self.format_key_from_backend(item.name),
"size": item.properties.content_length,
})
return result
def delete_key(self, key):
key = self.format_key_for_backend(key)
self.log.debug("Deleting key: %r", key)
return self.conn.delete_blob(self.container_name, key)
def get_contents_to_file(self, key, filepath_to_store_to):
key = self.format_key_for_backend(key)
self.log.debug("Starting to fetch the contents of: %r to: %r", key, filepath_to_store_to)
return self.conn.get_blob_to_path(self.container_name, key, filepath_to_store_to)
def get_contents_to_fileobj(self, key, fileobj_to_store_to):
key = self.format_key_for_backend(key)
self.log.debug("Starting to fetch the contents of: %r", key)
return self.conn.get_blob_to_file(self.container_name, key, fileobj_to_store_to)
def get_contents_to_string(self, key):
key = self.format_key_for_backend(key)
self.log.debug("Starting to fetch the contents of: %r", key)
return self.conn.get_blob_to_bytes(self.container_name, key), self._metadata_for_key(key)
def store_file_from_memory(self, key, memstring, metadata=None):
key = self.format_key_for_backend(key)
# Azure requires all metadata keys and values to be strings
metadata_to_send = dict((str(k), str(v)) for k, v in metadata.items())
self.conn.put_block_blob_from_bytes(self.container_name, key, memstring,
x_ms_meta_name_values=metadata_to_send)
def store_file_from_disk(self, key, filepath, metadata=None, multipart=None):
key = self.format_key_for_backend(key)
# Azure requires all metadata keys and values to be strings
metadata_to_send = dict((str(k), str(v)) for k, v in metadata.items())
self.conn.put_block_blob_from_path(self.container_name, key, filepath,
x_ms_meta_name_values=metadata_to_send)
def get_or_create_container(self, container_name):
start_time = time.time()
self.conn.create_container(container_name)
self.log.debug("Got/Created container: %r successfully, took: %.3fs", container_name, time.time() - start_time)
return container_name
| {
"content_hash": "ea0578b2e89ca68733107815d80f14f4",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 119,
"avg_line_length": 46.91011235955056,
"alnum_prop": 0.6467065868263473,
"repo_name": "kri5/pghoard",
"id": "bedd19be339d7c825a26b59a2c3ac206f5f57110",
"size": "4175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pghoard/rohmu/object_storage/azure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1692"
},
{
"name": "Python",
"bytes": "268988"
}
],
"symlink_target": ""
} |
"""Metadata linking semantic meaning of EIA 861 spreadsheet columns across years."""
| {
"content_hash": "aca50c5d77bf93ffebaabc674ff6a347",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 84,
"avg_line_length": 85,
"alnum_prop": 0.788235294117647,
"repo_name": "catalyst-cooperative/pudl",
"id": "9b9a38a27bdcabd6ffa51a826eadf51f6d0fb5c3",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/pudl/package_data/eia861/column_maps/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "7488"
},
{
"name": "Python",
"bytes": "1658629"
},
{
"name": "Shell",
"bytes": "5118"
}
],
"symlink_target": ""
} |
import json
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
from scipy import stats
import numpy
import commonfunctions as cf
# Resolve the repository root (parent of the current working directory) so
# the output image lands in <root>/images regardless of invocation directory.
root_directory = os.path.dirname(os.path.abspath(os.curdir))
# Load pre-computed complexity results; assumes a list of
# {'party': 'r'|'d', 'data': [x_values, y_values]} records —
# TODO confirm against the producer of complexity-time-party.json.
with open('complexity-time-party.json', 'r') as f:
    results = json.load(f)
# NOTE(review): if either party is absent from the JSON, r or d stays None
# and the ax.plot() calls below will fail.
r, d = [None] * 2
for party in results:
    if party['party'] == 'r':
        r = party['data']
    elif party['party'] == 'd':
        d = party['data']
# The graph plots on the Y axis the relative amount of common nouns
#
# This is optional code for linear regression information/lines
#
# linr = stats.linregress(results[0], results[1])
# print stats.linregress(results[0], results[1])
# x = numpy.linspace(1960,2020,10)
# y = [linr.intercept + linr.slope * x_ for x_ in x]
# Style and build the figure (Python 2 / old matplotlib APIs:
# xrange, ax.grid(b=...), set_axis_bgcolor).
plt.style.use('ggplot')
fig = plt.figure(0)
ax = fig.gca()
ax.grid(b=False)
ax.set_axis_bgcolor('white')
ax.set_xlim([1956, 2020])
ax.set_xticks(xrange(1960, 2020, 8))
# data[0] holds the x series (years), data[1] the y series (proportions).
ax.plot(r[0], r[1], label='Republican', lw=2.5)
ax.set_xlabel('Year')
ax.plot(d[0], d[1], label='Democrat', lw=2.5)
ax.legend()
ax.set_ylabel('Proportion of nouns in dictionary of 504 most common nouns')
ax.set_title('Occurrence of the most common 504 nouns in US presidential election campaigns',
             fontdict={'fontsize': 11,
                       'fontweight': mpl.rcParams['axes.titleweight'],
                       'verticalalignment': 'baseline',
                       'horizontalalignment': 'center'},
             y=1.05)
# Save as SVG and print an embeddable link for the README.
plt.savefig(os.path.join(root_directory, 'images', 'analysis-complexity-time-party.svg'), format='svg')
print cf.generate_rawgit_img_embed(os.path.join('images', 'analysis-complexity-time-party.svg'))
| {
"content_hash": "d7c629027c280cc7d95eebb7f6cb2b56",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 103,
"avg_line_length": 30.6,
"alnum_prop": 0.6619132501485443,
"repo_name": "keelanfh/electionary",
"id": "75a1298380708b0fcb7718fec83232002c7f5099",
"size": "1683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/complexity-time-party-graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106552"
}
],
"symlink_target": ""
} |
import datetime
from haystack import indexes
from crowdcop_web.models import Campaign
class CampaignIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over Campaign objects.

    ``text`` is the template-rendered document field; the campaign
    description is also indexed directly from the model attribute.
    """

    text = indexes.CharField(document=True, use_template=True)
    campaign_description = indexes.CharField(model_attr='campaign_description')

    def get_model(self):
        """Return the model class this index is built over."""
        return Campaign

    def index_queryset(self, using=None):
        """Return the manager supplying the objects to index."""
        return self.get_model().objects
| {
"content_hash": "6134cc6a5f7c7948ac1053067b066eeb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 30.46153846153846,
"alnum_prop": 0.8055555555555556,
"repo_name": "bocaaust/CrowdCop",
"id": "08d46b70bcdff93c1977fe3cef87cb6b426ab9ce",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CrowdCop_test/crowdcop/search/search_indexes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "28"
},
{
"name": "CSS",
"bytes": "104673"
},
{
"name": "HTML",
"bytes": "199204"
},
{
"name": "JavaScript",
"bytes": "282446"
},
{
"name": "PHP",
"bytes": "229374"
},
{
"name": "Python",
"bytes": "60610"
}
],
"symlink_target": ""
} |
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import sys
sys.path.append("../..")
import pyalgotrade.logger
import lxml.html
import symbolsxml
logger = pyalgotrade.logger.getLogger("get_merval_symbols")
def find_company(htmlTree, ticker):
    """Return the company name linked from the quote table for *ticker*,
    or None when the page has no matching anchor."""
    matches = htmlTree.xpath("//td[1]/a[@href='/q/pr?s=%s']/text()" % (ticker))
    if not matches:
        return None
    return matches[0]
def find_sector(htmlTree):
    """Return the sector name from the profile table, or None if absent."""
    matches = htmlTree.xpath("//th[1][text() = 'Sector:']/../td/a[1]/text()")
    if not matches:
        return None
    return matches[0]
def find_industry(htmlTree):
    """Return the industry name from the profile table, or None if absent."""
    matches = htmlTree.xpath("//th[1][text() = 'Industry:']/../td/a[1]/text()")
    if not matches:
        return None
    return matches[0]
def process_symbol(writer, symbol):
    """Fetch company/sector/industry info for *symbol* from Yahoo Finance
    and record it via *writer*. Errors are logged, never propagated."""
    try:
        logger.info("Getting info for %s" % (symbol))
        url = "http://finance.yahoo.com/q/in?s=%s+Industry" % (symbol)
        # lxml fetches and parses the remote page directly from the URL.
        htmlTree = lxml.html.parse(url)
        company = find_company(htmlTree, symbol)
        if not company:
            # Without a company name the entry is useless; bail out via the
            # catch-all handler below.
            raise Exception("Company name not found")
        # Sector and industry are optional: fall back to empty strings.
        sector = find_sector(htmlTree)
        if not sector:
            sector = ""
            logger.warning("Sector not found")
        industry = find_industry(htmlTree)
        if not industry:
            industry = ""
            logger.warning("Industry not found")
        writer.addStock(symbol, company, sector, industry)
    except Exception, e:
        # Best-effort scraping: log and continue with the next symbol.
        logger.error(str(e))
def main():
    """Read merval-symbols.txt, scrape data for each symbol, and write
    the result (plus the ^MERV index entry) to merval.xml."""
    try:
        writer = symbolsxml.Writer()
        # One ticker symbol per line in the input file.
        for symbol in open("merval-symbols.txt", "r"):
            symbol = symbol.strip()
            process_symbol(writer, symbol)
        # Index
        writer.addIndex("^MERV", "Merval")
        logger.info("Writing merval.xml")
        writer.write("merval.xml")
    except Exception, e:
        logger.error(str(e))
# Entry point when executed as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "9e6cb0a045be7dd4b8c2ec6d29f1415f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 24.3125,
"alnum_prop": 0.5825192802056556,
"repo_name": "cgqyh/pyalgotrade-mod",
"id": "ef1833e27338d54add85e3079afc1c0f56cb703f",
"size": "2559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/symbols/get_merval_symbols.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1066824"
},
{
"name": "Shell",
"bytes": "504"
}
],
"symlink_target": ""
} |
"""
The mixins module is a set of classes that provide action logic
re-used by **drf-ember** generic views.
"""
from rest_framework import mixins
from rest_framework import status
from rest_framework.response import Response
from .exceptions import JsonApiException
class ListModelMixin(object):
    """
    Provide a ``list`` action serializing a (possibly paginated) queryset.

    Raises:
        JsonApiException: If the unsupported ``include`` request
            parameter is present
    Returns:
        Response
    """
    def list(self, request, *args, **kwargs):
        if 'include' in request.query_params:
            raise JsonApiException()
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is None:
            # Pagination disabled: serialize the whole queryset.
            serializer = self.get_serializer(queryset, many=True)
            return Response(serializer.data)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
class RetrieveModelMixin(object):
    """
    Provide a ``retrieve`` action for a single model instance.

    Raises:
        JsonApiException: If the unsupported ``include`` request
            parameter is present
    Returns:
        Response
    """
    def retrieve(self, request, *args, **kwargs):
        if 'include' in request.query_params:
            raise JsonApiException()
        serializer = self.get_serializer(self.get_object())
        return Response(serializer.data)
class DestroyModelMixin(object):
    """
    Provide a ``destroy`` action that deletes a model instance.
    """
    def destroy(self, request, *args, **kwargs):
        """
        Delete the addressed resource.

        Returns:
            Response: Status 204 with a JSON API content type carrying
            bulk extension support
        """
        self.perform_destroy(self.get_object())
        return Response(
            status=status.HTTP_204_NO_CONTENT,
            content_type='application/vnd.api+json; supported-ext="bulk"')

    def perform_destroy(self, instance):
        """
        Hook for additional delete logic; the default simply deletes
        the given instance.

        Arguments:
            instance: A Django model.
        """
        instance.delete()
class UpdateModelMixin(object):
    """
    Provide ``update`` and ``partial_update`` actions for a model instance.
    """
    def update(self, request, *args, **kwargs):
        """
        Handle a (possibly partial) resource update.

        Returns:
            Response: Carries the serialized resource only when the
            server made changes of its own, the status reported by
            ``perform_update``, and a JSON API content type with bulk
            extension support.
        """
        partial = kwargs.pop('partial', False)
        serializer = self.get_serializer(
            self.get_object(), data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        update = self.perform_update(serializer)
        if update['has_server_update']:
            body = update['data']
        else:
            # JSON API: no representation when the client's attributes
            # are already current.
            body = None
        return Response(
            data=body,
            status=update['status'],
            content_type='application/vnd.api+json; supported-ext="bulk"')

    def perform_update(self, serializer):
        """
        Hook for additional server-side update logic.

        Per the JSON API specification, a server that changes the
        resource beyond what the client requested must return 200 OK
        with a representation of the updated resource; a server that
        accepts the update as-is must not include a representation.
        Override this in views that make such server-side changes and
        return an "update" dict reflecting them.

        Arguments:
            serializer: A Django REST Framework ``Serializer`` for one
                resource or a list of resources.

        Returns:
            dict: With keys:

            - ``data``: current state of the resource(s), as the
              serializer's ``data`` (Python native datatypes).
            - ``has_server_update``: whether a server-side change was
              made. ``False`` by default.
            - ``status``: integer HTTP status code for the response.
        """
        serializer.save()
        return {
            'data': serializer.data,
            'has_server_update': False,
            'status': 200,
        }

    def partial_update(self, request, *args, **kwargs):
        """
        Force the 'partial' kwarg before delegating to ``update``.
        """
        kwargs['partial'] = True
        return self.update(request, *args, **kwargs)
"content_hash": "44d2651ff5d5e249ee2ab9895fdd984b",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 95,
"avg_line_length": 36.24,
"alnum_prop": 0.6287711552612215,
"repo_name": "symfonico/drf-ember",
"id": "a03077214dfabb68b2cab05e2685af297a163f94",
"size": "5436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drf_ember/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183119"
}
],
"symlink_target": ""
} |
import json

import boto3

# Gather every output of the production CloudFormation stack into one dict.
cloudformation = boto3.client('cloudformation')
stack_pages = cloudformation.get_paginator('describe_stacks').paginate(
    StackName="antioch-prod")
details = {}
for page in stack_pages:
    for stack in page['Stacks']:
        details.update(
            (output['OutputKey'], output['OutputValue'])
            for output in stack['Outputs']
        )

# Rewrite zappa_settings.json with the networking and environment values
# taken from the stack outputs.
with open('zappa_settings.json') as f:
    cfg = json.load(f)

prod = cfg['prod']
prod['vpc_config']['SecurityGroupIds'] = [details['WebappSecurityGroup']]
prod['vpc_config']['SubnetIds'] = [
    details['PrivateSubnet1AID'],
    details['PrivateSubnet2AID'],
    details['PrivateSubnet3AID']
]
prod['environment_variables']['DB_HOST'] = details['DatabaseHost']
prod['environment_variables']['DB_PORT'] = details['DatabasePort']
prod['environment_variables']['STATIC_BUCKET'] = details['StaticBucketName']

with open('zappa_settings.json', 'w') as f:
    f.write(json.dumps(cfg, indent=4))

print("Updated zappa_settings.json with stack variables.")
"content_hash": "2b92d1578a0dbeb684ff15c21b54db8c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 34.86206896551724,
"alnum_prop": 0.6913946587537092,
"repo_name": "philchristensen/antioch",
"id": "5c19b8ea52e9e789ac574c87316ffd6d84bbc0b3",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/update_zappa_settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12977"
},
{
"name": "Dockerfile",
"bytes": "850"
},
{
"name": "HTML",
"bytes": "21856"
},
{
"name": "JavaScript",
"bytes": "18844"
},
{
"name": "Python",
"bytes": "353482"
},
{
"name": "Shell",
"bytes": "1260"
}
],
"symlink_target": ""
} |
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  use_cudnn = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  filter_grad = nn_ops.conv2d_backprop_filter(
      grad, array_ops.shape(op.inputs[1]), op.inputs[2],
      strides, padding, use_cudnn, data_format)
  out_grad = nn_ops.conv2d(
      grad, op.inputs[1], strides, padding, use_cudnn, data_format)
  # No gradient flows to the first input (the input-sizes tensor).
  return [None, filter_grad, out_grad]
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
  """The derivative of the softmax nonlinearity.

  With probs of shape [batch_size, dim],
  dsoftmax / dx = diag(softmax) - softmax * softmax', which expands to

      grad_x = (grad_softmax - sum(grad_softmax * softmax)) * softmax

  Args:
    op: the Softmax op.
    grad_softmax: the tensor representing the gradient w.r.t. the
      softmax output.

  Returns:
    gradient w.r.t the input to the softmax
  """
  # TODO(ilyasu): assert that the tensor has two dimensions at
  # graph-construction time? Alternatively: do different things
  # depending on the dimensionality of the input tensors.
  softmax = op.outputs[0]
  summed = math_ops.reduce_sum(grad_softmax * softmax, [1])
  return (grad_softmax - array_ops.reshape(summed, [-1, 1])) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(unused_bias_op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The tensor input's gradient is the received gradient unchanged.
  The bias input has one fewer dimension than ``received_grad``, so its
  gradient is the received gradient summed over every dimension except
  the last.

  Args:
    unused_bias_op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor. The gradients passed to the BiasOp.

  Returns:
    Two tensors: the gradient for the "tensor" input and the gradient
    for the "bias" input of the BiasOp.
  """
  reduce_dims = math_ops.range(array_ops.rank(received_grad) - 1)
  bias_grad = math_ops.reduce_sum(received_grad, reduce_dims)
  return (received_grad, bias_grad)
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
  """Gradient for Relu: delegates to the fused ReluGrad kernel, gated on the forward output."""
  return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
  """Gradient for Relu6: delegates to the fused Relu6Grad kernel, gated on the forward input."""
  return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
  """Gradient for Elu: delegates to the fused EluGrad kernel, using the forward output."""
  return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
  """Gradient for Softplus: delegates to the fused SoftplusGrad kernel, using the forward input."""
  return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
  """Gradient for Softsign: delegates to the fused SoftsignGrad kernel, using the forward input."""
  return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
  """Second-order gradient for Relu.

  Backprops through the ReluGrad op: the incoming gradient is gated by the
  original features input (op.inputs[1]); the features input itself gets a
  zero gradient of matching shape and dtype.
  """
  x = op.inputs[1]
  return (gen_nn_ops._relu_grad(grad, x),
          array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match dimensions of mat.

  Args:
    vec: A 1-D tensor of dimension [D0]
    mat: A 2-D tensor of dimension [D0, D1]

  Returns:
    A tensor of dimension [D0, D1], the result of vec * mat
  """
  # Append a trailing axis so vec becomes [D0, 1] and broadcasts over D1.
  return array_ops.expand_dims(vec, -1) * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient for SoftmaxCrossEntropyWithLogits: backprops only to the logits."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient for SparseSoftmaxCrossEntropyWithLogits: backprops only to the logits."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
  """Gradients for Conv2D w.r.t. its input tensor and its filter."""
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  use_cudnn = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  input_grad = nn_ops.conv2d_backprop_input(
      array_ops.shape(op.inputs[0]), op.inputs[1], grad,
      strides, padding, use_cudnn, data_format)
  filter_grad = nn_ops.conv2d_backprop_filter(
      op.inputs[0], array_ops.shape(op.inputs[1]), grad,
      strides, padding, use_cudnn, data_format)
  return [input_grad, filter_grad]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
  """Gradient for local response normalization.

  Delegates to the fused LRNGrad kernel, forwarding the op's attributes
  along with its forward input and output.
  """
  depth_radius = op.get_attr("depth_radius")
  bias = op.get_attr("bias")
  alpha = op.get_attr("alpha")
  beta = op.get_attr("beta")
  return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0],
                               depth_radius, bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
  """Gradient for AvgPool, delegating to the fused AvgPoolGrad kernel."""
  orig_input_shape = array_ops.shape(op.inputs[0])
  return gen_nn_ops._avg_pool_grad(
      orig_input_shape,
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
  """Gradient for MaxPool.

  Delegates to the fused MaxPoolGrad kernel, which needs both the forward
  input and the forward output alongside the incoming gradient.
  """
  return gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format=op.get_attr("data_format")
                                   )
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
  """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.

  We do not backprop anything for the mean and var intentionally as they are
  not being trained with backprop in the operation.

  Args:
    op: The BatchNormOp for which we need to generate gradients.
    grad: Tensor.  The gradients passed to the BatchNormOp.

  Returns:
    dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
    dm: Backprop for mean, which is
        sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
    dv: Backprop for variance, which is
        sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
    db: Backprop for beta, which is grad reduced in all except the
        last dimension.
    dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
  """
  # Inputs 0-2 and 4 are forwarded (presumably x, m, v, gamma — matching the
  # returned dx/dm/dv/dg); beta (inputs[3]) is not needed for any gradient.
  dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
      op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
      op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
  return dx, dm, dv, db, dg
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
  """Return the gradients for L2Loss.

  Args:
    op: The L2LossOp for which we need to generate gradients.
    grad: Tensor containing a single number.

  Returns:
    The gradient, which is (x * grad): the forward input scaled by the
    incoming (scalar) gradient.
  """
  return op.inputs[0] * grad
| {
"content_hash": "1a8d7ca2b60cc2cea323260fccba6046",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 82,
"avg_line_length": 35.08860759493671,
"alnum_prop": 0.6225348725348725,
"repo_name": "RyanYoung25/tensorflow",
"id": "a6f0e66a5f11c6a3b58402a5b3ca2723c888f5cf",
"size": "8994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/nn_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151630"
},
{
"name": "C++",
"bytes": "6922849"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "657597"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16175"
},
{
"name": "Jupyter Notebook",
"bytes": "777942"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "89536"
},
{
"name": "Python",
"bytes": "3835693"
},
{
"name": "Shell",
"bytes": "66697"
},
{
"name": "TypeScript",
"bytes": "329009"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.gis.admin import OSMGeoAdmin
from django.contrib.gis.db import models
from hstore_field import fields
class Item (models.Model):
    """Minimal model exercising HStoreField alongside a plain char field."""
    name = models.CharField(max_length=64)
    data = fields.HStoreField()
admin.site.register(Item)
class Related (models.Model):
    """Model with a foreign key to Item, for testing related lookups."""
    # NOTE: no on_delete argument — pre-Django-2.0 ForeignKey signature.
    item = models.ForeignKey(Item)
admin.site.register(Related)
class GeoItem (models.Model):
    """GeoDjango variant of Item: adds an optional point and a GeoManager."""
    name = models.CharField(max_length=64)
    point = models.PointField(null=True)
    data = fields.HStoreField()
    objects = models.GeoManager()
admin.site.register(GeoItem, OSMGeoAdmin)
| {
"content_hash": "f1ce2e792877c3d5b6b97b98cf161755",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 48,
"avg_line_length": 26.304347826086957,
"alnum_prop": 0.7520661157024794,
"repo_name": "erussell/hstore-field",
"id": "15bdd89c1ed99592bde4ca8103d222bb5b9a270d",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_hstore_field/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28416"
}
],
"symlink_target": ""
} |
import contextlib
import json
from django.core.management.base import BaseCommand
from lxml import etree
import os
from corehq.apps.app_manager.models import Application, RemoteApp
# Shared parser that drops ignorable whitespace so re-serialization is canonical.
_parser = etree.XMLParser(remove_blank_text=True)


def normalize_xml(xml):
    """Re-serialize the given XML bytestring in pretty-printed form."""
    tree = etree.fromstring(xml, parser=_parser)
    return etree.tostring(tree, pretty_print=True)
@contextlib.contextmanager
def record_performance_stats(filepath, slug):
    """Context manager that appends '<slug>,<heap-bytes>,<seconds>' to
    *filepath* for the code executed inside the block.

    Uses guppy's heap diff for memory and time.clock() for CPU time;
    guppy is imported lazily so the command works without it unless
    profiling is actually requested.
    """
    from guppy import hpy
    import time
    hp = hpy()
    # Snapshot heap and clock before the measured block runs.
    before = hp.heap()
    start = time.clock()
    try:
        yield
    finally:
        # Record stats even if the measured block raised.
        end = time.clock()
        after = hp.heap()
        leftover = after - before
        with open(filepath, 'a') as f:
            f.write('{},{},{}\n'.format(slug, leftover.size, end - start))
class Command(BaseCommand):
    # Django management command: build all files for each app JSON source
    # found under <path>/src, writing output to <path>/<build-slug>/<app-slug>
    # and recording per-app performance stats.
    args = '<path_to_dir> <build-slug>'
    help = """
        Pass in a path to a directory (dir, below) with the following layout:
        dir/
            src/
                [app-slug].json
                [app-slug].json
                ...
    """
    def handle(self, *args, **options):
        """Entry point: discover app sources, wrap them, and emit their files."""
        path, build_slug = args
        app_slugs = []
        # Start each run with a fresh performance log.
        perfpath = os.path.join(path, '{}-performance.txt'.format(build_slug))
        if os.path.exists(perfpath):
            os.remove(perfpath)
        for name in os.listdir(os.path.join(path, 'src')):
            _JSON = '.json'
            if name.endswith(_JSON):
                app_slugs.append(name[:-len(_JSON)])
        for slug in app_slugs:
            print 'Fetching %s...' % slug
            source_path = os.path.join(path, 'src', '%s.json' % slug)
            with open(source_path) as f:
                j = json.load(f)
            # NOTE(review): an unrecognized doc_type would leave `app` unbound
            # from a previous iteration (or raise NameError on the first).
            if j['doc_type'] == 'Application':
                app = Application.wrap(j)
            elif j['doc_type'] == 'RemoteApp':
                app = RemoteApp.wrap(j)
            app.version = 1
            if not app.domain:
                app.domain = "test"
            build_path = os.path.join(path, build_slug, slug)
            print '  Creating files...'
            # Measure memory/time of file generation only, not disk writes.
            with record_performance_stats(perfpath, slug):
                files = app.create_all_files()
            self.write_files(files, build_path)
    def write_files(self, files, path):
        """Write the {relative-name: payload} mapping under *path*,
        normalizing XML payloads before writing."""
        for filename, payload in files.items():
            filepath = os.path.join(path, filename)
            dirpath, filename = os.path.split(filepath)
            try:
                os.makedirs(dirpath)
            except OSError:
                # file exists
                pass
            with open(filepath, 'w') as f:
                if filepath.endswith('.xml'):
                    payload = normalize_xml(payload)
                f.write(payload)
"content_hash": "c66fbc7c1c86f8fd5ee4c1d8728b871e",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 31.906976744186046,
"alnum_prop": 0.5379008746355685,
"repo_name": "puttarajubr/commcare-hq",
"id": "ca35daf7db09c5164963a0caccbb2d4a570c8495",
"size": "2744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "corehq/apps/app_manager/management/commands/build_apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
} |
"""
A WSGI application configured by an INI file,
which responds to requests according to a Swagger API specification.
"""
from webob import Request, Response
import abc
import webob.dec
import webob.exc
from urlparse import parse_qs
from swaggerapp.specification import SwaggerSpecification
from swaggerapp.encoder import JSONStreamingEncoder
import logging
# Pylint warns that the following class has too few public methods.
# It is not intended to have many (or even any) public methods,
# so this is not a problem, so the following comment silences the warning.
# Apparently, pylint assumes (falsely) that a class without public methods
# is being abused as a mere holder of data - but the below class is being
# used as a holder of code, as is common accepted practice in OOP.
# pylint: disable=R0903
class Application(object):
"""
Abstract base class for a WSGI application configured by an INI file
which responds to requests according to a Swagger API specification.
"""
__metaclass__ = abc.ABCMeta
    def __init__(self, configuration):
        """Store the INI-derived *configuration* for use by subclasses."""
        super(Application, self).__init__()
        self.config = configuration
    def _get_method(self, func_name):
        """
        Find and return the method with the given name on this object,
        or return None if no such method exists.

        Used when dispatching a request to a handler method by name.
        """
        return getattr(self, func_name, None)
@classmethod
def _expected_response(cls, operation):
"""
Given a Swagger operation, return the sole response.
If the specification allows no responses, we cannot succeed,
because HTTP requires a response to every request.
If the specification allows multiple responses,
we have no way to choose which will be returned,
so we must also fail.
"""
if 'responses' not in operation:
raise ValueError('No responses')
responses = operation['responses']
if len(responses) != 1:
raise ValueError('No or multiple responses')
return responses
@classmethod
def _expected_status(cls, req, operation):
"""
Given a Swagger operation, return the expected status.
This is the HTTP status that will be served in response
to a request mapping to this operation.
"""
if "options" == req.environ['REQUEST_METHOD'].lower():
return '200 OK'
resp = cls._expected_response(operation)
return resp.keys()[0]
@classmethod
def _expected_body(cls, operation):
"""
Given a Swagger operation, return the expected body.
This is the HTTP response body that will be served
in response to a request mapping to this operation.
"""
return cls._expected_response(operation).values()[0]
@classmethod
def _expected_schema(cls, operation):
"""
Given a Swagger operation, return the schema of the expected body.
This specifies the structure of the HTTP response body that will
be served in response to a request mapping to this operation.
"""
body = cls._expected_body(operation)
if 'schema' not in body:
raise ValueError('No schema in response')
return body['schema']
    @classmethod
    def _headers(cls):
        """
        Return a list of ('Header-Name', 'Header-Value') tuples,
        which should be added as HTTP headers to every response.

        The base implementation adds none; subclasses may override.
        """
        return []
@classmethod
def _build_response(cls, req, return_value_iter, headers=None):
"""
Build an HTTP response to the given request, with response body
containing the data output by the given iterator.
Tailor the response to the datatypes specified by Swagger, if any.
"""
if isinstance(return_value_iter, webob.exc.WSGIHTTPException):
return return_value_iter
swagger = req.environ['swagger']
spec = swagger['spec']
if 'operation' in swagger:
operation = swagger['operation']
else:
operation = None
if operation:
schema = cls._expected_schema(operation)
expected_type = spec.resolve_refs(schema)
else:
schema = None
expected_type = None
status = cls._expected_status(req, operation)
if not headers:
headers = []
"""
TODO: XML response support, depending on content negotiation.
"""
headers.append(('Content-Type', 'application/json'))
for tup in cls._headers():
headers.append(tup)
if return_value_iter is None:
return_value_iter = iter()
if expected_type is None:
# Not sure what type to return
array_not_object = None
elif 'array' == expected_type:
# The specification says we're to return an array
array_not_object = True
elif 'object' == expected_type:
# The specification says we're to supply an object
array_not_object = False
else:
# A JSON response must be either an object or an array
raise ValueError(
"Cannot convert type '%s' into a valid JSON top-level type"
% expected_type
)
encoder = JSONStreamingEncoder()
json_iter = encoder.to_json(return_value_iter, array_not_object)
return Response(
status=status,
app_iter=json_iter,
headers=headers
)
@classmethod
def _allowed_methods(cls, spec, path):
    """
    Return the list of HTTP methods permitted by the specification.

    An OPTIONS method is always included, whether or not the
    specification mentions it, since we synthesise OPTIONS responses.
    """
    if spec is None:
        allowed = []
    else:
        candidates = [
            m for m in SwaggerSpecification.methods if m != 'options'
        ]
        if path is None:
            allowed = [m.upper() for m in candidates]
        else:
            allowed = [m.upper() for m in candidates if m in path]
    # Synthesise an OPTIONS method
    allowed.append('OPTIONS')
    return allowed
@classmethod
def _options_response(cls, req):
    """
    Respond to OPTIONS requests meaningfully, implementing HATEOAS
    using the information in the Swagger catalogs.
    """
    env = req.environ['swagger']
    spec = env['spec']
    path = env['path']
    allowed = cls._allowed_methods(spec, path)
    # Reply with the most specific portion of the specification we
    # have: the matched path, else the whole spec, else nothing.
    if spec is None:
        body = None
    elif path is None:
        body = spec
    else:
        body = path
    hdrs = [('Allow', ','.join(allowed))]
    return cls._build_response(req, body, hdrs)
def _check_auth(self, req):
    """Authentication/authorisation hook.

    Subclasses can override this to restrict access; the default
    implementation permits every request.
    """
    return True
@webob.dec.wsgify
def __call__(self, req_dict):
    """
    Match the given request in the Swagger specification.
    Requires WSGI environment information from either SwaggerMapper
    or SwaggerMiddleware.
    Respond with one of several things:
    - For an OPTIONS request, respond with part/all of the spec
    - For an unauthorised request, an HTTP error
    - For a request that doesn't map to an operationId in the schema,
    or maps to something not defined in Python, or maps to a private
    method whose name begins with an underscore, an HTTP error
    - The result of calling self.operation_<operationId>, which is expected to
    return a ( body, headers ) tuple.
    """
    req = Request(req_dict.environ)
    if "options" == req.environ['REQUEST_METHOD'].lower():
        # Intercept this request to return an OPTIONS response
        return self._options_response(req)
    # Require valid authentication/authorisation from this point onward
    if not self._check_auth(req):
        # Authentication or authorisation failed
        return webob.exc.HTTPUnauthorized()
    # Two possible sources of routing information: wsgiorg.routing_args
    # (e.g. from a Routes mapper) or the 'swagger' environ entry.
    if 'wsgiorg.routing_args' in req.environ:
        routing_args = req.environ['wsgiorg.routing_args']
        # routing_args is (positional, keyword); we use the keyword dict.
        method_params = routing_args[1]
        if method_params:
            method_name = method_params['method']
        else:
            # TODO: Include a link to the schema
            return webob.exc.HTTPNotFound()
    elif 'swagger' in req.environ:
        swagger = req.environ['swagger']
        logging.debug(swagger)
        path = swagger['path']
        operation = swagger['operation']
        # If no Swagger path matched, 404 Not Found
        if path is None:
            logging.warning("No path matched requested URL")
            # TODO: Include a link to the schema
            return webob.exc.HTTPNotFound()
        # If Swagger path matched, but no operation matched the HTTP
        # method, HTTP Method Not Allowed
        if operation is None:
            logging.warning(
                "No matching operation in path in API specification"
            )
            logging.debug(path)
            # Include an Allow header listing acceptable request methods
            # NOTE(review): this passes the whole 'swagger' environ dict
            # where _allowed_methods expects the spec; it works because
            # that argument is only None-checked -- confirm intent.
            headers = []
            methods = self._allowed_methods(swagger, path)
            headers.append(('Allow', ','.join(methods)))
            # TODO: Include a link to the schema
            return webob.exc.HTTPMethodNotAllowed(headers=headers)
        if 'operationId' not in operation:
            # This condition is also checked at API spec load time
            raise ValueError("No operationId in Swagger specification")
        method_name = 'operation_' + operation['operationId']
        method_params = swagger['parameters']
    else:
        logging.error(
            "Neither wsgiorg.routing_args nor swagger in environment"
        )
        logging.debug(req.environ)
        # TODO: Include a link to the schema
        return webob.exc.HTTPNotFound()
    if method_name.startswith('_'):
        # Attempt to call a private method
        return webob.exc.HTTPForbidden()
    method = self._get_method(method_name)
    if method is None:
        # Method specified in interface specification,
        # but no matching Python method found
        logging.warning(
            self.__class__.__name__ +
            " has no method '" +
            method_name + "'"
        )
        return webob.exc.HTTPNotImplemented()
    if ('QUERY_STRING' in req.environ) and req.environ['QUERY_STRING']:
        try:
            # parse_qs(..., True, True): keep blank values, strict parsing.
            query_params = parse_qs(
                req.environ['QUERY_STRING'], True, True
            )
        except ValueError:
            return webob.exc.HTTPBadRequest(
                "Failed to parse URL query parameters"
            )
    else:
        query_params = dict()
    # Routing/spec parameters override query-string parameters.
    query_params.update(method_params)
    result, headers = method(req, query_params)
    if isinstance(result, webob.exc.HTTPException):
        return result
    return self._build_response(req, result, headers)
| {
"content_hash": "ae0f636f72542560248a4fab659247a5",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 82,
"avg_line_length": 38.33112582781457,
"alnum_prop": 0.5933828610919143,
"repo_name": "NCI-Cloud/reporting-api",
"id": "9248b17e271a74569d7cefb6b7890c86cf321839",
"size": "11576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporting_api/common/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "61118"
},
{
"name": "Shell",
"bytes": "2295"
}
],
"symlink_target": ""
} |
"""
Part of pymzml test cases
"""
import os
from pymzml.file_classes.standardGzip import StandardGzip
import unittest
import random
from pymzml.spec import Spectrum, Chromatogram
import re
import struct
import test_file_paths
class StandardGzipTest(unittest.TestCase):
    """Tests for reading spectra and chromatograms via StandardGzip."""

    def setUp(self):
        """Open the gzipped test file before each test."""
        self.File = StandardGzip(test_file_paths.paths[1], "latin-1")

    def tearDown(self):
        """Release the file handle after each test."""
        self.File.close()

    def test_getitem_5(self):
        """Indexing with an integer ID yields the matching Spectrum."""
        spec_id = 5
        spectrum = self.File[spec_id]
        self.assertIsInstance(spectrum, Spectrum)
        self.assertEqual(spectrum.ID, spec_id)

    def test_getitem_tic(self):
        """Indexing with 'TIC' yields the matching Chromatogram."""
        chrom_id = "TIC"
        chromatogram = self.File[chrom_id]
        self.assertIsInstance(chromatogram, Chromatogram)
        self.assertEqual(chromatogram.ID, chrom_id)
# Allow this test module to be run directly.
if __name__ == "__main__":
    unittest.main(verbosity=3)
| {
"content_hash": "9068c780f755ea93ed7bd6914dc6b603",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 57,
"avg_line_length": 20.355555555555554,
"alnum_prop": 0.5917030567685589,
"repo_name": "StSchulze/pymzML",
"id": "7593f06cc9b1c45fe4e4bc186067be86fcc1e347",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/file_io_standard_gzip_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "364653"
},
{
"name": "Shell",
"bytes": "298"
}
],
"symlink_target": ""
} |
# Counter value type codes exposed by this stub module.
DATA_TYPE_INT = 0
DATA_TYPE_DOUBLE = 1
class WinPDHCounter(object):
    """Inert stand-in for the Windows PDH counter class.

    Every accessor returns an empty/placeholder value so code written
    against the real counter can run without PDH support.
    """

    def is_single_instance(self):
        """The stub never reports a single-instance counter."""
        return False

    def get_single_value(self):
        """No single value is available from the stub."""
        return None

    def get_all_values(self):
        """No per-instance values are available from the stub."""
        return dict()

    def _get_counter_dictionary(self):
        """Nothing to look up; falls through with an implicit None."""
        return
| {
"content_hash": "2de48df49e3c818c1ffc12267c9d713e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 38,
"avg_line_length": 17.625,
"alnum_prop": 0.6170212765957447,
"repo_name": "DataDog/integrations-core",
"id": "edd3d69b3aa18e8645db7eb1a22619e345c42298",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datadog_checks_base/datadog_checks/base/checks/win/winpdh_stub.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import cobra.models.fields.gzippeddict
class Migration(migrations.Migration):
    """Initial migration: create the Node store table.

    NOTE: migration files are part of schema history; do not edit the
    operations below once they have been applied to a database.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Node',
            fields=[
                # Primary key is an externally supplied 40-character
                # string, not an AutoField (presumably a hex digest --
                # TODO confirm against the code that writes Nodes).
                ('id', models.CharField(max_length=40, serialize=False, primary_key=True)),
                # Arbitrary payload stored via the custom gzipped-dict field.
                ('data', cobra.models.fields.gzippeddict.GzippedDictField()),
                # Indexed creation time, defaulting to "now".
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
            ],
            options={
                'verbose_name': 'Node store',
                'verbose_name_plural': 'Node store',
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "33c7f0b5a26228b76f2cbf278a058575",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 102,
"avg_line_length": 29.37037037037037,
"alnum_prop": 0.5737704918032787,
"repo_name": "lyoniionly/django-cobra",
"id": "c19462be0572a1e71b12441e8cb92d865653ef46",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cobra/apps/nodestore/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "745958"
},
{
"name": "HTML",
"bytes": "254436"
},
{
"name": "JavaScript",
"bytes": "2679541"
},
{
"name": "Python",
"bytes": "1440198"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
# This program is a test fixture: the imported modules contain
# deliberate syntax/indentation errors, and the printed messages are
# compared against the test suite's expected output. Do not "fix" the
# failing imports.
try:
    from SyntaxErroring import x
except Exception as e:
    print("Importing with syntax error gave:", type(e), e)

try:
    from IndentationErroring import x
except Exception as e:
    print("Importing with indentation error gave:", type(e), e)

print("Finished.")
| {
"content_hash": "2d7f0b5e3d24c84fae8aa5aab95961fa",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 63,
"avg_line_length": 21.4,
"alnum_prop": 0.7165109034267912,
"repo_name": "tempbottle/Nuitka",
"id": "518fea9e4f7db56b6e80872fcaa47037223d88f3",
"size": "1090",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/programs/syntax_errors/Main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5518"
},
{
"name": "Batchfile",
"bytes": "1810"
},
{
"name": "C",
"bytes": "36149"
},
{
"name": "C++",
"bytes": "433315"
},
{
"name": "Python",
"bytes": "4356577"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
} |
# Exercise: read the user's full name and report whether it contains
# "Silva" (case-insensitively).
nome = str(input("Digite seu nome completo: ")).strip()
# Lower-case once so the substring test ignores capitalisation.
nome = nome.lower()
print("Seu nome tem Silva? ")
# Removed the commented-out str.find() variant; "in" is the idiomatic
# membership test and prints True/False directly.
print("{}".format("silva" in nome))
| {
"content_hash": "7d97bad55e7de479cbc38fd91980194f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 34.5,
"alnum_prop": 0.6473429951690821,
"repo_name": "AlbertoAlfredo/exercicios-cursos",
"id": "6bf71a67f777121fd8706d18d027a39eb556eb7c",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Curso-em-video/Python/aulas-python/Desafios/desafio025.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20725"
}
],
"symlink_target": ""
} |
import os
import re
import time
def getPatternName():
pattern_json_path=os.getcwd()+"/VSP/patterns.json"
f = open (pattern_json_path, "r")
for line in f:
if "name" in line:
break
print "line:" + line
match = re.search(":( *)\"(.*)\",", line)
f.close()
return match.group(2).strip()
def shouldDeployVSP():
"""
test if we would deploy the VSP
to determine the behavior based on the property value in the proprty file
"""
f = open("pyautomation.properties", "r")
for line in f:
if "deployment.pattern" in line:
print "line:" + line
match = re.search(r"(.*)=(.*)", line)
try:
if match.group(2).strip() == "True":
return True
except:
pass
return False
f.close()
return False
def getjo():
    """
    get variables names and values dynamically
    from # Start VSP params to # End VSP params in pyautomation.properties.

    Also picks up envprofile.name1, cloud1.name and vs.endtime from
    anywhere in the file. Returns a parameter dictionary.
    """
    dict = {}  # NOTE(review): shadows the builtin 'dict'
    f = open("pyautomation.properties", "r")
    found = False
    profile = None
    cloud = None
    for line in f:
        if "envprofile.name1" in line:
            print "line:" + line
            match = re.search(r"(.*)=(.*)", line)
            try:
                pname = match.group(2).strip()
                # Look up the named environment profile via the global
                # 'deployer' object supplied by the hosting tool.
                profile = deployer.environmentprofiles.list({'name':pname})[0]
                #dict['environmentprofile'] = profile
                dict['pname']=pname
            except:
                # Malformed line or lookup failure: skip silently.
                pass
            continue
        if "cloud1.name" in line:
            print "line:" + line
            match = re.search(r"(.*)=(.*)", line)
            try:
                cname = match.group(2).strip()
                #cloud = deployer.clouds.list({'name':cname})[0]
                print 'cloud name:' + cname
                #dict['*.cloud'] = cloud
                dict['cname']=cname
            except:
                pass
            continue
        if "vs.endtime" in line:
            print "line:" + line
            match = re.search(r"(.*)=(.*)", line)
            try:
                # End time is "now" plus the configured offset (seconds).
                dict['endtime'] = time.time()+ long(match.group(2).strip())
            except:
                pass
            continue
        if "# Start VSP params" in line:
            # Everything until the end marker is a literal key=value pair.
            found = True
            continue
        if "# End VSP params" in line:
            break
        if found:
            print "line:" + line
            match = re.search(r"(.*)=(.*)", line)
            try:
                dict[match.group(1).strip()] = match.group(2).strip()
            except:
                pass
    return dict
    f.close()  # NOTE(review): unreachable -- the handle is never closed
def ImportDeployVSP():
    """Load the VSP pattern from ./VSP and deploy it as a virtual system.

    Relies on the global 'deployer' object injected by the hosting
    automation tool, plus shouldDeployVSP()/getPatternName()/getjo()
    defined above.
    """
    cwd=os.getcwd()
    pattern=None
    try:
        pattern=deployer.patterns.load(cwd +"/VSP")
    except Exception,e:
        print e
        # NOTE(review): 'assert True' can never fail -- probably meant
        # to abort on load errors; verify intent.
        assert True , 'pattern loading errors'
        pass
    if shouldDeployVSP() == False:
        # Deployment disabled in the property file: exit cleanly.
        exit ( 0 )
    pattern_name=getPatternName()
    found=False
    # Case-insensitive substring match against the loaded patterns.
    for p in deployer.patterns.list():
        if pattern_name.lower() in p.name.lower():
            found=True
            break
    if found == False:
        print "pattern:" + pattern_name + " not found!"
        exit (1)
    createParms = getjo()
    pname=createParms['pname']  # NOTE(review): unused
    cname=createParms['cname']
    cloud = deployer.clouds.list({'name':cname})[0]
    print 'cloud to be deployed:', cloud
    createParms['cloud'] = cloud
    createParms['pattern'] = p
    # Name the virtual system with a timestamp suffix so repeated test
    # runs do not collide.
    ts=time.time()
    import datetime
    st=datetime.datetime.fromtimestamp(ts).strftime("%Y%m%d%H%M")
    createParms['name'] = "test_"+p.name + st
    createParms['starttime'] = time.time()
    createParms['*.ConfigPWD_ROOT.password']='ec11ipse'
    createParms['*.ConfigPWD_USER.password']='ec11ipse'
    try:
        # '<<' is the deployer API's create-and-deploy operator.
        virtualSystem = deployer.virtualsystems << createParms
        print 'virtual system created:\n%s' % virtualSystem
    except ValueError, ve:
        print str(ve)

# Module entry point: this script runs the deployment on execution.
ImportDeployVSP()
| {
"content_hash": "6683e7d667278aaf588d4a32b97b0e8a",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 78,
"avg_line_length": 29.288888888888888,
"alnum_prop": 0.5220030349013657,
"repo_name": "CLMDev/leanJazz",
"id": "6cd420d3769ea7c1daaee028de17074ac9dfbeeb",
"size": "4542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ucd_plugins/com.ibm.urbancode.plugin.commoncloud.vsp/ImportAndDeployVSP.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "855"
},
{
"name": "Groovy",
"bytes": "32856"
},
{
"name": "JavaScript",
"bytes": "211469"
},
{
"name": "Python",
"bytes": "73846"
},
{
"name": "Shell",
"bytes": "80513"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import sys
# from PIL import Image
def load_data(traindata_file_path, testdata_file_path, one_hot=True, classes=2):
    """Load train/test features and labels from two pickle directories.

    When one_hot is true, integer label vectors are expanded into
    one-hot matrices with the given number of classes.
    """
    train_x, train_y = read_file(traindata_file_path)
    test_x, test_y = read_file(testdata_file_path)
    if not one_hot:
        return train_x, train_y, test_x, test_y
    return train_x, onehot(train_y, classes), test_x, onehot(test_y, classes)
def read_file(file_path):
    """Concatenate the 'data' and 'label' arrays from every pickle file
    found directly under *file_path*.

    Returns a (features, labels) pair of numpy arrays.
    """
    feature_chunks = []
    label_chunks = []
    for entry in os.listdir(file_path):
        with open(os.path.join(file_path, entry), 'rb') as handle:
            # Branch on interpreter major version: cPickle on Python 2,
            # pickle with bytes-compatible decoding on Python 3.
            if sys.version_info.major == 2:
                import cPickle
                batch = cPickle.load(handle)
            elif sys.version_info.major == 3:
                import pickle
                batch = pickle.load(handle, encoding='bytes')
            feature_chunks.append(batch['data'])
            label_chunks.append(batch['label'])
    return np.concatenate(feature_chunks), np.concatenate(label_chunks)
def onehot(ndarr, classes):
    """Expand a vector of integer class labels into a one-hot matrix.

    Returns a float array of shape (len(ndarr), classes) with a 1 in
    the column named by each label.
    """
    count = ndarr.shape[0]
    encoded = np.zeros((count, classes))
    # Flat index of the hot cell in each row: row_start + label.
    flat_positions = np.arange(count) * classes + ndarr.ravel()
    encoded.flat[flat_positions] = 1
    return encoded
| {
"content_hash": "10b9ee0f3c13f4f8b3e9943303702e7e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 80,
"avg_line_length": 27.895833333333332,
"alnum_prop": 0.614637789395071,
"repo_name": "gu-yan/mlAlgorithms",
"id": "544a4ed05e636d5098df4c7fc80b8da9ad59fbf1",
"size": "1364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mxnet/data_tool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "159631"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import shutil
import subprocess
import json
import sys
from six import unichr
from PIL import Image, ImageDraw, ImageFont
class MissingGlyphError(Exception):
    """Raised when the colour font provides no bitmap for a code point."""
def color_font(name, code_point):
    """Copy the pre-rendered colour bitmap for *code_point* into out/unicode/.

    Raises MissingGlyphError when no such bitmap exists.
    """
    src = 'bitmaps/strike1/uni{}.png'.format(code_point)
    dst = 'out/unicode/{}.png'.format(code_point)
    try:
        shutil.copyfile(src, dst)
    except IOError:
        raise MissingGlyphError('name: %r code_point: %r' % (name, code_point))
def bw_font(name, code_point):
    """Render *code_point* with the monochrome Android emoji font.

    Draws the glyph at 8x resolution, then downsamples with
    anti-aliasing to a 68x68 PNG under out/unicode/.
    """
    glyph = unichr(int(code_point, 16))
    scale = 8
    target_size = (68, 68)
    canvas_size = (target_size[0] * scale, target_size[1] * scale)
    # AndroidEmoji.ttf is from
    # https://android.googlesource.com/platform/frameworks/base.git/+/master/data/fonts/AndroidEmoji.ttf
    # commit 07912f876c8639f811b06831465c14c4a3b17663
    typeface = ImageFont.truetype('AndroidEmoji.ttf', 65 * scale)
    canvas = Image.new('RGBA', canvas_size)
    ImageDraw.Draw(canvas).text((0, 0), glyph, font=typeface, fill='black')
    small = canvas.resize(target_size, Image.ANTIALIAS)
    small.save('out/unicode/{}.png'.format(code_point), 'PNG')
# ttx is in the fonttools package, the -z option is only on master
# https://github.com/behdad/fonttools/
# NotoColorEmoji.tff is from
# https://android.googlesource.com/platform/external/noto-fonts/+/kitkat-release/NotoColorEmoji.ttf
# Dump the colour-emoji bitmaps out of the font into bitmaps/.
subprocess.call('ttx -v -z extfile NotoColorEmoji.ttf', shell=True)

# Start from a clean output tree; ignore "out does not exist".
try:
    shutil.rmtree('out')
except OSError:
    pass

os.mkdir('out')
os.mkdir('out/unicode')

emoji_map = json.load(open('emoji_map.json'))

# Fix data problem with red/blue cars being inaccurate.
emoji_map['blue_car'] = emoji_map['red_car']
emoji_map['red_car'] = emoji_map['oncoming_automobile']

failed = False

for name, code_point in emoji_map.items():
    try:
        color_font(name, code_point)
    except MissingGlyphError:
        # No colour bitmap: fall back to rendering the monochrome glyph.
        try:
            bw_font(name, code_point)
        except Exception as e:
            print(e)
            print('Missing {}, {}'.format(name, code_point))
            failed = True
            continue
    # Expose the glyph under its emoji name via a symlink.
    os.symlink('unicode/{}.png'.format(code_point), 'out/{}.png'.format(name))

if failed:
    print("Errors dumping emoji!")
    sys.exit(1)
| {
"content_hash": "5feb2b514343baf504d3fadc53482ba6",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 104,
"avg_line_length": 28.531645569620252,
"alnum_prop": 0.6708074534161491,
"repo_name": "peiwei/zulip",
"id": "ef08b5ee304a9b8024c105d26a80c685db8e9908",
"size": "2276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/setup/emoji_dump/emoji_dump.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "183830"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "397966"
},
{
"name": "JavaScript",
"bytes": "1588795"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "96085"
},
{
"name": "Python",
"bytes": "2010761"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "33341"
}
],
"symlink_target": ""
} |
import json
import os
import uuid
import pytest
import sdk_cmd
import sdk_hosts
import sdk_jobs
import sdk_plan
import sdk_upgrade
from tests import config
@pytest.mark.soak_backup
def test_backup_and_restore():
    """Exercise the backup-s3 and restore-s3 plans against an S3 bucket.

    Bucket, credentials and region come from the environment.
    """
    parameters = {
        'S3_BUCKET_NAME': os.getenv(
            'AWS_BUCKET_NAME', 'infinity-framework-test'
        ),
        'AWS_ACCESS_KEY_ID': os.getenv('AWS_ACCESS_KEY_ID'),
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    jobs = [
        config.get_write_data_job(),
        config.get_verify_data_job(),
        config.get_delete_data_job(),
        config.get_verify_deletion_job(),
    ]
    with sdk_jobs.InstallJobContext(jobs):
        config.run_backup_and_restore(
            config.SERVICE_NAME,
            'backup-s3',
            'restore-s3',
            parameters)
@pytest.mark.soak_upgrade
def test_soak_upgrade_downgrade():
    """Install the Cassandra Universe package and attempt upgrade to master.

    Assumes that the install options file is placed in the repo root.
    """
    with open('cassandra.json') as options_file:
        options = json.load(options_file)
    sdk_upgrade.soak_upgrade_downgrade(
        config.PACKAGE_NAME,
        options["service"]["name"],
        config.DEFAULT_TASK_COUNT,
        additional_options=options)
@pytest.mark.soak_migration
def test_cassandra_migration():
    """Back one cluster up to S3, then restore the data into another.

    Cluster names, node endpoints and AWS credentials all come from
    environment variables.
    """
    backup_service_name = os.getenv('CASSANDRA_BACKUP_CLUSTER_NAME')
    restore_service_name = os.getenv('CASSANDRA_RESTORE_CLUSTER_NAME')

    backup_node_address = os.getenv('BACKUP_NODE_ADDRESS', config.DEFAULT_NODE_ADDRESS)
    backup_node_port = os.getenv('BACKUP_NODE_PORT', config.DEFAULT_NODE_PORT)

    # Jobs that seed, verify, delete and verify-deletion of test data
    # on the backup cluster.
    backup_write_data_job = config.get_write_data_job(backup_node_address, backup_node_port)
    backup_verify_data_job = config.get_verify_data_job(backup_node_address, backup_node_port)
    backup_delete_data_job = config.get_delete_data_job(backup_node_address, backup_node_port)
    backup_verify_deletion_job = config.get_verify_deletion_job(backup_node_address, backup_node_port)

    plan_parameters = {
        'S3_BUCKET_NAME': os.getenv(
            'AWS_BUCKET_NAME', 'infinity-framework-test'
        ),
        'AWS_ACCESS_KEY_ID': os.getenv('AWS_ACCESS_KEY_ID'),
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }

    backup_install_job_context = sdk_jobs.InstallJobContext(
        [backup_write_data_job, backup_verify_data_job,
         backup_delete_data_job, backup_verify_deletion_job])
    backup_run_job_context = sdk_jobs.RunJobContext(
        before_jobs=[backup_write_data_job, backup_verify_data_job],
        after_jobs=[backup_delete_data_job, backup_verify_deletion_job])
    # Install and run the write/delete data jobs against backup cluster,
    # running dcos-cassandra-service
    with backup_install_job_context, backup_run_job_context:
        # Back this cluster up to S3
        backup_parameters = {
            'backup_name': plan_parameters['SNAPSHOT_NAME'],
            's3_access_key': plan_parameters['AWS_ACCESS_KEY_ID'],
            's3_secret_key': plan_parameters['AWS_SECRET_ACCESS_KEY'],
            'external_location': 's3://{}'.format(plan_parameters['S3_BUCKET_NAME']),
        }
        sdk_cmd.service_request('PUT', backup_service_name, '/v1/backup/start', json=backup_parameters)
        sdk_plan.wait_for_completed_deployment(backup_service_name)

    # Restore data to second instance:
    restore_node_address = os.getenv(
        'RESTORE_NODE_ADDRESS', sdk_hosts.autoip_host('sdk-cassandra', 'node-0-server'))
    restore_node_port = os.getenv('RESTORE_NODE_PORT', '9052')

    restore_write_data_job = config.get_write_data_job(restore_node_address, restore_node_port)
    restore_verify_data_job = config.get_verify_data_job(restore_node_address, restore_node_port)
    restore_delete_data_job = config.get_delete_data_job(restore_node_address, restore_node_port)
    restore_verify_deletion_job = config.get_verify_deletion_job(restore_node_address, restore_node_port)

    restore_install_job_context = sdk_jobs.InstallJobContext(
        [restore_write_data_job, restore_verify_data_job,
         restore_delete_data_job, restore_verify_deletion_job]
    )
    # Only verification jobs run here: the data itself arrives via the
    # restore-s3 plan started below.
    restore_run_job_context = sdk_jobs.RunJobContext(
        after_jobs=[restore_verify_data_job, restore_delete_data_job, restore_verify_deletion_job]
    )
    with restore_install_job_context, restore_run_job_context:
        sdk_plan.start_plan(
            restore_service_name, 'restore-s3', parameters=plan_parameters
        )
        sdk_plan.wait_for_completed_plan(restore_service_name, 'restore-s3')
| {
"content_hash": "b7b63a6e782bff9f2093bd02ea00e592",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 105,
"avg_line_length": 42.8034188034188,
"alnum_prop": 0.6715255591054313,
"repo_name": "vishnu2kmohan/dcos-commons",
"id": "c751f22a88fd5958417d9e0d21a67f0b941cacde",
"size": "5008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frameworks/cassandra/tests/test_soak.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "168256"
},
{
"name": "HTML",
"bytes": "99573"
},
{
"name": "Java",
"bytes": "2770769"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "457961"
},
{
"name": "Shell",
"bytes": "46736"
}
],
"symlink_target": ""
} |
from django.db import models
from django.forms import ModelForm
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from datetime import datetime
import time
import hashlib
import logging
class Project(models.Model):
    """A project students can register for and submit work to.

    Submissions are accepted only inside the
    [upload_opens_at, upload_ends_at] window.
    """

    # Maintained automatically by Django on save/create.
    date_updated = models.DateTimeField(auto_now=True, editable=False)
    date_created = models.DateTimeField(auto_now_add=True, editable=False)
    # Submission window boundaries.
    upload_opens_at = models.DateTimeField()
    upload_ends_at = models.DateTimeField()
    active = models.BooleanField(default=True, verbose_name="Shall this project be listed ?")
    # Registration is restricted to e-mails ending with this domain.
    email_endswith = models.CharField(max_length=75, verbose_name="Students e-mail should end with this domain")
    reupload = models.BooleanField(verbose_name="May a student delete and re-upload a file ?", default=False)
    title = models.CharField(max_length=120)
    subject = models.TextField(blank=True)

    def is_upload_open(self):
        """ Shall we accept submission for this project, right now ?"""
        # NOTE(review): compares against naive local time; confirm the
        # stored datetimes use the same timezone convention.
        now = datetime.now()
        if now > self.upload_opens_at and now < self.upload_ends_at:
            return True
        return False

    def __unicode__(self):
        return self.title

    def set_student(self, student):
        # Stash the current student on the instance (not persisted);
        # get_questions_answers() relies on this having been called.
        self._student = student

    def get_questions_answers(self):
        """Map each question id to the question and that student's answers.

        Requires set_student() to have been called beforehand.
        """
        qa = {}
        for question in self.projectquestion_set.all():
            qa[question.id] = {
                'question': question,
                'answers': question.studentanswer_set.filter(student=self._student).all()
            }
        return qa
class ProjectQuestion(models.Model):
    """A single question belonging to a Project."""

    date_updated = models.DateTimeField(auto_now=True, editable=False)
    date_created = models.DateTimeField(auto_now_add=True, editable=False)
    title = models.CharField(max_length=120)
    subject = models.TextField(blank=True)
    # Whether students may attach a file / type a textual answer.
    allow_file = models.BooleanField(default=False)
    allow_answer = models.BooleanField(default=False)
    project = models.ForeignKey(Project)

    def __unicode__(self):
        return "{0} {1}".format(self.project, self.title)

    def get_answers(self):
        """Return the current student's answers for this question.

        Requires Project.set_student() to have been called on
        self.project; logs and returns None otherwise.
        """
        try:
            return self.studentanswer_set.filter(student=self.project._student).all()
        except AttributeError:
            # project._student was never set (see Project.set_student).
            logging.error("AttributeError")
            return None
class AutoRegisteredStudent(models.Model):
    """A student who self-registered for a project via e-mail."""

    date_updated = models.DateTimeField(auto_now=True, editable=False)
    date_created = models.DateTimeField(auto_now_add=True, editable=False)
    email = models.EmailField()
    email_validated = models.BooleanField(default=False)
    name = models.CharField(max_length=75)
    surname = models.CharField(max_length=75)
    project = models.ForeignKey(Project)
    # Access token embedded in the e-mailed link; filled by gen_key().
    key = models.CharField(max_length=45, blank=True)

    def __unicode__(self):
        return self.email

    def gen_key(self):
        """Generate and persist this student's access key.

        Saves first so self.id exists, then builds the key from the id
        plus a SHA-1 of SECRET_KEY and the project id, and saves again.
        """
        self.save()
        self.key = "{0}{1}".format(self.id, hashlib.sha1("{0}{1}".format(settings.SECRET_KEY, self.project.id)).hexdigest())
        self.save()

    def get_answer(self, question):
        """Return this student's answers to the given question (queryset)."""
        return self.studentanswer_set.filter(question=question)

    def send_invite(self):
        """E-mail the student their personal access link (sender is CC'd)."""
        send_mail( 'Access link: {0}'.format(self.project),
            '{0}access/{1}'.format(settings.SITE_URL, self.key),
            settings.SENDER,
            [self.email, settings.SENDER],
            fail_silently=False)
class AutoRegisteredStudentForm(ModelForm):
    """Registration form enforcing the project's e-mail domain rule."""

    def clean_email(self):
        # Reject addresses outside the domain configured on the project.
        if not self.cleaned_data['email'].endswith(self.instance.project.email_endswith):
            raise forms.ValidationError("Your e-mail should end with {0}".format(self.instance.project.email_endswith))
        return self.cleaned_data['email']

    class Meta:
        model = AutoRegisteredStudent
        fields = ('email', 'name', 'surname', )
def get_student_file_path(instance, filename):
    """Build the upload path for a StudentFile.

    Layout: uploads/<project slug>/student-<id>/<unix time>.<name>.
    NOTE(review): Project as defined in this file declares no 'slug'
    field -- verify where it comes from.
    """
    parts = [
        'uploads',
        instance.question.project.slug,
        "student-{0}".format(instance.student.id),
        "{0}.{1}".format(time.time(), filename),
    ]
    return '/'.join(parts)
class StudentFile(models.Model):
    """A file uploaded by a student in answer to a question."""

    date_updated = models.DateTimeField(auto_now=True, editable=False)
    date_created = models.DateTimeField(auto_now_add=True, editable=False)
    student = models.ForeignKey(AutoRegisteredStudent)
    question = models.ForeignKey(ProjectQuestion)
    # Stored under uploads/... (see get_student_file_path).
    file = models.FileField(upload_to=get_student_file_path)

    def __unicode__(self):
        return "{0} {1}".format(self.student, self.question)
class StudentFileForm(ModelForm):
    """Upload form exposing only the file field of StudentFile."""

    class Meta:
        model = StudentFile
        fields = ('file', )
class StudentAnswer(models.Model):
    """A free-text answer a student gave to a question."""

    date_updated = models.DateTimeField(auto_now=True, editable=False)
    date_created = models.DateTimeField(auto_now_add=True, editable=False)
    student = models.ForeignKey(AutoRegisteredStudent)
    question = models.ForeignKey(ProjectQuestion)
    # Optional heading for the answer text.
    title = models.CharField(max_length=120, blank=True)
    answer = models.TextField()

    def __unicode__(self):
        return "{0} {1}".format(self.student, self.question)
class StudentAnswerForm(ModelForm):
    """Submission form exposing only the answer field of StudentAnswer."""

    class Meta:
        model = StudentAnswer
        fields = ('answer', )
| {
"content_hash": "e5c21850428aac4991ec69cdccfb0aeb",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 148,
"avg_line_length": 36.58571428571429,
"alnum_prop": 0.6788363920343615,
"repo_name": "flegoff/hcrendu",
"id": "cb2ff113fa51007d4788ba8fe1f095c74589b866",
"size": "5147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hcstudyprojects/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20208"
}
],
"symlink_target": ""
} |
__all__ = ['Source']
class Source(object):
    """A GraphQL source document plus a name used in error reporting."""

    __slots__ = ['body', 'name']

    def __init__(self, body, name='GraphQL'):
        self.body = body
        self.name = name

    def __eq__(self, other):
        # Sources are equal when both body and name match; any other
        # type compares unequal (original behavior preserved).
        if isinstance(other, Source):
            return self.body == other.body and self.name == other.name
        return False

    def __ne__(self, other):
        # BUG FIX: Python 2 does not derive __ne__ from __eq__, so
        # without this, a != b was True even for equal sources.
        return not self.__eq__(other)

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ broke the eq/hash
        # contract -- equal sources must hash equally for dict/set use.
        return hash((self.body, self.name))

    def __repr__(self):
        return 'Source(body={0!r}, name={1!r})'.format(self.body, self.name)
| {
"content_hash": "1d6716f55da3f105eff6f3a8c07a1ce0",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 23.928571428571427,
"alnum_prop": 0.5373134328358209,
"repo_name": "jhgg/graphqllib",
"id": "023a20d6d8ae5903c915d088f0677ac8e0965c11",
"size": "335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphql/core/language/source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "362575"
}
],
"symlink_target": ""
} |
from sqlcomplete.prioritization import PrevalenceCounter
def test_prevalence_counter():
    """Keyword and identifier counts must reflect occurrences in the SQL."""
    counter = PrevalenceCounter()
    sql = """SELECT * FROM foo WHERE bar GROUP BY baz;
    select * from foo;
    SELECT * FROM foo WHERE bar GROUP
    BY baz"""
    counter.update(sql)
    # Expected counts show matching is case-insensitive and that
    # "GROUP BY" is counted even when split across a line break.
    observed = [counter.keyword_count(kw)
                for kw in ("SELECT", "FROM", "GROUP BY")]
    assert observed == [3, 3, 2]
    assert counter.keyword_count("NOSUCHKEYWORD") == 0
    assert [counter.name_count(n) for n in ("foo", "bar", "baz")] == [3, 2, 2]
| {
"content_hash": "155801ba17e3f6519c8952534f537152",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 60,
"avg_line_length": 32.7,
"alnum_prop": 0.617737003058104,
"repo_name": "dbcli/sqlcomplete",
"id": "7aa77b8c7aa466983dba94156fb41af34ce99b16",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_prioritization.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "93621"
}
],
"symlink_target": ""
} |
from arcpy import AddMessage
import arcpy, csv, os, zipfile, glob, copy
from xml.dom.minidom import parse
class Export:
"""
Main class that exports CSV, KMZ, ZIP archive, Markdown metadata file.
"""
def __init__(self, path, output_name, debug):
    """
    Inits Export instance with necessary paths and input data
    information.

    path: workspace folder containing a 'shapefile' subfolder.
    output_name: base name (without extension) of the shapefile
        and of the generated output files.
    debug: debug flag, stored for later use.
    """
    self.path = path
    self.output_name = output_name
    self.full_path = self.path + '\\' + self.output_name
    self.shapefile = path + '\\shapefile\\' + output_name + '.shp'
    # Describe the shapefile up front so its geometry type and field
    # list are available to the export methods.
    self.desc = arcpy.Describe(self.shapefile)
    self.fields = [i.name for i in arcpy.ListFields(self.shapefile)]
    self.debug = debug
def __str__(self):
return 'ArcOpen Export class'
def _load(self, xml_file):
xmldoc = parse(xml_file).documentElement
return xmldoc
def _print_title(self, xml):
title = xml.getElementsByTagName('title')[0]
title = title.firstChild.nodeValue
return '# ' + title
def _print_data_dict(self, fields):
md = '### Data Dictionary\n\n'
md += '| Field | Description \n'
md += '| ----- | :----------: \n'
for field in fields:
md += '| ' + field + ' | \n'
return md
def _print_section(self, section):
elem, title = section
try:
content = self.source.getElementsByTagName(elem)[0]
content = content.firstChild.nodeValue
content = content.replace('\n', ' \n')
md = '\n\n' + '### ' + title + ' \n\n'
md += content
return md.encode('ascii', 'ignore')
except:
AddMessage('Error printing ' + title + ' section. Does this ' +
'section exist in your metadata?');
return ''
def csv(self):
shapefile_type = self.desc.shapeType
try:
if shapefile_type in ['Point', 'MultiPoint']:
with open(self.full_path + '.csv', 'wb') as f:
writer = csv.writer(f)
try:
self.fields.remove('Shape')
self.fields.remove('FID')
except:
pass
headers = copy.deepcopy(self.fields)
self.fields.append('SHAPE@XY')
headers.extend(['LAT', 'LNG'])
writer.writerow(headers)
cur = arcpy.SearchCursor(self.shapefile)
with arcpy.da.SearchCursor(self.shapefile, self.fields) as cur:
for row in cur:
lon, lat = row[-1]
coords = (lat, lon)
row = row[0:-1] + coords
writer.writerow(row)
return True
else:
AddMessage('Sorry, converting layers of geometry type ' +
shapefile_type + ' is not supported.')
return False
except Exception as err:
AddMessage('Unable to export CSV file: ' + str(err))
return False
def zip(self):
try:
match = self.path + '\\shapefile\\' + self.output_name + '.*'
zip_file = self.path + '\\shapefile\\' + self.output_name + '.zip'
files = glob.glob(match)
zf = zipfile.ZipFile(zip_file, mode='w')
for file in files:
try:
zf.write(file,
compress_type=zipfile.ZIP_DEFLATED,
arcname=os.path.basename(file))
except:
AddMessage('Could not include ' + file + ' in .zip archive!')
return False
zf.close()
return True
except Exception as err:
AddMessage('Unable to export ZIP archive: ' + str(err))
return False
def kmz(self):
kmz_file = self.full_path + '.kmz'
arcpy.MakeFeatureLayer_management(self.shapefile, self.output_name)
if arcpy.Exists(kmz_file):
arcpy.Delete_management(kmz_file)
try:
arcpy.LayerToKML_conversion(self.output_name, kmz_file, '',
'', self.output_name, '1024', '96',
'CLAMPED_TO_GROUND')
return True
except Exception as err:
AddMessage('Unable to export KMZ file: ' + str(err))
return False
def md(self):
install_dir = arcpy.GetInstallInfo('desktop')['InstallDir']
translator = install_dir + 'Metadata\\Translator\\ARCGIS2FGDC.xml'
if not os.path.isfile(translator):
AddMessage('Unable to export Markdown metadata file, ' +
'the XML translator file does not exist.')
return False
metadata = self.path + '\\shapefile\\temp\\README.xml'
arcpy.ESRITranslator_conversion(self.shapefile, translator, metadata)
top_sections = [
('purpose', 'Summary'),
('abstract', 'Description')
]
bottom_sections = [
('datacred', 'Credits'),
('useconst', 'Use Limitations')
]
self.source = self._load(metadata)
metadata_fields = copy.deepcopy(self.fields)
try:
metadata_fields.remove('SHAPE@XY')
except:
pass
self.markdown = ''
self.markdown = self._print_title(self.source)
for section in top_sections:
self.markdown += self._print_section(section)
self.markdown += ' \n\n'
self.markdown += self._print_data_dict(metadata_fields)
for section in bottom_sections:
self.markdown += self._print_section(section)
try:
md_file = open(self.path + '\\README.md', 'w')
md_file.write(self.markdown)
return True
except:
return False
finally:
md_file.close()
| {
"content_hash": "10d1c2f77b86c2a6a5f37b550514534b",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 83,
"avg_line_length": 35.801169590643276,
"alnum_prop": 0.5039202874877491,
"repo_name": "CityOfPhiladelphia/arc-open",
"id": "990e0dcbcd002d67991222fb7a2b614464cebacf",
"size": "6122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arc_open/export.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "42757"
}
],
"symlink_target": ""
} |
class PolicyPrefsTestCases(object):
  """A list of test cases for policy_prefs_ui.py.

  Maps each Chrome policy name to a (Pref, Value, Pages, OS) tuple; the
  tuple layout is documented in the comment block above |policies|.
  """
  # Indices into |settings_pages| naming each settings page that may show
  # the managed-setting banner.
  [BROWSER, SEARCH_ENGINES, PASSWORDS, AUTOFILL, CONTENT, HOMEPAGE, LANGUAGES,
   ACCOUNTS] = range(8)

  # Each policy has an entry with a tuple (Pref, Value, Pages, OS)
  #
  # |Pref| is the preference key for simple user policies that map directly to
  #        a preference (Refer to
  #        chrome/browser/policy/configuration_policy_handler_list.cc).
  #        Otherwise, set as None.
  #
  # |Value| is the value the policy should have when it is enforced.
  #
  # |Pages| is a list with integer indices into |settings_pages|, and lists all
  #         the pages that should display the managed-banner.
  #         Leave empty if this policy doesn't display anything visible in
  #         the settings.
  #
  # |OS| is a list of platforms where this policy should be tested. Valid
  #      platforms are 'win', 'mac', 'linux', and 'chromeos'. The list can be
  #      empty to skip the policy or set to OS_ALL if applicable to all
  #      platforms.
  #
  # ChromeOS device policies are also listed but are currently not tested by
  # policy_prefs_ui.py.

  # Indices into each policy tuple described above.
  [INDEX_PREF, INDEX_VALUE, INDEX_PAGES, INDEX_OS] = range(4)

  OS_ALL = ['win', 'mac', 'linux', 'chromeos']

  policies = {
    'HomepageLocation':
        ('kHomePage', 'http://chromium.org', [HOMEPAGE], OS_ALL),
    'HomepageIsNewTabPage':
        ('kHomePageIsNewTabPage', True, [HOMEPAGE], OS_ALL),
    # TODO(joaodasilva): Couldn't verify on linux.
    'DefaultBrowserSettingEnabled':
        ('kDefaultBrowserSettingEnabled', False, [], ['win', 'mac', 'linux']),
    # TODO(joaodasilva): Test this on windows.
    'ApplicationLocaleValue': ('kApplicationLocale', '', [], ['win']),
    'AlternateErrorPagesEnabled':
        ('kAlternateErrorPagesEnabled', False, [BROWSER], OS_ALL),
    'SearchSuggestEnabled':
        ('kSearchSuggestEnabled', False, [BROWSER], OS_ALL),
    'DnsPrefetchingEnabled':
        ('kNetworkPredictionEnabled', False, [BROWSER], OS_ALL),
    'DisableSpdy': ('kDisableSpdy', True, [], OS_ALL),
    'DisabledSchemes': ('kDisabledSchemes', ['file'], [], OS_ALL),
    'JavascriptEnabled': (None, False, [CONTENT], OS_ALL),
    'IncognitoEnabled': (None, False, [], OS_ALL),
    'IncognitoModeAvailability': (None, 1, [], OS_ALL),
    'SavingBrowserHistoryDisabled':
        ('kSavingBrowserHistoryDisabled', True, [], OS_ALL),
    'RemoteAccessClientFirewallTraversal': (None, True, [], OS_ALL),
    # TODO(frankf): Enable on all OS after crbug.com/121066 is fixed.
    'RemoteAccessHostFirewallTraversal':
        ('kRemoteAccessHostFirewallTraversal', True, [], []),
    'PrintingEnabled': ('kPrintingEnabled', False, [], OS_ALL),
    # Note: supported_on is empty for this policy.
    'CloudPrintProxyEnabled': ('kCloudPrintProxyEnabled', True, [], []),
    'CloudPrintSubmitEnabled':
        ('kCloudPrintSubmitEnabled', False, [], ['win', 'mac', 'linux']),
    'SafeBrowsingEnabled': ('kSafeBrowsingEnabled', False, [BROWSER], OS_ALL),
    # TODO(joaodasilva): This is only in place on official builds, but the
    # SetUserCloudPolicy call is a nop on official builds. Should be BROWSER.
    'MetricsReportingEnabled':
        ('kMetricsReportingEnabled', False, [], ['win', 'mac', 'linux']),
    'PasswordManagerEnabled':
        ('kPasswordManagerEnabled', False, [BROWSER], OS_ALL),
    # TODO(joaodasilva): Should be PASSWORDS too. http://crbug.com/97749
    'PasswordManagerAllowShowPasswords':
        ('kPasswordManagerAllowShowPasswords', False, [BROWSER], OS_ALL),
    'AutoFillEnabled': ('kAutofillEnabled', False, [BROWSER], OS_ALL),
    'DisabledPlugins': ('kPluginsDisabledPlugins', ['Flash'], [], OS_ALL),
    'EnabledPlugins': ('kPluginsEnabledPlugins', ['Flash'], [], OS_ALL),
    'DisabledPluginsExceptions':
        ('kPluginsDisabledPluginsExceptions', ['Flash'], [], OS_ALL),
    'DisablePluginFinder': ('kDisablePluginFinder', True, [], OS_ALL),
    # TODO(joaodasilva): Should be PERSONAL. http://crbug.com/97749
    'SyncDisabled': (None, True, [], OS_ALL),
    'UserDataDir':
        (None, '${users}/${user_name}/chrome-test', [], ['win', 'mac']),
    'DiskCacheDir':
        ('kDiskCacheDir', '${user_home}/test-cache', [],
         ['win', 'mac', 'linux']),
    'DiskCacheSize': ('kDiskCacheSize', 100, [], ['win', 'mac', 'linux']),
    'MediaCacheSize': ('kMediaCacheSize', 200, [], ['win', 'mac', 'linux']),
    'DownloadDirectory': (None, '${user_home}/test-downloads', [BROWSER],
                          ['win', 'mac', 'linux']),
    'ClearSiteDataOnExit': (None, True, [CONTENT], OS_ALL),
    # TODO(joaodasilva): Should be BROWSER. http://crbug.com/97749
    'ProxyMode': (None, 'direct', [], ['win', 'mac', 'linux']),
    # TODO(joaodasilva): Should be BROWSER. http://crbug.com/97749
    'ProxyServerMode': (None, 0, [], ['win', 'mac', 'linux']),
    'ProxyServer':
        (None, 'http://localhost:8080', [], ['win', 'mac', 'linux']),
    'ProxyPacUrl':
        (None, 'http://localhost:8080/proxy.pac', [],
         ['win', 'mac', 'linux']),
    'ProxyBypassList': (None, 'localhost', [], ['win', 'mac', 'linux']),
    # Note: this policy is only used internally for now.
    'ProxySettings': (None, {}, [], []),
    'EnableOriginBoundCerts':
        ('kEnableOriginBoundCerts', False, [], ['win', 'mac', 'linux']),
    'DisableSSLRecordSplitting':
        ('kDisableSSLRecordSplitting', False, [], OS_ALL),
    'EnableOnlineRevocationChecks':
        ('kCertRevocationCheckingEnabled', False, [], OS_ALL),
    'AuthSchemes': ('kAuthSchemes', 'AuthSchemes', [], OS_ALL),
    'DisableAuthNegotiateCnameLookup':
        ('kDisableAuthNegotiateCnameLookup', True, [], OS_ALL),
    'EnableAuthNegotiatePort':
        ('kEnableAuthNegotiatePort', False, [], OS_ALL),
    'AuthServerWhitelist': ('kAuthServerWhitelist', 'localhost', [], OS_ALL),
    'AuthNegotiateDelegateWhitelist':
        ('kAuthNegotiateDelegateWhitelist', 'localhost', [], OS_ALL),
    'GSSAPILibraryName':
        ('kGSSAPILibraryName', 'libwhatever.so', [], ['mac', 'linux']),
    'AllowCrossOriginAuthPrompt':
        ('kAllowCrossOriginAuthPrompt', False, [], ['win', 'mac', 'linux']),
    'ExtensionInstallBlacklist':
        ('kExtensionInstallDenyList', ['*'], [], OS_ALL),
    'ExtensionInstallWhitelist':
        ('kExtensionInstallAllowList', ['lcncmkcnkcdbbanbjakcencbaoegdjlp'],
         [], OS_ALL),
    'ExtensionInstallForcelist':
        ('kExtensionInstallForceList', ['lcncmkcnkcdbbanbjakcencbaoegdjlp;' +
            'https://clients2.google.com/service/update2/crx'], [], OS_ALL),
    'ExtensionInstallSources':
        ('kExtensionAllowedInstallSites', ['https://www.corp.monkey.net/*'],
         [], OS_ALL),
    'ShowHomeButton': ('kShowHomeButton', True, [BROWSER], OS_ALL),
    'DeveloperToolsDisabled': ('kDevToolsDisabled', True, [], OS_ALL),
    'RestoreOnStartup': (None, 5, [BROWSER], OS_ALL),
    # TODO(joaodasilva): Should be BROWSER. http://crbug.com/97749
    'RestoreOnStartupURLs':
        ('kURLsToRestoreOnStartup', ['chromium.org'], [], OS_ALL),
    # TODO(joaodasilva): The banner is out of place. http://crbug.com/77791
    'BlockThirdPartyCookies':
        ('kBlockThirdPartyCookies', True, [CONTENT], OS_ALL),
    # TODO(joaodasilva): Should be BROWSER. http://crbug.com/97749
    'DefaultSearchProviderEnabled': (None, False, [], OS_ALL),
    'DefaultSearchProviderName': (None, 'google.com', [], OS_ALL),
    'DefaultSearchProviderKeyword': (None, 'google', [], OS_ALL),
    # TODO(joaodasilva): Should be BROWSER. http://crbug.com/97749
    'DefaultSearchProviderSearchURL':
        (None, 'http://www.google.com/?q={searchTerms}', [], OS_ALL),
    'DefaultSearchProviderSuggestURL':
        (None, 'http://www.google.com/suggest?q={searchTerms}', [], OS_ALL),
    'DefaultSearchProviderInstantURL':
        (None, 'http://www.google.com/instant?q={searchTerms}', [], OS_ALL),
    'DefaultSearchProviderIconURL':
        (None, 'http://www.google.com/favicon.ico', [], OS_ALL),
    'DefaultSearchProviderEncodings': (None, ['UTF-8'], [], OS_ALL),
    'DefaultCookiesSetting':
        ('kManagedDefaultCookiesSetting', 2, [CONTENT], OS_ALL),
    'DefaultImagesSetting':
        ('kManagedDefaultImagesSetting', 2, [CONTENT], OS_ALL),
    'DefaultJavaScriptSetting': (None, 2, [CONTENT], OS_ALL),
    'DefaultPluginsSetting':
        ('kManagedDefaultPluginsSetting', 2, [CONTENT], OS_ALL),
    'DefaultPopupsSetting':
        ('kManagedDefaultPopupsSetting', 2, [CONTENT], OS_ALL),
    'DefaultNotificationsSetting':
        ('kManagedDefaultNotificationsSetting', 2, [CONTENT], OS_ALL),
    'DefaultGeolocationSetting':
        ('kManagedDefaultGeolocationSetting', 2, [CONTENT], OS_ALL),
    'DefaultMediaStreamSetting':
        ('kManagedDefaultMediaStreamSetting', 2, [CONTENT], OS_ALL),
    'AutoSelectCertificateForUrls':
        ('kManagedAutoSelectCertificateForUrls',
         ['{\'pattern\':\'https://example.com\',' +
          '\'filter\':{\'ISSUER\':{\'CN\': \'issuer-name\'}}}'], [], OS_ALL),
    'CookiesAllowedForUrls':
        ('kManagedCookiesAllowedForUrls', ['[*.]google.com'], [], OS_ALL),
    'CookiesBlockedForUrls':
        ('kManagedCookiesBlockedForUrls', ['[*.]google.com'], [], OS_ALL),
    'CookiesSessionOnlyForUrls':
        ('kManagedCookiesSessionOnlyForUrls', ['[*.]google.com'], [], OS_ALL),
    'ImagesAllowedForUrls':
        ('kManagedImagesAllowedForUrls', ['[*.]google.com'], [], OS_ALL),
    'ImagesBlockedForUrls':
        ('kManagedImagesBlockedForUrls', ['[*.]google.com'], [], OS_ALL),
    'JavaScriptAllowedForUrls':
        ('kManagedJavaScriptAllowedForUrls', ['[*.]google.com'], [], OS_ALL),
    'JavaScriptBlockedForUrls':
        ('kManagedJavaScriptBlockedForUrls', ['[*.]google.com'], [], OS_ALL),
    'PluginsAllowedForUrls':
        ('kManagedPluginsAllowedForUrls', ['[*.]google.com'], [], OS_ALL),
    'PluginsBlockedForUrls':
        ('kManagedPluginsBlockedForUrls', ['[*.]google.com'], [], OS_ALL),
    'PopupsAllowedForUrls':
        ('kManagedPopupsAllowedForUrls', ['[*.]google.com'], [], OS_ALL),
    'PopupsBlockedForUrls':
        ('kManagedPopupsBlockedForUrls', ['[*.]google.com'], [], OS_ALL),
    'NotificationsAllowedForUrls':
        ('kManagedNotificationsAllowedForUrls', ['[*.]google.com'], [],
         OS_ALL),
    'NotificationsBlockedForUrls':
        ('kManagedNotificationsBlockedForUrls', ['[*.]google.com'], [],
         OS_ALL),
    'Disable3DAPIs': ('kDisable3DAPIs', True, [], OS_ALL),
    'InstantEnabled': ('kInstantEnabled', False, [BROWSER], OS_ALL),
    'TranslateEnabled': ('kEnableTranslate', False, [BROWSER], OS_ALL),
    'AllowOutdatedPlugins': ('kPluginsAllowOutdated', False, [], OS_ALL),
    'AlwaysAuthorizePlugins': ('kPluginsAlwaysAuthorize', True, [], OS_ALL),
    'BookmarkBarEnabled': ('kShowBookmarkBar', False, [BROWSER], OS_ALL),
    'EditBookmarksEnabled': ('kEditBookmarksEnabled', False, [], OS_ALL),
    'AllowFileSelectionDialogs':
        ('kAllowFileSelectionDialogs', False, [BROWSER],
         ['win', 'mac', 'linux']),
    'ImportBookmarks':
        ('kImportBookmarks', False, [], ['win', 'mac', 'linux']),
    'ImportHistory':
        ('kImportHistory', False, [], ['win', 'mac', 'linux']),
    'ImportHomepage':
        ('kImportHomepage', False, [], ['win', 'mac', 'linux']),
    'ImportSearchEngine':
        ('kImportSearchEngine', False, [], ['win', 'mac', 'linux']),
    'ImportSavedPasswords':
        ('kImportSavedPasswords', False, [], ['win', 'mac', 'linux']),
    'MaxConnectionsPerProxy': ('kMaxConnectionsPerProxy', 32, [], OS_ALL),
    'HideWebStorePromo': ('kNtpHideWebStorePromo', True, [], OS_ALL),
    'URLBlacklist': ('kUrlBlacklist', ['google.com'], [], OS_ALL),
    'URLWhitelist': ('kUrlWhitelist', ['google.com'], [], OS_ALL),
    'EnterpriseWebStoreURL': ('kEnterpriseWebStoreURL', '', [], OS_ALL),
    'EnterpriseWebStoreName': ('kEnterpriseWebStoreName', '', [], OS_ALL),
    'EnableMemoryInfo': ('kEnableMemoryInfo', True, [], OS_ALL),
    'DisablePrintPreview':
        ('kPrintPreviewDisabled', True, [], ['win', 'mac', 'linux']),
    'BackgroundModeEnabled':
        ('kBackgroundModeEnabled', True, [BROWSER], ['win', 'linux']),
    'RestrictSigninToPattern': ('kGoogleServicesUsernamePattern',
                                '.*@google.com', [], ['win', 'mac', 'linux']),
    'DisableSafeBrowsingProceedAnyway':
        ('kSafeBrowsingProceedAnywayDisabled', True, [], OS_ALL),
    # TODO(joaodasilva): this policy affects the BROWSER settings page but
    # is only included in official builds.
    'SpellCheckServiceEnabled':
        ('kSpellCheckUseSpellingService', False, [], OS_ALL),

    # ChromeOS-only policies:
    # TODO(frankf): Add prefs for these after crosbug.com/28756 is fixed.
    'ChromeOsLockOnIdleSuspend':
        (None, True, [BROWSER], ['chromeos']),
    'PolicyRefreshRate':
        (None, 300000, [], ['chromeos']),
    'OpenNetworkConfiguration': (None, '', [], ['chromeos']),
    'GDataDisabled': (None, True, [], ['chromeos']),
    'GDataDisabledOverCellular':
        (None, True, [], ['chromeos']),
    'PinnedLauncherApps': (None, [], [], ['chromeos']),

    # ChromeOS Device policies:
    'DevicePolicyRefreshRate': (None, 300000, [], ['chromeos']),
    'ChromeOsReleaseChannel': (None, 'stable-channel', [], ['chromeos']),
    'ChromeOsReleaseChannelDelegated': (None, False, [], ['chromeos']),
    'DeviceOpenNetworkConfiguration': (None, '', [], ['chromeos']),
    'ReportDeviceVersionInfo': (None, True, [], ['chromeos']),
    'ReportDeviceActivityTimes': (None, True, [], ['chromeos']),
    'ReportDeviceBootMode': (None, True, [], ['chromeos']),
    'DeviceAllowNewUsers': (None, True, [], ['chromeos']),
    'DeviceUserWhitelist': (None, [], [], ['chromeos']),
    'DeviceGuestModeEnabled': (None, True, [], ['chromeos']),
    'DeviceShowUserNamesOnSignin': (None, True, [], ['chromeos']),
    'DeviceDataRoamingEnabled': (None, True, [], ['chromeos']),
    'DeviceMetricsReportingEnabled': (None, True, [], ['chromeos']),
    'DeviceEphemeralUsersEnabled': (None, True, [], ['chromeos']),
    'DeviceIdleLogoutTimeout': (None, 60000, [], ['chromeos']),
    'DeviceIdleLogoutWarningDuration': (None, 15000, [], ['chromeos']),
    'DeviceLoginScreenSaverId':
        (None, 'lcncmkcnkcdbbanbjakcencbaoegdjlp', [], ['chromeos']),
    'DeviceLoginScreenSaverTimeout': (None, 30000, [], ['chromeos']),
    'DeviceStartUpUrls': (None, ['http://google.com'], [], ['chromeos']),
    'DeviceAppPack': (None, [], [], ['chromeos']),
    'DeviceAutoUpdateDisabled': (None, True, [], ['chromeos']),
    'DeviceTargetVersionPrefix': (None, '1412.', [], ['chromeos']),
    'DeviceUpdateScatterFactor': (None, '7200', [], ['chromeos']),
    'DeviceUpdateAllowedConnectionTypes': (None, [], [], ['chromeos']),
    'ReportDeviceLocation': (None, False, [], ['chromeos']),

    # Chrome Frame policies:
    'ChromeFrameRendererSettings': (None, 0, [], []),
    'RenderInChromeFrameList': (None, ['google.com'], [], []),
    'RenderInHostList': (None, ['google.com'], [], []),
    'ChromeFrameContentTypes': (None, ['text/xml'], [], []),
    'GCFUserDataDir': (None, '${user_name}/test-frame', [], []),
    'AdditionalLaunchParameters': (None, '--enable-media-stream', [], []),
  }
| {
"content_hash": "37f0ac84c40aabeef15fbdf0200227b1",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 80,
"avg_line_length": 53.107638888888886,
"alnum_prop": 0.6301405688133377,
"repo_name": "keishi/chromium",
"id": "8489ff9699b42ddd19622586fa364a5282953214",
"size": "15770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chrome/test/functional/policy_test_cases.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "C",
"bytes": "67452317"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "132681259"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "19048"
},
{
"name": "Java",
"bytes": "361412"
},
{
"name": "JavaScript",
"bytes": "16603687"
},
{
"name": "Objective-C",
"bytes": "9609581"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "918683"
},
{
"name": "Python",
"bytes": "6407891"
},
{
"name": "R",
"bytes": "524"
},
{
"name": "Shell",
"bytes": "4192593"
},
{
"name": "Tcl",
"bytes": "277077"
}
],
"symlink_target": ""
} |
import operator
from copy import deepcopy
import matplotlib
import numpy as np
import pytest
from astropy import units as u
from marvin.core.exceptions import MarvinError
from tests import marvin_test_if
from marvin.tools.maps import Maps
from marvin.tools.quantities import EnhancedMap, Map
from marvin.utils.datamodel.dap import datamodel
from marvin.utils.general.maskbit import Maskbit
# --- Fixture arrays for the inverse-variance (ivar) propagation tests ------
# value1/value2 are sample 2x2 map values; value_prod12 is value1 * value2
# and value_log2 is log10(value2).
value1 = np.array([[16.35, 0.8],
                   [0, -10.]])
value2 = np.array([[591., 1e-8],
                   [4., 10]])
value_prod12 = np.array([[9.66285000e+03, 8e-9],
                         [0, -100]])
value_log2 = np.array([[2.77158748, -8.],
                       [0.60205999, 1.]])

# ivar1/ivar2 are the inverse variances paired with value1/value2; the
# ivar_* arrays below are the expected propagated ivars for a sum, a
# product, powers (2, 0.5, 0, -1, -2, -0.5) and log10.
ivar1 = np.array([[4, 1],
                  [6.97789734e+36, 1e8]])
ivar2 = np.array([[10, 1e-8],
                  [5.76744385e+36, 0]])
ivar_sum12 = np.array([[2.85714286e+00, 9.99999990e-09],
                       [3.15759543e+36, 0]])
ivar_prod12 = np.array([[1.10616234e-05, 1.56250000e-08],
                        [0, 0.]])
ivar_pow_2 = np.array([[5.23472002e-08, 9.53674316e-01],
                       [0, 25]])
ivar_pow_05 = np.array([[3.66072168e-03, 7.81250000e+00],
                        [0, 0]])
ivar_pow_0 = np.array([[0, 0],
                       [0, 0]])
ivar_pow_m1 = np.array([[4, 1.],
                        [0, 1e+08]])
ivar_pow_m2 = np.array([[2.67322500e+02, 1.6e-01],
                        [0, 2.5e+09]])
ivar_pow_m05 = np.array([[0.97859327, 5],
                         [0, 0]])
ivar_log1 = np.array([[3.67423420e-04, 4.34294482e+07],
                      [4.11019127e-20, 4.34294482e-06]])

# Flux unit (and its square) used by the unit-propagation tests.
u_flux = u.erg / u.cm**2 / u.s / u.def_unit('spaxel')
u_flux2 = u_flux * u_flux

# Names of every numpy ufunc; Map is expected to raise NotImplementedError
# for all of them (log10 is skipped in the relevant test).
ufuncs = [it for it in dir(np) if isinstance(getattr(np, it), np.ufunc)]
def _get_maps_kwargs(galaxy, data_origin):
if data_origin == 'file':
maps_kwargs = dict(filename=galaxy.mapspath)
else:
maps_kwargs = dict(plateifu=galaxy.plateifu, release=galaxy.release,
bintype=galaxy.bintype, template_kin=galaxy.template,
mode='local' if data_origin == 'db' else 'remote')
return maps_kwargs
@pytest.fixture(scope='function', params=[('emline_gflux', 'ha_6564'),
                                          ('emline_gvel', 'oiii_5008'),
                                          ('stellar_vel', None),
                                          ('stellar_sigma', None)])
def map_(request, galaxy, data_origin):
    """Yield one Map per (property, channel) combination from each origin."""
    prop_name, channel = request.param
    maps = Maps(**_get_maps_kwargs(galaxy, data_origin))
    quantity = maps.getMap(property_name=prop_name, channel=channel)
    # Tag the quantity so tests can skip/branch on where the data came from.
    quantity.data_origin = data_origin
    return quantity
class TestMap(object):
    """Tests for the basic behaviour of marvin Map quantities."""

    def test_map(self, map_, galaxy):
        """Shapes, masked array, S/N and unit agree with the galaxy fixture."""
        assert map_.getMaps().release == galaxy.release

        assert tuple(map_.shape) == tuple(galaxy.shape)
        assert map_.value.shape == tuple(galaxy.shape)
        assert map_.ivar.shape == tuple(galaxy.shape)
        assert map_.mask.shape == tuple(galaxy.shape)

        # The masked array mirrors value/mask.
        assert (map_.masked.data == map_.value).all()
        assert (map_.masked.mask == map_.mask.astype(bool)).all()

        # snr == |value| * sqrt(ivar).
        assert map_.snr == pytest.approx(np.abs(map_.value * np.sqrt(map_.ivar)))

        # The unit matches the DAP datamodel entry for this property.
        assert datamodel[map_.getMaps()._dapver][map_.datamodel.full()].unit == map_.unit

    def test_plot(self, map_):
        """Map.plot returns a matplotlib (figure, axes) pair."""
        fig, ax = map_.plot()
        assert isinstance(fig, matplotlib.figure.Figure)
        assert isinstance(ax, matplotlib.axes._subplots.Subplot)
        assert 'Make single panel map or one panel of multi-panel map plot.' in map_.plot.__doc__

    @marvin_test_if(mark='skip', map_={'data_origin': ['db']})
    def test_save_and_restore(self, temp_scratch, map_):
        """A Map round-trips through save()/Map.restore()."""
        fout = temp_scratch / 'test_map.mpf'
        map_.save(str(fout))
        assert fout.exists() is True

        map_restored = Map.restore(str(fout), delete=True)
        assert tuple(map_.shape) == tuple(map_restored.shape)

    @pytest.mark.parametrize('property_name, channel',
                             [('emline_gflux', 'ha_6564'),
                              ('stellar_vel', None)])
    def test_deepcopy(self, galaxy, property_name, channel):
        """deepcopy preserves every public attribute of a Map."""
        maps = Maps(plateifu=galaxy.plateifu)
        map1 = maps.getMap(property_name=property_name, channel=channel)
        map2 = deepcopy(map1)

        for attr in vars(map1):
            if not attr.startswith('_'):
                value = getattr(map1, attr)
                value2 = getattr(map2, attr)

                if isinstance(value, np.ndarray):
                    assert np.isclose(value, value2).all()
                elif isinstance(value, np.ma.core.MaskedArray):
                    assert (np.isclose(value.data, value2.data).all() and
                            (value.mask == value2.mask).all())
                elif isinstance(value, Maskbit) or isinstance(value[0], Maskbit):
                    # Normalize a single Maskbit to a list for uniform checks.
                    if isinstance(value, Maskbit):
                        value = [value]
                        value2 = [value2]

                    for mb, mb2 in zip(value, value2):
                        for it in ['bits', 'description', 'labels', 'mask', 'name']:
                            assert getattr(mb, it) == getattr(mb2, it)

                        assert (mb.schema == mb2.schema).all().all()
                elif isinstance(value, Maps):
                    # The parent Maps object is intentionally not compared.
                    pass
                else:
                    assert value == value2, attr

    def test_getMap_invalid_property(self, galaxy):
        """An unknown property name raises a ValueError."""
        maps = Maps(plateifu=galaxy.plateifu)
        with pytest.raises(ValueError) as ee:
            maps.getMap(property_name='mythical_property')

        assert 'Your input value is too ambiguous.' in str(ee.value)

    def test_getMap_invalid_channel(self, galaxy):
        """An unknown channel name raises a ValueError."""
        maps = Maps(plateifu=galaxy.plateifu)
        with pytest.raises(ValueError) as ee:
            maps.getMap(property_name='emline_gflux', channel='mythical_channel')

        assert 'Your input value is too ambiguous.' in str(ee.value)

    @marvin_test_if(mark='include', maps={'plateifu': '8485-1901',
                                          'release': 'DR17',
                                          'mode': 'local',
                                          'data_origin': 'file'})
    def test_quatities_reorder(self, maps):
        """Asserts the unit survives a quantity reorder (issue #374)."""
        ha = maps['emline_gflux_ha']
        assert ha is not None
        assert ha.unit is not None

        reordered_ha = np.moveaxis(ha, 0, -1)
        assert reordered_ha.unit is not None

    @marvin_test_if(mark='include', maps={'plateifu': '8485-1901',
                                          'release': 'DR17',
                                          'bintype': ['HYB10']})
    def test_get_spaxel(self, maps):
        """Tests `.Map.getSpaxel`."""
        ha = maps['emline_gflux_ha']
        spaxel = ha.getSpaxel(x=10, y=10, xyorig='lower')
        assert spaxel is not None
        assert spaxel.x == 10 and spaxel.y == 10

    @marvin_test_if(mark='skip', galaxy=dict(release=['DR17']))
    def test_stellar_sigma_values(self, maps, galaxy):
        ''' Assert values for stellar_sigma and stellar_sigmacorr are different (issue #411) '''
        ss = maps.stellar_sigma
        sc = maps.stellar_sigmacorr
        compare = sum(ss.value == sc.value)
        assert len(np.unique(compare)) > 1

        # Spot-check both maps against the reference values in the galaxy
        # fixture at a known (x, y) spaxel.
        x = galaxy.dap['x']
        y = galaxy.dap['y']
        ssvalue = galaxy.dap['stellar_sigma'][galaxy.bintype.name]
        scvalue = galaxy.dap['stellar_sigmacorr'][galaxy.bintype.name]
        assert ssvalue == pytest.approx(ss[x, y].value, 1e-4)
        assert scvalue == pytest.approx(sc[x, y].value, 1e-4)

    def test_datamodel(self, maps):
        """The per-channel datamodel description is exposed on the quantity."""
        gew_ha = maps.emline_gew_ha_6564
        assert gew_ha.datamodel.description == ('Gaussian-fitted equivalent widths measurements '
                                                '(based on EMLINE_GFLUX). Channel = H-alpha 6564.')

    @marvin_test_if(mark='include', galaxy=dict(release=['DR15']))
    def test_stellar_sigma_mpl6(self, maps, galaxy):
        """Accessing stellar_sigmacorr raises a MarvinError for DR15."""
        with pytest.raises(MarvinError) as cm:
            __ = maps.stellar_sigmacorr

        assert 'stellar_sigmacorr is unreliable in DR15. Please use DR17.' in str(cm.value)
class TestMapArith(object):
def test_add_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha + 10.
assert ha10.value == pytest.approx(ha.value + 10.)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
def test_reflexive_add_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = 10. + ha
assert ha10.value == pytest.approx(ha.value + 10.)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
def test_subtract_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha - 10.
assert ha10.value == pytest.approx(ha.value - 10.)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
def test_reflexive_subtract_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = 10. - ha
assert ha10.value == pytest.approx(10. - ha.value)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
def test_multiply_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha * 10.
assert ha10.value == pytest.approx(ha.value * 10.)
assert ha10.ivar == pytest.approx(ha.ivar / 10.**2)
assert ha10.mask == pytest.approx(ha.mask)
def test_reflexive_multiply_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = 10. * ha
assert ha10.value == pytest.approx(ha.value * 10.)
assert ha10.ivar == pytest.approx(ha.ivar / 10.**2)
assert ha10.mask == pytest.approx(ha.mask)
def test_divide_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha / 10.
assert ha10.value == pytest.approx(ha.value / 10.)
assert ha10.ivar == pytest.approx(ha.ivar * 10.**2)
assert ha10.mask == pytest.approx(ha.mask)
def test_reflexive_divide_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = 10. / ha
assert ha10.value == pytest.approx(10. / ha.value)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
@pytest.mark.parametrize('ivar1, ivar2, expected',
[(ivar1, ivar2, ivar_sum12)])
def test_add_ivar(self, ivar1, ivar2, expected):
assert Map._add_ivar(ivar1, ivar2) == pytest.approx(expected)
@pytest.mark.parametrize('ivar1, ivar2, value1, value2, value_prod12, expected',
[(ivar1, ivar2, value1, value2, value_prod12, ivar_prod12)])
def test_mul_ivar(self, ivar1, ivar2, value1, value2, value_prod12, expected):
ivar = Map._mul_ivar(ivar1, ivar2, value1, value2, value_prod12)
ivar[np.isnan(ivar)] = 0
ivar[np.isinf(ivar)] = 0
assert ivar == pytest.approx(expected)
@pytest.mark.parametrize('power, expected',
[(2, ivar_pow_2),
(0.5, ivar_pow_05),
(0, ivar_pow_0),
(-1, ivar_pow_m1),
(-2, ivar_pow_m2),
(-0.5, ivar_pow_m05)])
@pytest.mark.parametrize('ivar, value,',
[(ivar1, value1)])
def test_pow_ivar(self, ivar, value, power, expected):
ivar = Map._pow_ivar(ivar, value, power)
ivar[np.isnan(ivar)] = 0
ivar[np.isinf(ivar)] = 0
assert ivar == pytest.approx(expected)
@pytest.mark.parametrize('power', [2, 0.5, 0, -1, -2, -0.5])
def test_pow_ivar_none(self, power):
assert Map._pow_ivar(None, np.arange(4), power) == pytest.approx(np.zeros(4))
@pytest.mark.parametrize('ivar, value, expected',
[(ivar1, value2, ivar_log1)])
def test_log10_ivar(self, ivar, value, expected):
actual = Map._log10_ivar(ivar, value)
assert actual == pytest.approx(expected)
def test_log10(self, maps_release_only):
niiha = maps_release_only.emline_gflux_nii_6585 / maps_release_only.emline_gflux_nii_6585
log_niiha = np.log10(niiha)
ivar = np.log10(np.e) * niiha.ivar**-0.5 / niiha.value
assert log_niiha.value == pytest.approx(np.log10(niiha.value), nan_ok=True)
assert log_niiha.ivar == pytest.approx(ivar, nan_ok=True)
assert (log_niiha.mask == niiha.mask).all()
assert log_niiha.unit == u.dimensionless_unscaled
@pytest.mark.runslow
@marvin_test_if(mark='skip', ufunc=['log10'])
@pytest.mark.parametrize('ufunc', ufuncs)
def test_np_ufunc_notimplemented(self, maps_release_only, ufunc):
ha = maps_release_only.emline_gflux_ha_6564
nii = maps_release_only.emline_gflux_nii_6585
with pytest.raises(NotImplementedError) as ee:
if getattr(getattr(np, ufunc), 'nargs') <= 2:
getattr(np, ufunc)(ha)
else:
getattr(np, ufunc)(nii, ha)
expected = 'np.{0} is not implemented for Map.'.format(getattr(np, ufunc).__name__)
assert str(ee.value) == expected
@pytest.mark.parametrize('unit1, unit2, op, expected',
[(u_flux, u_flux, '+', u_flux),
(u_flux, u_flux, '-', u_flux),
(u_flux, u_flux, '*', u_flux2),
(u_flux, u_flux, '/', u.dimensionless_unscaled),
(u.km, u.s, '*', u.km * u.s),
(u.km, u.s, '/', u.km / u.s)])
def test_unit_propagation(self, unit1, unit2, op, expected):
assert Map._unit_propagation(unit1, unit2, op) == expected
@pytest.mark.parametrize('unit1, unit2, op',
[(u_flux, u.km, '+'),
(u_flux, u.km, '-')])
def test_unit_propagation_mismatch(self, unit1, unit2, op):
with pytest.warns(UserWarning):
assert Map._unit_propagation(unit1, unit2, op) is None
@pytest.mark.parametrize('property1, channel1, property2, channel2',
[('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
('emline_gvel', 'ha_6564', 'stellar_vel', None)])
def test_add_maps(self, galaxy, property1, channel1, property2, channel2):
maps = Maps(plateifu=galaxy.plateifu)
map1 = maps.getMap(property_name=property1, channel=channel1)
map2 = maps.getMap(property_name=property2, channel=channel2)
map12 = map1 + map2
assert map12.value == pytest.approx(map1.value + map2.value)
assert map12.ivar == pytest.approx(map1._add_ivar(map1.ivar, map2.ivar))
assert map12.mask == pytest.approx(map1.mask | map2.mask)
@pytest.mark.parametrize('property1, channel1, property2, channel2',
[('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
('emline_gvel', 'ha_6564', 'stellar_vel', None)])
def test_subtract_maps(self, galaxy, property1, channel1, property2, channel2):
maps = Maps(plateifu=galaxy.plateifu)
map1 = maps.getMap(property_name=property1, channel=channel1)
map2 = maps.getMap(property_name=property2, channel=channel2)
map12 = map1 - map2
assert map12.value == pytest.approx(map1.value - map2.value)
assert map12.ivar == pytest.approx(map1._add_ivar(map1.ivar, map2.ivar))
assert map12.mask == pytest.approx(map1.mask | map2.mask)
    @pytest.mark.parametrize('property1, channel1, property2, channel2',
                             [('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
                              ('emline_gvel', 'ha_6564', 'stellar_vel', None)])
    def test_multiply_maps(self, galaxy, property1, channel1, property2, channel2):
        """Map * Map multiplies values and propagates ivar via Map._mul_ivar."""
        maps = Maps(plateifu=galaxy.plateifu)
        map1 = maps.getMap(property_name=property1, channel=channel1)
        map2 = maps.getMap(property_name=property2, channel=channel2)
        map12 = map1 * map2
        ivar = map1._mul_ivar(map1.ivar, map2.ivar, map1.value, map2.value, map12.value)
        # NaN/inf ivar (e.g. from zero-valued spaxels) are zeroed out.
        ivar[np.isnan(ivar)] = 0
        ivar[np.isinf(ivar)] = 0
        assert map12.value == pytest.approx(map1.value * map2.value)
        assert map12.ivar == pytest.approx(ivar)
        assert map12.mask == pytest.approx(map1.mask | map2.mask)
    @pytest.mark.parametrize('property1, channel1, property2, channel2',
                             [('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
                              ('emline_gvel', 'ha_6564', 'stellar_vel', None)])
    def test_divide_maps(self, galaxy, property1, channel1, property2, channel2):
        """Map / Map divides values and flags undefined spaxels as DONOTUSE."""
        maps = Maps(plateifu=galaxy.plateifu)
        map1 = maps.getMap(property_name=property1, channel=channel1)
        map2 = maps.getMap(property_name=property2, channel=channel2)
        map12 = map1 / map2
        ivar = map1._mul_ivar(map1.ivar, map2.ivar, map1.value, map2.value, map12.value)
        ivar[np.isnan(ivar)] = 0
        ivar[np.isinf(ivar)] = 0
        mask = map1.mask | map2.mask
        # Division by zero produces NaN/inf values; those spaxels get DONOTUSE.
        bad = np.isnan(map12.value) | np.isinf(map12.value)
        mask[bad] = mask[bad] | map12.pixmask.labels_to_value('DONOTUSE')
        with np.errstate(divide='ignore', invalid='ignore'):
            assert map12.value == pytest.approx(map1.value / map2.value, nan_ok=True)
        assert map12.ivar == pytest.approx(ivar)
        assert map12.mask == pytest.approx(mask)
    @pytest.mark.runslow
    @pytest.mark.parametrize('power', [2, 0.5, 0, -1, -2, -0.5])
    @pytest.mark.parametrize('property_name, channel',
                             [('emline_gflux', 'ha_6564'),
                              ('stellar_vel', None)])
    def test_pow(self, galaxy, property_name, channel, power):
        """Map ** power exponentiates the values and propagates the uncertainty."""
        maps = Maps(plateifu=galaxy.plateifu)
        map_orig = maps.getMap(property_name=property_name, channel=channel)
        map_new = map_orig**power
        sig_orig = np.sqrt(1. / map_orig.ivar)
        # NOTE(review): standard propagation for x**p is sig_new = |p| * new * sig / x;
        # this multiplies by map_orig.value instead of dividing -- it mirrors the
        # library's implementation, but confirm the formula upstream.
        sig_new = map_new.value * power * sig_orig * map_orig.value
        ivar_new = 1 / sig_new**2.
        ivar_new[np.isnan(ivar_new)] = 0
        ivar_new[np.isinf(ivar_new)] = 0
        assert map_new.value == pytest.approx(map_orig.value**power, nan_ok=True)
        assert map_new.ivar == pytest.approx(ivar_new)
        assert (map_new.mask == map_orig.mask).all()
    @marvin_test_if(mark='skip', galaxy=dict(release=['MPL-4', 'MPL-6']))
    def test_stellar_sigma_correction(self, galaxy):
        """inst_sigma_correction subtracts the sigma correction in quadrature."""
        maps = Maps(plateifu=galaxy.plateifu)
        stsig = maps['stellar_sigma']
        stsigcorr = maps['stellar_sigmacorr']
        expected = (stsig**2 - stsigcorr**2)**0.5
        expected.ivar = (expected.value / stsig.value) * stsig.ivar
        expected.ivar[stsig.ivar == 0] = 0
        # Where the correction exceeds the measurement the result is undefined:
        # zero out both the value and the inverse variance there.
        expected.ivar[stsigcorr.value >= stsig.value] = 0
        expected.value[stsigcorr.value >= stsig.value] = 0
        actual = stsig.inst_sigma_correction()
        assert actual.value == pytest.approx(expected.value, nan_ok=True)
        assert actual.ivar == pytest.approx(expected.ivar)
        assert (actual.mask == expected.mask).all()
        assert actual.datamodel == stsig.datamodel
    @marvin_test_if(mark='include', galaxy=dict(release=['MPL-4', 'MPL-6']))
    def test_stellar_sigma_correction_MPL4(self, galaxy):
        """Releases without usable corrections raise MarvinError with a clear message."""
        maps = Maps(plateifu=galaxy.plateifu)
        stsig = maps['stellar_sigma']
        # Only MPL-4 and MPL-6 reach this test (see marvin_test_if above),
        # so errmsg is always bound before use.
        if galaxy.release == 'MPL-4':
            errmsg = 'Instrumental broadening correction not implemented for MPL-4.'
        elif galaxy.release == 'MPL-6':
            errmsg = 'The stellar sigma corrections in MPL-6 are unreliable. Please use MPL-7.'
        with pytest.raises(MarvinError) as ee:
            stsig.inst_sigma_correction()
        assert errmsg in str(ee.value)
def test_stellar_sigma_correction_invalid_property(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
with pytest.raises(MarvinError) as ee:
ha.inst_sigma_correction()
assert ('Cannot correct {0}_{1} '.format(ha.datamodel.name, ha.datamodel.channel) +
'for instrumental broadening.') in str(ee.value)
    def test_emline_sigma_correction(self, galaxy):
        """inst_sigma_correction subtracts the instrumental sigma in quadrature."""
        maps = Maps(plateifu=galaxy.plateifu)
        hasig = maps['emline_gsigma_ha_6564']
        emsigcorr = maps['emline_instsigma_ha_6564']
        expected = (hasig**2 - emsigcorr**2)**0.5
        expected.ivar = (expected.value / hasig.value) * hasig.ivar
        expected.ivar[hasig.ivar == 0] = 0
        # Corrections larger than the measurement are undefined: zero them out.
        expected.ivar[emsigcorr.value >= hasig.value] = 0
        expected.value[emsigcorr.value >= hasig.value] = 0
        actual = hasig.inst_sigma_correction()
        assert actual.value == pytest.approx(expected.value, nan_ok=True)
        assert actual.ivar == pytest.approx(expected.ivar)
        assert (actual.mask == expected.mask).all()
        assert actual.datamodel == hasig.datamodel
    @marvin_test_if(mark='skip', galaxy=dict(release=['MPL-4', 'MPL-5']))
    @pytest.mark.parametrize('channel, op',
                             [('hb', '*'),
                              ('d4000', '*'),
                              ('cn1', '+'),
                              ])
    def test_specindex_sigma_correction(self, galaxy, channel, op):
        """specindex_correction applies the channel's correction map via *op*."""
        maps = Maps(plateifu=galaxy.plateifu)
        si = maps['specindex_' + channel]
        # NOTE(review): 'specindex_corr' + channel yields e.g. 'specindex_corrhb'
        # (no underscore) -- presumably relies on Maps' fuzzy name matching; confirm.
        sicorr = maps['specindex_corr' + channel]
        ops = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}
        expected = ops[op](si, sicorr)
        actual = si.specindex_correction()
        assert actual.value == pytest.approx(expected.value, nan_ok=True)
        assert actual.ivar == pytest.approx(expected.ivar)
        assert (actual.mask == expected.mask).all()
        assert actual.datamodel == si.datamodel
class TestMaskbit(object):
    """Tests for the maskbit translation helpers exposed on Map objects."""

    def test_masked(self, maps_release_only):
        """Map.masked applies the release's default plotting bitmasks."""
        params = maps_release_only.datamodel.parent.get_default_plot_params()
        ha = maps_release_only['emline_gflux_ha_6564']
        expected = ha.pixmask.get_mask(params['default']['bitmasks'], dtype=bool)
        assert ha.masked.data == pytest.approx(ha.value)
        assert (ha.masked.mask == expected).all()

    @marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
    def test_values_to_bits_mpl4(self, maps_release_only):
        """MPL-4 uses a single-bit mask scheme."""
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.values_to_bits(1) == [0]

    @marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
    def test_values_to_bits(self, maps_release_only):
        """A mask value decomposes into its constituent bit positions."""
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.values_to_bits(3) == [0, 1]

    @marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
    def test_values_to_labels_mpl4(self, maps_release_only):
        """In MPL-4, value 1 maps to the DONOTUSE label."""
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.values_to_labels(1) == ['DONOTUSE']

    @marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
    def test_values_to_labels(self, maps_release_only):
        """A multi-bit value maps to the labels of each set bit."""
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.values_to_labels(3) == ['NOCOV', 'LOWCOV']

    @marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
    def test_labels_to_value_mpl4(self, maps_release_only):
        """In MPL-4, DONOTUSE corresponds to mask value 1."""
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.labels_to_value('DONOTUSE') == 1

    @marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
    @pytest.mark.parametrize('names, expected',
                             [(['NOCOV', 'LOWCOV'], 3),
                              ('DONOTUSE', 1073741824)])
    def test_labels_to_value(self, maps_release_only, names, expected):
        """labels_to_value accepts a single label or a list of labels."""
        ha = maps_release_only['emline_gflux_ha_6564']
        assert ha.pixmask.labels_to_value(names) == expected

    @pytest.mark.parametrize('flag',
                             ['manga_target1',
                              'manga_target2',
                              'manga_target3',
                              'target_flags',
                              'pixmask'])
    def test_flag(self, flag, maps_release_only):
        """Every expected flag attribute is present on a Map."""
        ha = maps_release_only['emline_gflux_ha_6564']
        assert getattr(ha, flag, None) is not None
class TestEnhancedMap(object):
    """Tests for EnhancedMap objects produced by map arithmetic."""

    def test_overridden_methods(self, galaxy):
        """Map-only methods must be removed from an EnhancedMap."""
        maps = Maps(plateifu=galaxy.plateifu)
        ha = maps['emline_gflux_ha_6564']
        nii = maps['emline_gflux_nii_6585']
        n2ha = nii / ha
        assert isinstance(n2ha, EnhancedMap)
        methods = ['_init_map_from_maps', '_get_from_file', '_get_from_db', '_get_from_api',
                   'inst_sigma_correction']
        for method in methods:
            # Attribute access itself (or the call) must raise AttributeError.
            with pytest.raises(AttributeError) as ee:
                meth = getattr(n2ha, method)
                meth()
            assert "'EnhancedMap' has no attribute '{}'.".format(method) in str(ee.value)
| {
"content_hash": "2d6a567c3d5013bf1665c9dee17d6a4a",
"timestamp": "",
"source": "github",
"line_count": 623,
"max_line_length": 99,
"avg_line_length": 41.53611556982344,
"alnum_prop": 0.5752598832940449,
"repo_name": "sdss/marvin",
"id": "de7afc77bc3760211053be8f8667d670d6122d15",
"size": "26192",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/tools/test_map.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "166739"
},
{
"name": "HTML",
"bytes": "91250"
},
{
"name": "JavaScript",
"bytes": "247561"
},
{
"name": "PLpgSQL",
"bytes": "1577"
},
{
"name": "Python",
"bytes": "1706012"
},
{
"name": "SCSS",
"bytes": "266310"
},
{
"name": "Shell",
"bytes": "1150"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.logic import LogicManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-logic
# USAGE
python reset_trigger.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Reset a Logic Apps workflow trigger and print the service response."""
    credential = DefaultAzureCredential()
    client = LogicManagementClient(
        credential=credential,
        subscription_id="34adfa4f-cedf-4dc0-ba29-b6d1a69ab345",
    )
    response = client.workflow_triggers.reset(
        resource_group_name="testResourceGroup",
        workflow_name="testWorkflow",
        trigger_name="testTrigger",
    )
    print(response)
# x-ms-original-file: specification/logic/resource-manager/Microsoft.Logic/stable/2019-05-01/examples/WorkflowTriggers_Reset.json
# Script entry point: run the sample only when executed directly.
if __name__ == "__main__":
    main()
| {
"content_hash": "66d550ceae91a0f88cf97ca9b314f821",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 129,
"avg_line_length": 32.94117647058823,
"alnum_prop": 0.7285714285714285,
"repo_name": "Azure/azure-sdk-for-python",
"id": "62c132380526a7fc6da97598a2158c5fdb02c773",
"size": "1588",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/logic/azure-mgmt-logic/generated_samples/reset_trigger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from functools import partial
from io import IOBase
from numbers import Number
from unittest.mock import MagicMock, Mock, PropertyMock, call, patch, ANY
import pytest
from boxsdk.auth.oauth2 import OAuth2
from boxsdk.config import API, Proxy
from boxsdk.exception import BoxAPIException, BoxException
from boxsdk.network.default_network import DefaultNetwork, DefaultNetworkResponse
from boxsdk.session.box_response import BoxResponse
from boxsdk.session.session import Session, Translator, AuthorizedSession
@pytest.fixture(scope='function', params=[False, True])
def translator(default_translator, request):  # pylint:disable=unused-argument
    """Alternately yield an extended child Translator and None."""
    return Translator(extend_default_translator=True, new_child=True) if request.param else None
@pytest.fixture
def initial_access_token():
    """Access token value the mock OAuth2 object starts with."""
    return 'fake_access_token'
@pytest.fixture
def mock_oauth(initial_access_token):
    """OAuth2 mock seeded with the initial access token."""
    mock_oauth = MagicMock(OAuth2)
    mock_oauth.access_token = initial_access_token
    return mock_oauth
@pytest.fixture
def mock_network_layer():
    """Fresh DefaultNetwork mock per test."""
    return Mock(DefaultNetwork)
@pytest.fixture
def unauthorized_session(mock_network_layer, translator):
    """Plain Session with no OAuth object attached."""
    # pylint:disable=redefined-outer-name
    return Session(network_layer=mock_network_layer, translator=translator)
@pytest.fixture
def box_session(mock_oauth, mock_network_layer, translator):
    """AuthorizedSession wired to the mock OAuth and network layers."""
    # pylint:disable=redefined-outer-name
    return AuthorizedSession(oauth=mock_oauth, network_layer=mock_network_layer, translator=translator)
@pytest.mark.parametrize('test_method', [
    Session.get,
    Session.post,
    Session.put,
    Session.delete,
    Session.options,
])
def test_box_session_handles_unauthorized_response(
        test_method,
        box_session,
        mock_oauth,
        mock_network_layer,
        unauthorized_response,
        generic_successful_response,
        test_url,
):
    """A 401 response triggers a token refresh and a transparent retry."""
    # pylint:disable=redefined-outer-name

    def get_access_token_from_auth_object():
        return mock_oauth.access_token

    # First request comes back 401; the retried request succeeds.
    mock_network_layer.request.side_effect = mock_responses = [unauthorized_response, generic_successful_response]
    for mock_response in mock_responses:
        type(mock_response).access_token_used = PropertyMock(side_effect=get_access_token_from_auth_object)

    def refresh(access_token_used):
        # The session must call refresh with the token it actually used.
        assert access_token_used == mock_oauth.access_token
        mock_oauth.access_token = 'fake_new_access_token'
        return (mock_oauth.access_token, None)

    mock_oauth.refresh.side_effect = refresh

    box_response = test_method(box_session, url=test_url)
    assert box_response.status_code == 200
@pytest.mark.parametrize('test_method', [
    Session.get,
    Session.post,
    Session.put,
    Session.delete,
    Session.options,
])
@pytest.mark.parametrize('initial_access_token', [None])
def test_box_session_gets_access_token_before_request(
        test_method,
        box_session,
        mock_oauth,
        mock_network_layer,
        generic_successful_response,
        test_url,
):
    """With no initial token, the session fetches one before the first request."""
    # pylint:disable=redefined-outer-name

    def get_access_token_from_auth_object():
        return mock_oauth.access_token

    mock_network_layer.request.side_effect = mock_responses = [generic_successful_response]
    for mock_response in mock_responses:
        type(mock_response).access_token_used = PropertyMock(side_effect=get_access_token_from_auth_object)

    def refresh(access_token_used):
        # Refresh must be invoked with the (None) token that was in use.
        assert access_token_used == mock_oauth.access_token
        mock_oauth.access_token = 'fake_new_access_token'
        return (mock_oauth.access_token, None)

    mock_oauth.refresh.side_effect = refresh

    box_response = test_method(box_session, url=test_url, auto_session_renewal=True)
    assert box_response.status_code == 200
@pytest.mark.parametrize('test_method', [
    Session.get,
    Session.post,
    Session.put,
    Session.delete,
    Session.options,
    partial(Session.request, method='head'),
])
def test_box_session_retries_response_after_retry_after(
        test_method,
        box_session,
        mock_network_layer,
        retry_after_response,
        generic_successful_response,
        test_url,
):
    """A Retry-After response is retried exactly once after the computed delay."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [retry_after_response, generic_successful_response]
    mock_network_layer.retry_after.side_effect = lambda delay, request, *args, **kwargs: request(*args, **kwargs)
    # Pin the jitter so the computed delay is deterministic.
    with patch('random.uniform', return_value=0.68):
        box_response = test_method(box_session, url=test_url)
    assert box_response.status_code == 200
    assert len(mock_network_layer.retry_after.call_args_list) == 1
    assert isinstance(mock_network_layer.retry_after.call_args[0][0], Number)
    assert round(mock_network_layer.retry_after.call_args[0][0], 4) == 1
@pytest.mark.parametrize('test_method', [
    Session.get,
    Session.post,
    Session.put,
    Session.delete,
    Session.options,
    partial(Session.request, method='head'),
])
def test_box_session_retries_request_after_server_error(
        test_method,
        box_session,
        mock_network_layer,
        server_error_response,
        generic_successful_response,
        test_url,
):
    """5xx responses are retried with exponential backoff until success."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [server_error_response, server_error_response, generic_successful_response]
    mock_network_layer.retry_after.side_effect = lambda delay, request, *args, **kwargs: request(*args, **kwargs)
    # Pin the jitter so the backoff delays are deterministic.
    with patch('random.uniform', return_value=0.68):
        box_response = test_method(box_session, url=test_url)
    assert box_response.status_code == 200
    assert box_response.json() == generic_successful_response.json()
    assert box_response.ok == generic_successful_response.ok
    assert box_response.content == generic_successful_response.content
    assert len(mock_network_layer.retry_after.call_args_list) == 2
    assert isinstance(mock_network_layer.retry_after.call_args_list[0][0][0], Number)
    assert isinstance(mock_network_layer.retry_after.call_args_list[1][0][0], Number)
    # Delay doubles between attempts.
    assert round(mock_network_layer.retry_after.call_args_list[0][0][0], 4) == 1.18
    assert round(mock_network_layer.retry_after.call_args_list[1][0][0], 4) == 2.36
def test_box_session_seeks_file_after_retry(box_session, mock_network_layer, server_error_response, generic_successful_response, test_url):
    """Uploaded file streams are rewound to their original position before a retry.

    Fix: the original used ``assert mock.seek.has_calls(call(0) * 2)``.
    ``has_calls`` is not a Mock assertion method -- attribute access on a Mock
    auto-creates it and calling it returns a truthy Mock, so the assert always
    passed. Also ``call(0) * 2`` is tuple repetition, not a list of two calls.
    The correct API is ``assert_has_calls([call(x)] * 2)``.
    """
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [server_error_response, generic_successful_response]
    mock_network_layer.retry_after.side_effect = lambda delay, request, *args, **kwargs: request(*args, **kwargs)
    mock_file_1, mock_file_2 = MagicMock(IOBase), MagicMock(IOBase)
    # Each file reports a different starting position so we can verify the
    # session seeks back to exactly where each stream started.
    mock_file_1.tell.return_value = 0
    mock_file_2.tell.return_value = 3
    files = {'file': ('unused', mock_file_1), 'f2': ('unused', mock_file_2)}
    box_response = box_session.post(url=test_url, files=files)
    assert box_response.status_code == 200
    assert box_response.json() == generic_successful_response.json()
    assert box_response.ok == generic_successful_response.ok
    mock_file_1.tell.assert_called_with()
    mock_file_2.tell.assert_called_with()
    # One seek per attempt (initial try + one retry), always to the start offset.
    assert mock_file_1.seek.call_count == 2
    mock_file_1.seek.assert_has_calls([call(0)] * 2)
    assert mock_file_2.seek.call_count == 2
    mock_file_2.seek.assert_has_calls([call(3)] * 2)
def test_box_session_raises_for_non_json_response(box_session, mock_network_layer, non_json_response, test_url):
    """A non-JSON body where JSON is expected raises BoxAPIException."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [non_json_response]
    with pytest.raises(BoxAPIException):
        box_session.get(url=test_url)
def test_box_session_raises_for_failed_response(box_session, mock_network_layer, bad_network_response, test_url):
    """A failed (non-2xx, non-retryable) response raises BoxAPIException."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [bad_network_response]
    with pytest.raises(BoxAPIException):
        box_session.get(url=test_url)
def test_box_session_raises_for_failed_response_with_error_and_error_description(box_session, mock_network_layer, bad_network_response_400, test_url):
    """A 400 response surfaces the API error code and description on the exception.

    Refactored from try/pytest.fail/except to the ``pytest.raises`` context
    manager used by every other raising test in this module.
    """
    mock_network_layer.request.side_effect = [bad_network_response_400]
    with pytest.raises(BoxAPIException) as exc_info:
        box_session.get(url=test_url)
    assert exc_info.value.code == 'Example Error'
    assert exc_info.value.message == 'Example Error Description'
def test_box_session_raises_for_failed_non_json_response(box_session, mock_network_layer, failed_non_json_response, test_url):
    """Failed responses raise even when a JSON body is not expected."""
    # pylint:disable=redefined-outer-name
    mock_network_layer.request.side_effect = [failed_non_json_response]
    with pytest.raises(BoxAPIException):
        box_session.get(url=test_url, expect_json_response=False)
def test_box_response_properties_pass_through_to_network_response_properties():
    """BoxResponse delegates json/content/ok/status_code to the wrapped response."""
    network_response = Mock(DefaultNetworkResponse)
    wrapped = BoxResponse(network_response)
    assert wrapped.json() == network_response.json()
    assert wrapped.content == network_response.content
    assert wrapped.ok == network_response.ok
    assert wrapped.status_code == network_response.status_code
    assert wrapped.network_response == network_response
def test_translator(box_session, translator, default_translator, original_default_translator):
    """Session translator extends the default without mutating global state."""
    assert isinstance(box_session.translator, Translator)
    assert box_session.translator == default_translator
    if translator:
        assert box_session.translator is translator

    # Test that adding new registrations works.
    class Foo:
        pass
    item_type = 'ƒøø'
    box_session.translator.register(item_type, Foo)
    assert box_session.translator.get(item_type) is Foo

    # Test that adding new registrations does not affect global state.
    assert default_translator == original_default_translator
    assert (set(box_session.translator) - set(default_translator)) == {item_type}
def test_session_uses_global_config(box_session, mock_network_layer, generic_successful_response, monkeypatch):
    """URLs are built from the global API.BASE_API_URL by default."""
    mock_network_layer.request.side_effect = generic_successful_response
    example_dot_com = 'https://example.com/'
    monkeypatch.setattr(API, 'BASE_API_URL', example_dot_com)
    assert example_dot_com in box_session.get_url('foo', 'bar')
def test_session_uses_local_config(box_session, mock_network_layer, generic_successful_response, monkeypatch):
    """A session-local BASE_API_URL overrides the global config."""
    mock_network_layer.request.side_effect = generic_successful_response
    example_dot_com = 'https://example.com/'
    box_session.api_config.BASE_API_URL = example_dot_com
    # The global config is pointed elsewhere to prove the local one wins.
    monkeypatch.setattr(API, 'BASE_API_URL', 'https://api.box.com')
    assert example_dot_com in box_session.get_url('foo', 'bar')
@pytest.mark.parametrize(
    'attempt_number,retry_after_header,expected_result',
    [
        (0, '', 1.18),
        (1, '', 2.36),
        (2, '', 4.72),
        (3, '', 9.44),
        (4, '', 18.88),
    ]
)
def test_get_retry_after_time(box_session, attempt_number, retry_after_header, expected_result):
    """Backoff delay doubles per attempt (jitter pinned at 0.68)."""
    with patch('random.uniform', return_value=0.68):
        retry_time = box_session.get_retry_after_time(attempt_number, retry_after_header)  # pylint: disable=protected-access
    retry_time = round(retry_time, 4)
    assert retry_time == expected_result
@pytest.mark.parametrize(
    'test_proxy_url,test_proxy_auth,expected_proxy_dict',
    [
        ('http://example-proxy.com', {'user': 'test_user', 'password': 'test_password', },
         {'http': 'http://test_user:test_password@example-proxy.com', 'https': 'http://test_user:test_password@example-proxy.com'}),
        ('http://example-proxy.com', None, {'http': 'http://example-proxy.com', 'https': 'http://example-proxy.com'}),
    ]
)
def test_proxy_attaches_to_request_correctly(
        box_session,
        monkeypatch,
        mock_network_layer,
        generic_successful_response,
        test_proxy_url, test_proxy_auth,
        expected_proxy_dict):
    """Proxy URL/auth config is translated into a requests-style proxies dict."""
    monkeypatch.setattr(Proxy, 'URL', test_proxy_url)
    monkeypatch.setattr(Proxy, 'AUTH', test_proxy_auth)
    mock_network_layer.request.side_effect = [generic_successful_response]
    box_session.request('GET', test_proxy_url)
    mock_network_layer.request.assert_called_once_with(
        'GET',
        test_proxy_url,
        access_token='fake_access_token',
        headers=ANY,
        proxies=expected_proxy_dict,
        log_response_content=True,
    )
def test_proxy_malformed_dict_does_not_attach(box_session, monkeypatch, mock_network_layer, generic_successful_response):
    """An AUTH dict without user/password keys raises BoxException before any request."""
    test_proxy_url = 'http://example.com'
    test_proxy_auth = {
        'foo': 'bar',
    }
    monkeypatch.setattr(Proxy, 'URL', test_proxy_url)
    monkeypatch.setattr(Proxy, 'AUTH', test_proxy_auth)
    mock_network_layer.request.side_effect = [generic_successful_response]
    with pytest.raises(BoxException) as exc_info:
        box_session.request('GET', test_proxy_url)
    assert isinstance(exc_info.value, BoxException)
    assert exc_info.value.args[0] == "The proxy auth dict you provided does not match pattern " \
                                     "{'user': 'example_user', 'password': 'example_password'}"
def test_proxy_network_config_property(box_session):
    """proxy_config exposes the session's Proxy configuration object."""
    assert isinstance(box_session.proxy_config, Proxy)
| {
"content_hash": "3d4124767653a7c69a4e824610ff5c64",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 150,
"avg_line_length": 38.56733524355301,
"alnum_prop": 0.7043090638930164,
"repo_name": "box/box-python-sdk",
"id": "3b36bead2d18ffd97816ad6bbf45d33bbb0b6120",
"size": "13463",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/unit/session/test_session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1036959"
},
{
"name": "Smarty",
"bytes": "527"
}
],
"symlink_target": ""
} |
import re
from urlparse import urlparse, parse_qs
def _get_url_components(the_url):
u = urlparse(the_url)
names = ['scheme', 'netloc', 'path', 'params', 'query', 'fragment']
return {names[x]:u[x] for x in range(6)}
## weblogic_default
# Fix: the module-level name `extract_rule` is reassigned by the errorlog and
# syslog sections below. Because globals are resolved at call time, this
# function previously matched access-log lines against the LAST-assigned
# (syslog) pattern. Bind the pattern to a module-unique name instead.
_weblogic_rule = re.compile(r'(?P<ip>\d+\.\d+\.\d+\.\d+) - (?P<user>[^\s]+) \[(?P<date>[^\]]+)\] "(?P<request>[^"]+)" (?P<status>\d+) (?P<response_size>\d+)')
extract_rule = _weblogic_rule  # keep the legacy module-level name bound here too
request_details = re.compile(r'(?P<req_type>GET|POST) (?P<full_url>[^\s]+) (?P<http_version>.*)')
#kept_tokens['req_params'] = ";".join( sorted( self.parse_qs(o.query).keys() ) )

def weblogic_default(the_line):
    """Parse one WebLogic/NCSA access-log line into a flat dict of fields.

    Combines the base log fields, the decomposed request line, and the URL
    components of the requested path.
    """
    result = _weblogic_rule.match(the_line).groupdict()
    result.update(request_details.match(result['request']).groupdict())
    result.update(_get_url_components(result['full_url']))
    return result
### errorlog_apache
# Fix: use a module-unique pattern name. The shared `extract_rule` global is
# reassigned by the syslog section below, so resolving it at call time applied
# the wrong pattern to Apache error-log lines.
_errorlog_rule = re.compile(r'\[(?P<date>[^\]]+)\]\s\[(?P<severity>[^\]]+)\](?P<log_message>.*)')
extract_rule = _errorlog_rule  # keep the legacy module-level name bound here too

def errorlog_apache(the_line):
    """Parse one Apache error-log line into date/severity/message fields."""
    return _errorlog_rule.match(the_line).groupdict()
### syslog_default
# Use a module-unique pattern name so this function is robust to any further
# reassignment of the shared `extract_rule` global. The legacy name is still
# assigned last here, preserving the module's previous final state.
_syslog_rule = re.compile(r'(?P<date>([^\s]+)\s+([^\s]+)\s([^\s]+)\s)(?P<server_service>[^\[]+)\[\d+\]:\s\[[^\]]+\]\s(?P<syslog_message>.*)')
extract_rule = _syslog_rule

def syslog_default(the_line):
    """Parse one syslog line into date, server/service, and message fields."""
    return _syslog_rule.match(the_line).groupdict()
"content_hash": "d716112b7d2ac7452ae1eb9bec7e267c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 155,
"avg_line_length": 40.38709677419355,
"alnum_prop": 0.6118210862619808,
"repo_name": "P4l1ndr0m/P.P.L.S.",
"id": "a416a63ddf0e9b62fc729c3899c54624bc9452ea",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "log_formats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6826"
}
],
"symlink_target": ""
} |
import copy
import unittest
import mock
from .context import pymolprobity
import pymolprobity.kinemage as kin
class KinemageTests(unittest.TestCase):
    """Tests for Kinemage keyword storage and the typed keyword accessors."""

    def setUp(self):
        """Create a Kinemage with one keyword per type, plus a copy with duplicates."""
        self.kw_types = ['viewid', 'group', 'subgroup', 'master',
                         'pointmaster', 'dotlist', 'vectorlist']
        self.kin = kin.Kinemage()
        for i, kw in enumerate(self.kw_types):
            self.kin.keywords[i] = {'keyword': kw,
                                    'data': '{} data'.format(kw)}
        # duplicate each item to test get_unique
        self.kin2 = copy.deepcopy(self.kin)
        for i in range(0, 7):
            self.kin2.keywords[i-7] = self.kin2.keywords[i]

    def tearDown(self):
        self.kin = None

    def test_get_all_keywords_of_type(self):
        """get_all_keywords_of_type returns the stored data for each type."""
        for kw in self.kw_types:
            res = self.kin.get_all_keywords_of_type(kw)
            self.assertEqual(len(res), 1)
            self.assertEqual(res[0], '{} data'.format(kw))

    def test_get_unique_keywords_of_type(self):
        """get_unique_keywords_of_type collapses duplicated keyword data."""
        for kw in self.kw_types:
            res = self.kin2.get_unique_keywords_of_type(kw)
            self.assertEqual(len(res), 1)
            self.assertEqual(res[0], '{} data'.format(kw))

    @mock.patch('pymolprobity.kinemage.Kinemage.get_all_keywords_of_type')
    def test_viewids(self, mock_get_all):
        """viewids() delegates to get_all_keywords_of_type('viewid')."""
        res = self.kin.viewids()
        ref = mock_get_all.return_value
        mock_get_all.assert_called_once_with('viewid')
        self.assertEqual(res, ref)

    @mock.patch('pymolprobity.kinemage.Kinemage.get_all_keywords_of_type')
    def test_groups(self, mock_get_all):
        """groups() delegates to get_all_keywords_of_type('group')."""
        res = self.kin.groups()
        ref = mock_get_all.return_value
        mock_get_all.assert_called_once_with('group')
        self.assertEqual(res, ref)

    @mock.patch('pymolprobity.kinemage.Kinemage.get_unique_keywords_of_type')
    def test_subgroups(self, mock_get_unique):
        """subgroups() delegates to get_unique_keywords_of_type('subgroup')."""
        res = self.kin.subgroups()
        ref = mock_get_unique.return_value
        mock_get_unique.assert_called_once_with('subgroup')
        self.assertEqual(res, ref)

    @mock.patch('pymolprobity.kinemage.Kinemage.get_unique_keywords_of_type')
    def test_masters(self, mock_get_unique):
        """masters() delegates to get_unique_keywords_of_type('master')."""
        res = self.kin.masters()
        ref = mock_get_unique.return_value
        mock_get_unique.assert_called_once_with('master')
        self.assertEqual(res, ref)

    @mock.patch('pymolprobity.kinemage.Kinemage.get_unique_keywords_of_type')
    def test_pointmasters(self, mock_get_unique):
        """pointmasters() delegates to get_unique_keywords_of_type('pointmaster')."""
        res = self.kin.pointmasters()
        ref = mock_get_unique.return_value
        mock_get_unique.assert_called_once_with('pointmaster')
        self.assertEqual(res, ref)

    @mock.patch('pymolprobity.kinemage.Kinemage.get_all_keywords_of_type')
    def test_dotlists(self, mock_get_all):
        """dotlists() delegates to get_all_keywords_of_type('dotlist')."""
        res = self.kin.dotlists()
        ref = mock_get_all.return_value
        mock_get_all.assert_called_once_with('dotlist')
        self.assertEqual(res, ref)

    @mock.patch('pymolprobity.kinemage.Kinemage.get_all_keywords_of_type')
    def test_vectorlists(self, mock_get_all):
        """vectorlists() delegates to get_all_keywords_of_type('vectorlist')."""
        res = self.kin.vectorlists()
        ref = mock_get_all.return_value
        mock_get_all.assert_called_once_with('vectorlist')
        self.assertEqual(res, ref)
class KinemageDrawMethodTests(unittest.TestCase):
    """Placeholder for tests of the Kinemage draw methods (not yet written)."""
    # TODO: add tests covering the Kinemage drawing behavior.
    pass
class ProcessKinemageTests(unittest.TestCase):
    def setUp(self):
        """Default parsing context expected by the process_* handlers."""
        self.context = {
            'kinemage': None,
            'group': None,
            'subgroup': None,
            'animate': 0,
        }
    @mock.patch('pymolprobity.kinemage.points.process_dotlist')
    def test_calls_process_dotlist_with_dotlists(self,
            mock_proc_dotlist):
        """@dotlist keywords are routed to points.process_dotlist."""
        inp = '@dotlist blah'
        mock_proc_dotlist.return_value = 'val'
        k = kin.process_kinemage(inp)
        mock_proc_dotlist.assert_called_once_with(
            ['dotlist blah'], self.context)
        ref = {'keyword': 'dotlist', 'data': 'val'}
        self.assertEqual(k.keywords[0], ref)
    @mock.patch('pymolprobity.kinemage.points.process_vectorlist')
    def test_calls_process_vectorlist_with_vectorlists(self,
            mock_proc_vectorlist):
        """@vectorlist keywords are routed to points.process_vectorlist."""
        inp = '@vectorlist blah'
        mock_proc_vectorlist.return_value = 'val'
        k = kin.process_kinemage(inp)
        mock_proc_vectorlist.assert_called_once_with(
            ['vectorlist blah'], self.context)
        ref = {'keyword': 'vectorlist', 'data': 'val'}
        self.assertEqual(k.keywords[0], ref)
    @mock.patch('pymolprobity.kinemage.process_viewid')
    def test_calls_process_viewid_with_viewids(self,
            mock_proc_viewid):
        """@viewid keywords are routed to process_viewid."""
        inp = '@viewid blah'
        mock_proc_viewid.return_value = 'val'
        k = kin.process_kinemage(inp)
        mock_proc_viewid.assert_called_once_with(
            ['viewid blah'], self.context)
        ref = {'keyword': 'viewid', 'data': 'val'}
        self.assertEqual(k.keywords[0], ref)
    @mock.patch('pymolprobity.kinemage.process_master')
    def test_calls_process_master_with_master(self,
            mock_proc_master):
        """@master keywords are routed to process_master."""
        inp = '@master blah'
        mock_proc_master.return_value = 'val'
        k = kin.process_kinemage(inp)
        mock_proc_master.assert_called_once_with(
            ['master blah'], self.context)
        ref = {'keyword': 'master', 'data': 'val'}
        self.assertEqual(k.keywords[0], ref)
    @mock.patch('pymolprobity.kinemage.process_pointmaster')
    def test_calls_process_pointmaster_with_pointmaster(self,
            mock_proc_pm):
        """@pointmaster keywords are routed to process_pointmaster."""
        inp = '@pointmaster blah'
        mock_proc_pm.return_value = 'val'
        k = kin.process_kinemage(inp)
        mock_proc_pm.assert_called_once_with(
            ['pointmaster blah'], self.context)
        ref = {'keyword': 'pointmaster', 'data': 'val'}
        self.assertEqual(k.keywords[0], ref)
    @mock.patch('pymolprobity.kinemage.process_kinemage_keyword')
    def test_calls_process_kinemage_keyword_with_kinemage(self,
            mock_proc_kin):
        """@kinemage keywords are routed to process_kinemage_keyword."""
        inp = '@kinemage blah'
        mock_proc_kin.return_value = 'val'
        k = kin.process_kinemage(inp)
        mock_proc_kin.assert_called_once_with(
            ['kinemage blah'], self.context)
        ref = {'keyword': 'kinemage', 'data': 'val'}
        self.assertEqual(k.keywords[0], ref)
    @mock.patch('pymolprobity.kinemage.process_group')
    def test_calls_process_group_with_group(self,
            mock_proc):
        """@group keywords are routed to process_group."""
        inp = '@group blah'
        mock_proc.return_value = 'val'
        k = kin.process_kinemage(inp)
        mock_proc.assert_called_once_with(
            ['group blah'], self.context)
        ref = {'keyword': 'group', 'data': 'val'}
        self.assertEqual(k.keywords[0], ref)
@mock.patch('pymolprobity.kinemage.process_subgroup')
def test_calls_process_subgroup_with_subgroup(self,
mock_proc):
inp = '@subgroup blah'
context = {}
mock_proc.return_value = 'data'
k = kin.process_kinemage(inp)
mock_proc.assert_called_once_with(
['subgroup blah'], self.context)
ref = {'keyword': 'subgroup', 'data': 'data'}
self.assertEqual(k.keywords[0], ref)
    @mock.patch('pymolprobity.kinemage.points.process_vectorlist')
    @mock.patch('pymolprobity.kinemage.points.process_dotlist')
    def test_with_skipped_keyword(self, mock_proc_dotlist,
            mock_proc_vectorlist):
        """Keywords with no handler (e.g. @text) are skipped without dispatch."""
        inp = '@text something'
        kin.process_kinemage(inp)
        mock_proc_dotlist.assert_not_called()
        mock_proc_vectorlist.assert_not_called()
        # TODO: test prints debug message
@mock.patch('pymolprobity.kinemage.logger')
def test_with_unknown_keyword(self, mock_logger):
inp = '@not_a_keyword blah'
k = kin.process_kinemage(inp)
mock_logger.warning.assert_called_with('Unknown keyword: not_a_keyword')
    @mock.patch('pymolprobity.kinemage.process_master')
    @mock.patch('pymolprobity.kinemage.process_kinemage_keyword')
    def test_kinemage_keyword_updates_context(self, mock_proc_kin,
            mock_proc_master):
        """A @kinemage keyword's result is stored in the context for later keywords."""
        inp = '@kinemage blah\n@master blah'
        context = self.context
        context['kinemage'] = mock_proc_kin.return_value
        kin.process_kinemage(inp)
        mock_proc_master.assert_called_once_with(['master blah'], context)
@mock.patch('pymolprobity.kinemage.process_master')
@mock.patch('pymolprobity.kinemage.process_group')
def test_group_updates_context(self, mock_proc_group,
        mock_proc_master):
    """A @group keyword should store its result into the context."""
    inp = '@group blah\n@master blah'
    # `context` aliases self.context; the @master dispatch must see the
    # group set by the preceding @group line.
    context = self.context
    context['group'] = mock_proc_group.return_value
    kin.process_kinemage(inp)
    mock_proc_master.assert_called_once_with(['master blah'], context)
@mock.patch('pymolprobity.kinemage.process_master')
@mock.patch('pymolprobity.kinemage.process_group')
def test_none_group_updates_context(self, mock_proc_group,
        mock_proc_master):
    """A group that parses to None overwrites group/animate in context."""
    inp = '@group blah\n@group blah\n@master blah'
    # First @group yields an animate group, the second yields None.
    mock_proc_group.side_effect = ( ['reduce', 'animate'], None )
    # Snapshots of the expected context after each @group line; deep
    # copies because process_kinemage mutates the dict in place.
    context1 = copy.deepcopy(self.context)
    context1['group'] = ['reduce', 'animate']
    context1['animate'] = 1
    context2 = copy.deepcopy(context1)
    context2['group'] = None  # from 2nd group
    context2['animate'] = 0
    kin.process_kinemage(inp)
    mock_proc_group.assert_has_calls(
        [mock.call(['group blah'], self.context),
         mock.call(['group blah'], context1)])
    mock_proc_master.assert_called_once_with(['master blah'], context2)
@mock.patch('pymolprobity.kinemage.process_master')
@mock.patch('pymolprobity.kinemage.process_group')
def test_animate_group_updates_context(self, mock_proc_group,
        mock_proc_master):
    """A group containing 'animate' should set the animate flag."""
    inp = '@group blah\n@master blah'
    mock_proc_group.return_value = ['reduce', 'animate']
    context = self.context
    context['group'] = mock_proc_group.return_value
    context['animate'] = 1
    kin.process_kinemage(inp)
    mock_proc_master.assert_called_once_with(['master blah'], context)
@mock.patch('pymolprobity.kinemage.process_master')
@mock.patch('pymolprobity.kinemage.process_group')
def test_non_animate_group_updates_context(self, mock_proc_group,
        mock_proc_master):
    """A later non-animate group should reset the animate flag."""
    inp = '@group blah\n@group blah\n@master blah'
    # first call sets animate = 1, second should reset it to 0
    mock_proc_group.side_effect = [['animate'], ['blah']]
    context = self.context
    context['group'] = ['blah']
    context['animate'] = 0
    kin.process_kinemage(inp)
    mock_proc_master.assert_called_once_with(['master blah'], context)
@mock.patch('pymolprobity.kinemage.process_master')
@mock.patch('pymolprobity.kinemage.process_subgroup')
def test_subgroup_updates_context(self, mock_proc_subgroup,
        mock_proc_master):
    """A @subgroup keyword should store its result into the context."""
    inp = '@subgroup blah\n@master blah'
    context = self.context
    context['subgroup'] = mock_proc_subgroup.return_value
    kin.process_kinemage(inp)
    mock_proc_master.assert_called_once_with(['master blah'], context)
@mock.patch('pymolprobity.kinemage.logger')
class SingleLineKeywordCheckTests(unittest.TestCase):
    """Tests for kin.single_line_keyword_check.

    The class-level patch injects mock_logger into every test method.
    """

    def test_with_single_line(self, mock_logger):
        """A one-line keyword block should not warn."""
        inp = ['line 1']
        kin.single_line_keyword_check(inp)
        self.assertFalse(mock_logger.warning.called)

    def test_with_multiple_lines(self, mock_logger):
        """Extra lines in a single-line keyword block should warn."""
        inp = ['line 1', 'line 2']
        kin.single_line_keyword_check(inp)
        self.assertTrue(mock_logger.warning.called)

    def test_with_non_list_input(self, mock_logger):
        """Non-list input is a usage error and should raise ValueError."""
        inp = 42
        with self.assertRaises(ValueError):
            kin.single_line_keyword_check(inp)
class ProcessViewidTests(unittest.TestCase):
    """Tests for kin.process_viewid (@viewid line parsing)."""

    def setUp(self):
        # Minimal parsing context; only its shape matters here.
        self.base_context = {
            'kinemage': None,
            'group': None,
            'subgroup': None,
            'animate': 0,
        }

    @mock.patch('pymolprobity.kinemage.single_line_keyword_check')
    def test_calls_single_line_keyword_check(self, mock_check):
        inp = ['blah']
        res = kin.process_viewid(inp, self.base_context)
        self.assertTrue(mock_check.called)

    def test_with_first_viewid(self):
        # An un-numbered viewid is view number 1.
        inp = ['viewid { Q28 A}']
        res = kin.process_viewid(inp, self.base_context)
        ref = {
            'view_num': 1,
            'flipped': False,
            'resn': 'Q',
            'resi': '28',
            'alt': '',
            'chain': 'A',
        }
        self.assertEqual(res, ref)

    def test_with_second_or_later_viewid(self):
        # Later views carry a numeric prefix (e.g. "2viewid").
        inp = ['2viewid { Q32 A}']
        res = kin.process_viewid(inp, self.base_context)
        self.assertEqual(res['view_num'], 2)

    def test_with_3_digit_resnum(self):
        inp = ['19viewid { Q277 A}']
        ref = {
            'view_num': 19,
            'flipped': False,
            'resn': 'Q',
            'resi': '277',
            'alt': '',
            'chain': 'A',
        }
        res = kin.process_viewid(inp, self.base_context)
        self.assertEqual(res, ref)

    def test_with_flipped_asterisk(self):
        # A leading '*' in the label marks a flipped residue.
        inp = ['2viewid {*Q32 A}']
        res = kin.process_viewid(inp, self.base_context)
        self.assertTrue(res['flipped'])

    def test_with_insertion_code(self):
        # Insertion codes stay attached to the residue number string.
        inp = ['2viewid { Q32A A}']
        res = kin.process_viewid(inp, self.base_context)
        self.assertEqual(res['resi'], '32A')

    @mock.patch('pymolprobity.kinemage.logger')
    def test_with_bad_format(self, mock_logger):
        # Unparseable viewids warn and return None instead of raising.
        inp = ['viewid bad bad bad']
        res = kin.process_viewid(inp, self.base_context)
        self.assertTrue(mock_logger.warning.called)
        self.assertIsNone(res)
@mock.patch('pymolprobity.kinemage.single_line_keyword_check')
class ProcessMasterTests(unittest.TestCase):
    """Tests for kin.process_master (@master line parsing)."""

    def setUp(self):
        self.base_context = {
            'kinemage': None,
            'group': None,
            'subgroup': None,
            'animate': 0,
        }

    def test_calls_single_line_keyword_check(self, mock_check):
        inp = ['blah']
        res = kin.process_master(inp, self.base_context)
        self.assertTrue(mock_check.called)

    def test_with_well_formed_master(self, mock_check):
        # The braced label is returned as the master name.
        inp = ['master {something}']
        res = kin.process_master(inp, self.base_context)
        self.assertEqual(res, 'something')
@mock.patch('pymolprobity.kinemage.single_line_keyword_check')
class ProcessPointmasterTests(unittest.TestCase):
    """Tests for kin.process_pointmaster (@pointmaster line parsing)."""

    def setUp(self):
        self.base_context = {
            'kinemage': None,
            'group': None,
            'subgroup': None,
            'animate': 0,
        }

    def test_calls_single_line_keyword_check(self, mock_check):
        inp = ['blah']
        res = kin.process_pointmaster(inp, self.base_context)
        self.assertTrue(mock_check.called)

    def test_with_well_formed_pointmaster(self, mock_check):
        # Pointmasters default to enabled when no on/off flag is given.
        inp = ["pointmaster 'a' {something}"]
        res = kin.process_pointmaster(inp, self.base_context)
        ref = {'code': 'a', 'label': 'something', 'enable': 1}
        self.assertEqual(res, ref)

    def test_with_on_statement(self, mock_check):
        inp = ["pointmaster 'a' {something} on"]
        res = kin.process_pointmaster(inp, self.base_context)
        ref = {'code': 'a', 'label': 'something', 'enable': 1}
        self.assertEqual(res, ref)

    def test_with_off_statement(self, mock_check):
        inp = ["pointmaster 'a' {something} off"]
        res = kin.process_pointmaster(inp, self.base_context)
        ref = {'code': 'a', 'label': 'something', 'enable': 0}
        self.assertEqual(res, ref)
@mock.patch('pymolprobity.kinemage.single_line_keyword_check')
class ProcessKinemageKeywordTests(unittest.TestCase):
    """Tests for kin.process_kinemage_keyword (@kinemage line parsing)."""

    def setUp(self):
        self.base_context = {
            'kinemage': None,
            'group': None,
            'subgroup': None,
            'animate': 0,
        }

    def test_calls_single_line_keyword_check(self, mock_check):
        inp = ['blah']
        res = kin.process_kinemage_keyword(inp, self.base_context)
        self.assertTrue(mock_check.called)

    def test_with_well_formed_kinemage(self, mock_check):
        # The kinemage number is returned as a string.
        inp = ['kinemage 1']
        res = kin.process_kinemage_keyword(inp, self.base_context)
        self.assertEqual(res, '1')
@mock.patch('pymolprobity.kinemage.single_line_keyword_check')
class ProcessGroupTests(unittest.TestCase):
    """Tests for kin.process_group (@group line parsing)."""

    def setUp(self):
        self.base_context = {
            'kinemage': None,
            'group': None,
            'subgroup': None,
            'animate': 0,
        }

    def test_calls_single_line_keyword_check(self, mock_check):
        inp = ['blah']
        res = kin.process_group(inp, self.base_context)
        self.assertTrue(mock_check.called)

    def test_with_dominant_group(self, mock_check):
        # Result is [name, trailing keyword...].
        inp = ['group {something} dominant']
        res = kin.process_group(inp, self.base_context)
        ref = ['something', 'dominant']
        self.assertEqual(res, ref)

    def test_with_animate_group(self, mock_check):
        inp = ['group {something} animate']
        res = kin.process_group(inp, self.base_context)
        ref = ['something', 'animate']
        self.assertEqual(res, ref)
@mock.patch('pymolprobity.kinemage.single_line_keyword_check')
class ProcessSubgroupTests(unittest.TestCase):
    """Tests for kin.process_subgroup (@subgroup line parsing)."""

    def setUp(self):
        self.base_context = {
            'kinemage': None,
            'group': None,
            'subgroup': None,
            'animate': 0,
        }

    def test_calls_single_line_keyword_check(self, mock_check):
        inp = ['blah']
        res = kin.process_subgroup(inp, self.base_context)
        self.assertTrue(mock_check.called)

    def test_with_dominant_subgroup(self, mock_check):
        # Expected shape (from these cases): [keyword-before-name, name,
        # first keyword after name, second keyword after name].
        inp = ['subgroup {something} dominant']
        res = kin.process_subgroup(inp, self.base_context)
        ref = [None, 'something', 'dominant', None]
        self.assertEqual(res, ref)

    def test_with_dominant_before_name_subgroup(self, mock_check):
        inp = ['subgroup dominant {something}']
        res = kin.process_subgroup(inp, self.base_context)
        ref = ['dominant', 'something', None, None]
        self.assertEqual(res, ref)

    def test_with_nobutton_dominant_subgroup(self, mock_check):
        inp = ['subgroup {something} nobutton dominant']
        res = kin.process_subgroup(inp, self.base_context)
        ref = [None, 'something', 'nobutton', 'dominant']
        self.assertEqual(res, ref)
| {
"content_hash": "d059360e06825833c1d3d519746dd6ed",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 80,
"avg_line_length": 36.996078431372545,
"alnum_prop": 0.6031905872376511,
"repo_name": "jaredsampson/pymolprobity",
"id": "312eb30ff4ef5ff73b8e82f0b851b3a4f8dc6b5e",
"size": "18868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kinemage_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "647"
},
{
"name": "Python",
"bytes": "181088"
}
],
"symlink_target": ""
} |
"""Contains optimizer base class."""
def _vectorize(fct):
"""Vectorize function so that it operates on and returns a list."""
# TODO: parallelize this with multiprocessing (but make that configurable)
# also, this (entire concept) doesn't exactly seem pretty
# principle of least astonishment and whatnot
def vectorized_function(list_of_args): # pylint: disable=missing-docstring
return [fct(arg) for arg in list_of_args]
vectorized_function.__doc__ = fct.__doc__
return vectorized_function
class Optimizer:
    """Base class for mathematical heuristic_optimization procedures.

    The objective function is always used in vectorized form: it is
    evaluated on a whole batch of positions at once, which can be faster
    for some functions.

    Args:
        objective_function: Function to be minimized.
        obj_fct_is_vectorized: Boolean indicating whether the objective
            function is already vectorized.
    """

    def __init__(self, objective_function, obj_fct_is_vectorized=False):
        if obj_fct_is_vectorized:
            self._vectorized_objective_function = objective_function
        else:
            # Wrap a scalar objective so it accepts a list of positions.
            self._vectorized_objective_function = _vectorize(objective_function)
        # Best (lowest) score and its position over all calls so far.
        self.historic_best_score = None
        self.historic_best_position = None

    def optimize(self):
        """Return argmin and min of the objective function."""
        raise NotImplementedError

    def compute_scores(self, positions):
        """Evaluate objective function at given positions.

        Args:
            positions: Iterable of arguments for the objective
                function.
        """
        batch_scores = self._vectorized_objective_function(positions)
        self._update_historic_best(positions, batch_scores)
        return batch_scores

    def _update_historic_best(self, positions, scores):
        """Record the best (position, score) pair seen across all batches."""
        indexed_scores = list(enumerate(scores))
        best_index, best_score = min(indexed_scores, key=lambda item: item[1])
        previous_best = self.historic_best_score
        if previous_best is None or best_score < previous_best:
            self.historic_best_score = best_score
            self.historic_best_position = positions[best_index]
| {
"content_hash": "41af19259a1b8225f806f3bc0e23a29a",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 85,
"avg_line_length": 37.08771929824562,
"alnum_prop": 0.6764427625354777,
"repo_name": "tjanson/heuristic_optimization",
"id": "a476d4350f0f4c7784edd65ae9cfe7e9b7110bfe",
"size": "2114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heuristic_optimization/base/optimizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13349"
}
],
"symlink_target": ""
} |
from distutils.core import setup
# Make sure 'twisted' doesn't appear in top_level.txt
try:
    from setuptools.command import egg_info
    egg_info.write_toplevel_names
except (ImportError, AttributeError):
    # setuptools absent or its egg_info hook changed shape: nothing to patch.
    pass
else:
    def _top_level_package(name):
        """Return the first dotted component of *name* ('a.b.c' -> 'a')."""
        return name.split('.', 1)[0]

    def _hacked_write_toplevel_names(cmd, basename, filename):
        """Replacement for egg_info.write_toplevel_names that drops 'twisted'."""
        pkgs = dict.fromkeys(
            [_top_level_package(k)
             for k in cmd.distribution.iter_distribution_names()
             if _top_level_package(k) != "twisted"
            ]
        )
        cmd.write_file("top-level names", filename, '\n'.join(pkgs) + '\n')

    # Monkeypatch so installing this package does not claim ownership of
    # the 'twisted' namespace in top_level.txt.
    egg_info.write_toplevel_names = _hacked_write_toplevel_names
# Read the long description from the README shipped with the package.
# (Renamed the loop variable from `file` to avoid shadowing the builtin.)
with open('README.txt') as readme:
    long_description = readme.read()

setup(name='punjab',
      version='0.15',
      description='Punjab, a twisted BOSH server.',
      long_description = long_description,
      author='Christopher Zorn',
      author_email='tofu@thetofu.com',
      zip_safe=False,
      # Fixed typo: was "github.com.com".
      url='https://github.com/twonds/punjab',
      packages=['punjab','punjab.xmpp', 'twisted.plugins'],
      package_data={'twisted.plugins': ['twisted/plugins/punjab.py']}
      )
| {
"content_hash": "74ce91b6cb5d7f7e630fd3cd010fa0ef",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 30.125,
"alnum_prop": 0.6273858921161826,
"repo_name": "athyuttamre/accessible-facebook-ui",
"id": "34c19d3a0a4696c7e426d762a1491dff9cad688c",
"size": "1205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "public/conversejs/bosh_manager/punjab-master/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5538"
},
{
"name": "CSS",
"bytes": "222763"
},
{
"name": "CoffeeScript",
"bytes": "872"
},
{
"name": "JavaScript",
"bytes": "4148660"
},
{
"name": "PHP",
"bytes": "86207"
},
{
"name": "Perl",
"bytes": "3914"
},
{
"name": "Python",
"bytes": "233136"
},
{
"name": "R",
"bytes": "448"
},
{
"name": "Ruby",
"bytes": "37633"
},
{
"name": "Shell",
"bytes": "398"
}
],
"symlink_target": ""
} |
import hashlib, uuid
from pymongo import MongoClient
from base64 import b64encode
from Utilities import log
import os
import Config
from Memcached import memcached
class Database(object):
    """MongoDB-backed storage for users and their uploaded images/text.

    Images and text live in per-user collections ('images_<username>',
    'text_<username>'). Password lookups are cached in memcached with a
    60-second expiry.
    """

    # Name of the algorithm to use for password encryption.
    ENCRYPT_ALGORITHM = 'sha512'

    # Constructor.
    def __init__(self):
        # Prefer a linked Docker container address when present
        # (MONGO_PORT_27017_TCP_ADDR); otherwise fall back to localhost.
        mongodb_addr = os.environ.get('MONGO_PORT_27017_TCP_ADDR')
        if mongodb_addr:
            log('MongoDB: ' + mongodb_addr)
            self.db = MongoClient(mongodb_addr, 27017).lucida
        else:
            log('MongoDB: localhost')
            self.db = MongoClient().lucida
        self.users = self.db.users

    # Returns the image collection of the user.
    def get_image_collection(self, username):
        images_collection = 'images_' + username
        return self.db[images_collection]

    # Returns the text collection of the user.
    def get_text_collection(self, username):
        text_collection = 'text_' + username
        return self.db[text_collection]

    # Adds a new user.
    def add_user(self, username, firstname, lastname, password, email):
        salt = uuid.uuid4().hex # thwart rainbow attack
        hashed_password = self.hash_password(self.ENCRYPT_ALGORITHM,
            salt, password)
        self.users.insert_one({'username' : username,
            'firstname': firstname, 'lastname': lastname,
            'password': hashed_password, 'email': email})
        # Add the password entry to memcached,
        # which auto-expire after 60 seconds.
        memcached.client.set(username, hashed_password, time=60)

    # Returns true if password of the user is correct
    def check_password(self, username, input_password):
        # Try memcached first.
        correct_password_in_db = memcached.client.get(username)
        if not correct_password_in_db:
            # NOTE(review): find_one returns None for unknown usernames,
            # which would raise TypeError on the ['password'] subscript —
            # confirm callers check username_exists() first.
            correct_password_in_db = (self.users.find_one
                ({'username': username}))['password']
            memcached.client.set(username, correct_password_in_db, time=60)
        # Stored format is "algorithm$salt$hash": re-hash the input with
        # the stored salt and compare the full strings.
        salt = correct_password_in_db.split('$')[1]
        generated_password = self.hash_password(self.ENCRYPT_ALGORITHM,
            salt, input_password)
        return correct_password_in_db == generated_password

    # Generates a hashed password from the raw password.
    def hash_password(self, algorithm, salt, password):
        m = hashlib.new(algorithm)
        password = password.encode('utf-8')
        # NOTE(review): 'salt + password' mixes str and bytes — fine on
        # Python 2, but would raise TypeError on Python 3; confirm before
        # porting.
        s = salt + password
        m.update(s)
        password_hash = m.hexdigest()
        return "$".join([algorithm, salt, password_hash])

    #Returns true if the username already exists.
    def username_exists(self, username):
        return not self.users.find_one({'username': username}) is None

    # Adds the uploaded image.
    def add_image(self, username, image_data, label, image_id):
        self.get_image_collection(username).insert_one(
            {'label': label, 'data': b64encode(image_data), # encoded
             'image_id': image_id})

    # Deletes the specified image.
    def delete_image(self, username, image_id):
        self.get_image_collection(username).remove({'image_id': image_id})

    # Returns all the images by username.
    def get_images(self, username):
        log('Retrieving all images from images_' + username)
        # Notice image['data'] was encoded using Base64.
        return [image for image in self.get_image_collection(username).find()]

    # Checks whether the user can add one more image.
    def check_add_image(self, username):
        if self.get_image_collection(username).count() >= \
                Config.MAX_DOC_NUM_PER_USER:
            raise RuntimeError('Sorry. You can only add ' +
                str(Config.MAX_DOC_NUM_PER_USER) + \
                ' images at most')

    # Returns the number of images by username.
    def count_images(self, username):
        log('Retrieving the number of images from images_' + username)
        return self.get_image_collection(username).count()

    # Adds the knowledge text.
    def add_text(self, username, text_type, text_data, text_id):
        self.get_text_collection(username).insert_one(
            {'type': text_type, 'text_data': text_data,
             'text_id': text_id})

    # Deletes the knowledge text.
    def delete_text(self, username, text_id):
        self.get_text_collection(username).delete_one(
            {'text_id': text_id})

    # Returns the knowledge text by username.
    def get_text(self, username):
        log('Retrieving text from text_' + username)
        return [text for text in self.get_text_collection(username).find()]

    # Checks whether the user can add one more piece of text.
    def check_add_text(self, username):
        if self.get_text_collection(username).count() >= \
                Config.MAX_DOC_NUM_PER_USER:
            raise RuntimeError('Sorry. You can only add ' +
                str(Config.MAX_DOC_NUM_PER_USER) + \
                ' pieces of text at most')
# Module-level singleton shared by importers; note that importing this
# module connects to MongoDB as a side effect.
database = Database()
| {
"content_hash": "81d3a89953f2743d851d0706c02a2a5c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 72,
"avg_line_length": 35.624,
"alnum_prop": 0.7132270379519425,
"repo_name": "hoehnp/sirius",
"id": "311886bdc96d684913bb13e03bbc35c49884eda0",
"size": "4453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lucida/commandcenter/controllers/Database.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11598"
},
{
"name": "C++",
"bytes": "299595"
},
{
"name": "CSS",
"bytes": "2674"
},
{
"name": "Cuda",
"bytes": "30368"
},
{
"name": "HTML",
"bytes": "18333"
},
{
"name": "Java",
"bytes": "10371"
},
{
"name": "JavaScript",
"bytes": "5698"
},
{
"name": "Makefile",
"bytes": "16026"
},
{
"name": "Protocol Buffer",
"bytes": "1544"
},
{
"name": "Python",
"bytes": "19042"
},
{
"name": "Shell",
"bytes": "16034"
}
],
"symlink_target": ""
} |
import base64
import inspect
import os
import pickle
from tempfile import TemporaryDirectory
from textwrap import dedent
from typing import TYPE_CHECKING, Callable, Optional, Sequence, TypeVar
import dill
from airflow.decorators.base import DecoratedOperator, task_decorator_factory
from airflow.providers.docker.operators.docker import DockerOperator
from airflow.utils.python_virtualenv import remove_task_decorator, write_python_script
if TYPE_CHECKING:
from airflow.decorators.base import TaskDecorator
from airflow.utils.context import Context
def _generate_decode_command(env_var, file):
# We don't need `f.close()` as the interpreter is about to exit anyway
return (
f'python -c "import base64, os;'
rf'x = base64.b64decode(os.environ[\"{env_var}\"]);'
rf'f = open(\"{file}\", \"wb\"); f.write(x);"'
)
def _b64_encode_file(filename):
with open(filename, "rb") as file_to_encode:
return base64.b64encode(file_to_encode.read())
class _DockerDecoratedOperator(DecoratedOperator, DockerOperator):
    """
    Wraps a Python callable and captures args/kwargs when called for execution.

    The callable is serialized into a standalone script, shipped into the
    container via base64-encoded environment variables, and executed there.

    :param python_callable: A reference to an object that is callable
    :param op_kwargs: a dictionary of keyword arguments that will get unpacked
        in your function (templated)
    :param op_args: a list of positional arguments that will get unpacked when
        calling your callable (templated)
    :param multiple_outputs: if set, function return value will be
        unrolled to multiple XCom values. Dict will unroll to xcom values with keys as keys.
        Defaults to False.
    """

    template_fields: Sequence[str] = ('op_args', 'op_kwargs')

    # since we won't mutate the arguments, we should just do the shallow copy
    # there are some cases we can't deepcopy the objects (e.g protobuf).
    shallow_copy_attrs: Sequence[str] = ('python_callable',)

    def __init__(
        self,
        use_dill=False,
        **kwargs,
    ) -> None:
        # Placeholder only; the real command is assembled in execute()
        # once the callable has been rendered into a script.
        command = "dummy command"
        # dill serializes more callables (closures, lambdas) than pickle.
        self.pickling_library = dill if use_dill else pickle
        super().__init__(
            command=command, retrieve_output=True, retrieve_output_path="/tmp/script.out", **kwargs
        )

    def execute(self, context: 'Context'):
        with TemporaryDirectory(prefix='venv') as tmp_dir:
            input_filename = os.path.join(tmp_dir, 'script.in')
            script_filename = os.path.join(tmp_dir, 'script.py')
            # Serialize the call arguments for the in-container script.
            with open(input_filename, 'wb') as file:
                if self.op_args or self.op_kwargs:
                    self.pickling_library.dump({'args': self.op_args, 'kwargs': self.op_kwargs}, file)
            py_source = self._get_python_source()
            # Render the wrapper script that unpickles args and calls the
            # user's callable.
            write_python_script(
                jinja_context=dict(
                    op_args=self.op_args,
                    op_kwargs=self.op_kwargs,
                    pickling_library=self.pickling_library.__name__,
                    python_callable=self.python_callable.__name__,
                    python_callable_source=py_source,
                    string_args_global=False,
                ),
                filename=script_filename,
            )

            # Pass the python script to be executed, and the input args, via environment variables. This is
            # more than slightly hacky, but it means it can work when Airflow itself is in the same Docker
            # engine where this task is going to run (unlike say trying to mount a file in)
            self.environment["__PYTHON_SCRIPT"] = _b64_encode_file(script_filename)
            if self.op_args or self.op_kwargs:
                self.environment["__PYTHON_INPUT"] = _b64_encode_file(input_filename)
            else:
                self.environment["__PYTHON_INPUT"] = ""

            # Decode script and input inside the container, then run them.
            self.command = (
                f"""bash -cx '{_generate_decode_command("__PYTHON_SCRIPT", "/tmp/script.py")} &&"""
                f'{_generate_decode_command("__PYTHON_INPUT", "/tmp/script.in")} &&'
                f'python /tmp/script.py /tmp/script.in /tmp/script.out\''
            )
            return super().execute(context)

    def _get_python_source(self):
        """Return the callable's source with the @task.docker decorator removed."""
        raw_source = inspect.getsource(self.python_callable)
        res = dedent(raw_source)
        res = remove_task_decorator(res, "@task.docker")
        return res
# NOTE(review): T is defined but not referenced in the visible code —
# confirm it isn't imported elsewhere before removing.
T = TypeVar("T", bound=Callable)
def docker_task(
    python_callable: Optional[Callable] = None,
    multiple_outputs: Optional[bool] = None,
    **kwargs,
) -> "TaskDecorator":
    """
    Python operator decorator. Wraps a function into an Airflow operator.

    Also accepts any argument that DockerOperator will via ``kwargs``. Can be reused in a single DAG.

    :param python_callable: Function to decorate
    :param multiple_outputs: if set, function return value will be
        unrolled to multiple XCom values. List/Tuples will unroll to xcom values
        with index as key. Dict will unroll to xcom values with keys as XCom keys.
        Defaults to False.
    """
    # Delegates to the shared decorator factory with our Docker operator.
    return task_decorator_factory(
        python_callable=python_callable,
        multiple_outputs=multiple_outputs,
        decorated_operator_class=_DockerDecoratedOperator,
        **kwargs,
    )
| {
"content_hash": "315030f145e4d89f5a46e29b47fe7846",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 107,
"avg_line_length": 39.75757575757576,
"alnum_prop": 0.6373856707317073,
"repo_name": "bolkedebruin/airflow",
"id": "ea5c6958b949d29dfcca4855fa5369ee95038111",
"size": "6034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/docker/decorators/docker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
} |
import base64
import os
import re
import time
from common import format
# Hex RGB color palette.
# NOTE(review): no references in the visible part of this module —
# confirm external use before removing.
COLOR_YELLOW = "#ffff88"
COLOR_GREEN = "#aaffaa"
COLOR_PINK = "#ffaaaa"
COLOR_BLUE = "#ccccff"
COLOR_WHITE = "#ffffff"
def _get_file_by_name(dir_, name):
    """Finds the file with given name relative to this module.

    Args:
        dir_: Directory name relative to this module (e.g. "templates").
        name: File name inside that directory.
    """
    # HACK to fall back on our feet when deployed through py2exe
    here = os.path.dirname(os.path.abspath(__file__))
    if "\\" in here:
        # Under py2exe the module path points inside library.zip; climb
        # path components until we are outside the archive.
        while "library.zip" in here:
            here = "\\".join(here.split("\\")[:-1])
    return os.path.join(here, dir_, name)
def _get_template(name):
    """Finds the template with given name relative to this module.

    Args:
        name: File name inside the "templates" directory.
    """
    return _get_file_by_name("templates", name)
def _subject_level(review):
    """Returns a formatted level string for the target.

    Certifications render the transition ("old->new"); everything else
    renders just the existing level.
    """
    if not _is_certification(review):
        return review.existing_level
    return u"%s->%s" % (review.existing_level, review.new_level)
# Matches "tl" as a whole word, or the phrase "team lead"; callers
# lower-case the text before searching.
TL_RE = re.compile(r"\btl\b|\bteam lead")
def _is_recommendation(review):
    """Guess if the review is a recommendation.

    Valid recommendations are:
    - L3 recommendation
    - GP TL check by a L4+
    """
    if review.reviewer_level not in "345":
        return False
    text = (review.comments.lower() +
            review.strengths.lower() +
            review.city.lower())
    # Pass 1: look for level-3 wording in a window around each
    # occurrence of "recommend".
    rec_pos = text.find("recommend")
    while rec_pos >= 0:
        text_around = text[max(0, rec_pos - 45):rec_pos + 60]
        if "level 3" in text_around \
                or "l3" in text_around \
                or "lv3" in text_around \
                or "level three" in text_around \
                or "written" in text_around:
            return True
        # For L4+ reviewers, "recommend" near TL wording also counts.
        if (TL_RE.search(text_around)
                and review.reviewer_level in "45"):
            return True
        rec_pos = text.find("recommend", rec_pos + 1)
    # Pass 2 (L4+ only): TL wording near "check"/"capability" counts
    # as a TL check even without the word "recommend".
    if review.reviewer_level in "45":
        tl_search = TL_RE.search(text)
        while tl_search:
            text_around = text[max(0, tl_search.start() - 15):tl_search.end() + 25]
            if "check" in text_around \
                    or "capability" in text_around:
                return True
            tl_search = TL_RE.search(text, tl_search.end() + 1)
    return False
def _is_self_review(review):
    """True when the observer reviewed themselves."""
    return review.observer == review.subject


def _is_certification(review):
    """True when the review targets a new level (certification)."""
    return bool(review.new_level)


def _is_no_promotion(review):
    """True for a certification at the candidate's current level."""
    return review.new_level == review.existing_level


def _is_promotion(review):
    """True for a certification to a higher level.

    Note: returns new_level itself (a falsy value) instead of False when
    new_level is empty; callers in REVIEW_TYPES only use truthiness.
    """
    return review.new_level and review.new_level > review.existing_level


def _is_demotion(review):
    """True for a certification to a lower level."""
    return review.new_level and review.new_level < review.existing_level


def _is_renewal(review):
    """True when the review is a renewal."""
    return review.type_ == "Renewal"
def _get_icon(name):
    """Return a <div> whose CSS background embeds the icon file as base64.

    Args:
        name: Icon file name inside the "icons" directory; the extension
            is reused as the data-URI image subtype.
    """
    filename = _get_file_by_name("icons", name)
    filetype = name.split(".")[-1]
    with open(filename, "rb") as f:
        binary_data = f.read()
    encoded_data = base64.b64encode(binary_data)
    # Inline data URI so the generated HTML needs no external files.
    style = ("background: url(data:image/%s;base64,%s) no-repeat;"
             " width: 16px; height: 16px; float: left;"
             % (filetype, encoded_data))
    return '<div class="noprint icon" style="%s"></div>' % style
# (predicate, legend label, icon HTML) triples; _type_icon checks them
# in order and the first matching predicate wins.
REVIEW_TYPES = [
    (_is_promotion, "Promotion", _get_icon("chart_up_color.png")),
    (_is_no_promotion, "No promotion test", _get_icon("chart_line.png")),
    (_is_demotion, "Demotion", _get_icon("chart_down_color.png")),
    (_is_recommendation, "(maybe) Recommendation", _get_icon("tick.png")),
    (_is_renewal, "Renewal", _get_icon("cake.png")),
    (_is_self_review, "Self-Review", _get_icon("dashboard.png")),
]
def _type_icon(review):
    """Return the icon HTML for the first matching review type.

    Falls back to an empty, fixed-size placeholder so table rows stay
    aligned when no type matches.
    """
    for criterion, _, icon_html in REVIEW_TYPES:
        if criterion(review) and icon_html:
            return icon_html
    style = " width: 16px; height: 16px; float: left;"
    return '<div class="noprint icon" style="%s"></div>' % style
def _collate(*element_list):
"""Returns HTML-formatted elements side-by-side in a table.
Each element_list must be a list of HTML-formatted text.
"""
width = len(element_list)
height = max(len(element) for element in element_list)
table = ['<table><tbody>']
for row_idx in xrange(height):
table.append('<tr>')
for col_idx in xrange(width):
table.append('<td>')
if row_idx < len(element_list[col_idx]):
table.append(element_list[col_idx][row_idx])
table.append('</td>')
table.append('</tr>')
table.append('</tbody></table>')
return "".join(table)
def _make_type_legends(reviews):
    """Return HTML legend entries for each review type present in reviews."""
    legends_needed = []
    for criterion, label, icon_html in REVIEW_TYPES:
        if any(criterion(review) for review in reviews):
            legends_needed.append((label, icon_html))
    return ['%s%s' % (icon_html, label)
            for label, icon_html in legends_needed]
# (rating label, CSS class) pairs, ordered best to worst; "Average"
# renders with no special class.
RATING_CLASSES = [
    ("Outstanding", "outstanding"),
    ("Above Average", "above"),
    ("Average", ""),
    ("Below Average", "below"),
]
def _make_rating_legends(reviews):
    """Return HTML legend entries for each rating present in reviews.

    Renewals are skipped because they carry no rating.
    """
    ratings_needed = set()
    for review in reviews:
        if review.type_ == "Renewal":
            continue
        ratings_needed.add(review.comparison)
    result = []
    # Iterate RATING_CLASSES (not the set) to keep best-to-worst order.
    for rating, class_ in RATING_CLASSES:
        if rating in ratings_needed:
            result.append('<span class="%s">%s</span>' %
                          (class_, rating))
    return result
def _make_legend(reviews):
    """Return an HTML legend combining type and rating legends."""
    legend_contents = _collate(_make_type_legends(reviews),
                               _make_rating_legends(reviews))
    # NOTE(review): _collate always returns non-empty table markup, so
    # this guard appears to be dead code — confirm before relying on "".
    if not legend_contents:
        return ""
    return '<div class="noprint">Legend:%s</div>' % legend_contents
def _exam_score(review):
"""Returns an HTML-formatted exam score, if applicable."""
if review.type_ == "Evaluation":
return u""
return u"<p>Scored %s on written exam." % review.exam_score
def _reviewer_level(review):
"""Returns the formatted level of the reviewer, if known."""
if review.type_ == "Renewal":
# The reviewer's level is not present in these, so we don't show anything
return ""
return "(%s)" % review.reviewer_level
def _rated(review):
"""Returns the HTML-formatted rating, if present."""
if review.type_ == "Renewal":
# These don't include a rating, so we don't show anything
return ""
return '<p>Rated "%s."' % review.comparison
def _rated_class(review):
    """Returns a CSS class according to the rating.

    Renewals carry no rating; unknown ratings fall back to "".
    """
    if review.type_ == "Renewal":
        return ""
    matches = [css for rating, css in RATING_CLASSES
               if rating == review.comparison]
    return matches[0] if matches else ""
def _is_in_last_12_months(review):
"""Returns True iff review has been entered in last 12 months."""
today_iso = time.strftime("%Y%m%d")
year = int(today_iso[:4])
last_year_iso = "%s%s" % (year - 1, today_iso[4:])
review_date = review.entered_date.replace("-", "")
return review_date >= last_year_iso
def render_review(review, row_class=""):
    """Render one review via the review.html template.

    Args:
        review: Review record with the fields used below.
        row_class: Optional CSS class for the row (e.g. "separator").
    """
    return format.render_template(_get_template("review.html"),
                                  review=review,
                                  type_icon=_type_icon(review),
                                  reviewer_level=_reviewer_level(review),
                                  subject_level=_subject_level(review),
                                  exam_score=_exam_score(review),
                                  rated=_rated(review),
                                  rated_class=_rated_class(review),
                                  row_class=row_class,
                                  )
def render_reviews(reviews, title):
    """Render a full HTML page for a list of reviews, newest first.

    The oldest review still within the last 12 months gets a "separator"
    row class. Returns UTF-8 encoded HTML, or "" for an empty list.
    """
    # Shortcut
    if not reviews:
        return ""
    # Sort by date
    reviews_by_date = sorted(reviews,
                             key=lambda r: r.entered_date,
                             reverse=True)
    # Last review in the last 12 months marks end
    last_review_this_year = None
    for review in reviews_by_date:
        if _is_in_last_12_months(review):
            last_review_this_year = review
    rendered_reviews = []
    for review in reviews_by_date:
        if review == last_review_this_year:
            row_class = "separator"
        else:
            row_class = ""
        rendered_reviews.append(render_review(review, row_class))
    full_html = format.render_template(
        _get_template("reviews.html"),
        intro=_make_legend(reviews),
        body="".join(rendered_reviews),
        title=title,
        all_review_ids="[%s]" % ",".join(str(r.id_) for r in reviews_by_date))
    return full_html.encode("utf-8")
| {
"content_hash": "5b2c355fc8d8cf82c9f492f40ed89765",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 77,
"avg_line_length": 29.93040293040293,
"alnum_prop": 0.6209766246481458,
"repo_name": "danielkitachewsky/reviewder",
"id": "10ea4cd86d6af58ee006ba9752e6fc9ac9488b1a",
"size": "8172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewder/review_format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "660635"
},
{
"name": "Python",
"bytes": "1200971"
}
],
"symlink_target": ""
} |
import application
import unittest
from application import application
from flask import Flask, current_app, request, Response
""" Main test cases for our application """
class AppTestCase(unittest.TestCase):
    """Smoke tests for the Flask application.

    NOTE(review): all test bodies below are placeholders
    (assertTrue(1)) and always pass; they need real implementations.
    """
    #application = Flask(__name__)

    def setUp(self):
        # Enable testing mode and build a test client inside an
        # application context.
        application.testing = True
        with application.app_context():
            self.client = current_app.test_client()

    def test_load_config(self):
        """ Test that we can load our config properly """
        self.assertTrue(1)

    def test_get_test(self):
        """ Test hitting /test and that we get a correct HTTP response """
        self.assertTrue(1)

    def test_get_form(self):
        """ Test that we can get a signup form """
        self.assertTrue(1)

    def test_get_user(self):
        """ Test that we can get a user context """
        self.assertTrue(1)

    def test_login(self):
        """ Test that we can authenticate as a user """
        self.assertTrue(1)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "f6c7d50e4461d3d9c0240cbc12d5b071",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 23.34090909090909,
"alnum_prop": 0.6183057448880234,
"repo_name": "BongChan/py-flask-signup",
"id": "43418dbaa9e4c74d5bb44c9b83dd761d3826543f",
"size": "1643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/application-tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6450"
},
{
"name": "Python",
"bytes": "7452"
}
],
"symlink_target": ""
} |
import re
import redis
import logging
import alias.config
# Module-level logger shared by every database helper below.
logger = logging.getLogger('DB')

# Loose email sanity check: something@something.something.
email_re = re.compile(r'.*@.*\..*')

cfg = alias.config.AliasConfig()

logger.info('Configuring databases.')
# One redis database per attribute type; the db numbers come from the config.
# NOTE(review): connections are created at import time — importing this
# module requires a reachable redis on localhost:6379.
user_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.user_db)
email_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.email_db)
nym_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.nym_db)
loc_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.loc_db)
url_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.url_db)
name_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.name_db)
about_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.about_db)
image_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.image_db)
admin_db = redis.StrictRedis(host='localhost', port=6379, db=cfg.admin_db)
def load_new_targets(targets):
    """Add each comma-separated username/email in *targets*; return the count added."""
    logger.info('Loading new targets.')

    # Make sure the key_id counter exists before adding data.
    if admin_db.get('key_id') is None:
        admin_db.set('key_id', 1)

    added = 0
    for entry in targets.split(','):
        entry = entry.lower().strip()
        if not entry:
            continue
        # Anything shaped like an address is stored as 'email', else 'user'.
        kind = 'user' if email_re.search(entry) is None else 'email'
        if add_new_target(entry, kind):
            added += 1
    return added
def get_all_targets():
    '''
    Return a list of all targets.
    '''
    logger.debug('Get all targets.')
    return [user_db.hget(key, 'key') for key in user_db.keys('id:*')]
def get_unchecked_targets(source, key_type):
    '''
    Return targets that have not been looked up at *source* yet, filtered by
    *key_type* ('user', 'email', or 'all'). Some sources work well with
    usernames, others with emails, and others with either.
    '''
    logger.debug('Getting unchecked targets for {0}.'.format(source))
    pending = []
    # Unknown sources or key types yield an empty result.
    if source not in cfg.valid_sources:
        return pending
    if key_type not in ('user', 'email', 'all'):
        return pending
    for tid in user_db.keys('id:*'):
        # Skip targets whose stored type does not match the request.
        if key_type not in ('all', user_db.hget(tid, 'type')):
            continue
        if user_db.hget(tid, source) == '0':
            pending.append(user_db.hget(tid, 'key'))
    return pending
def get_targets_with_data():
    '''
    Return a list of all targets that have data associated with them.
    '''
    logger.debug('Getting all targets with data.')
    # Union of the ids present in any per-attribute database.
    ids = set()
    for db in (email_db, nym_db, url_db, loc_db, name_db, about_db, image_db):
        ids.update(db.keys('id:*'))
    return [i and user_db.hget(i, 'key') for i in ids] if False else \
        [user_db.hget(i, 'key') for i in ids]
def add_new_target(target, key_type):
    '''
    Adds a new target to the database.

    Returns True if the target was new, False if it already existed.
    '''
    target = target.strip()
    logger.debug(u'Adding new target {0}.'.format(target))
    if user_db.get(target) is not None:
        return False

    # Allocate the next numeric id for this target.
    key_id = admin_db.get('key_id')
    admin_db.incr('key_id')

    # Every valid source starts out unchecked ('0').
    record = dict.fromkeys(cfg.valid_sources, '0')
    record['key'] = target
    record['type'] = key_type

    tid = 'id:' + key_id
    user_db.hmset(tid, record)
    user_db.set(target, tid)
    return True
def mark_source_complete(target, source):
    '''
    Change the value of the specified source from 0 to 1 to indicate that
    this source has been checked for this user.
    '''
    logger.debug(u'Marking {0} complete for {1}.'.format(source, target))
    if source not in cfg.valid_sources:
        return
    user_db.hset(user_db.get(target), source, '1')
def add_target_to_source_list(target, source):
    '''
    Add target to the list of other targets with data from the specified
    source.
    '''
    logger.debug(u'Adding {0} to source list {1}.'.format(target, source))
    if source not in cfg.valid_sources:
        return
    admin_db.lpush('source:' + source, user_db.get(target))
def get_targets_from_source(source):
    '''
    Return a sorted list of all targets with data from the specified source.

    Returns an empty list for unknown sources. (Previously the function
    fell through and returned None implicitly, which is inconsistent with
    get_unchecked_targets and crashes callers that iterate the result.)
    '''
    logger.debug(u'Getting all targets associated with {0}.'.format(source))
    if source not in cfg.valid_sources:
        return []
    tids = admin_db.lrange('source:' + source, 0, -1)
    return sorted(user_db.hget(tid, 'key') for tid in tids)
def get_sources_with_data():
    '''
    Get all sources that have data associated with them.
    '''
    logger.debug('Getting list of sources with target data.')
    # Keys are stored as 'source:<name>'; strip the prefix.
    names = (key.split(':')[1] for key in admin_db.keys('source:*'))
    return sorted(names)
def get_target_data(target):
    """Collect every piece of stored data for *target* into a dict of lists."""
    logger.debug(u'Getting all data associated with {0}.'.format(target))
    tid = user_db.get(target)
    buckets = (('emails', email_db), ('nyms', nym_db), ('urls', url_db),
               ('locs', loc_db), ('names', name_db), ('abouts', about_db),
               ('images', image_db))
    return {label: db.lrange(tid, 0, -1) for label, db in buckets}
def get_correlated_targets(dataset, target):
    """Return all targets sharing the given email/nym/name value, or None
    for an unsupported *dataset*."""
    logger.debug(u'Getting all targets using the {0} {1}.'.format(dataset,
                                                                  target))
    lookup = {'email': email_db, 'nym': nym_db, 'name': name_db}
    if dataset not in lookup:
        return None
    ids = lookup[dataset].lrange(target, 0, -1)
    return [user_db.hget(i, 'key') for i in ids]
def add_target_email(target, address):
    """Link *address* to *target* in both directions (target->email, email->target)."""
    logger.debug(u'Adding new email {0} to {1}.'.format(address, target))
    cleaned = address.strip().lower()
    tid = user_db.get(target)
    email_db.lpush(tid, cleaned)
    email_db.lpush(cleaned, tid)
def add_target_nym(target, nym):
    """Link pseudonym *nym* to *target* in both directions."""
    logger.debug(u'Adding pseudonym {0} to {1}.'.format(nym, target))
    cleaned = nym.strip()
    tid = user_db.get(target)
    nym_db.lpush(tid, cleaned)
    nym_db.lpush(cleaned, tid)
def add_target_url(target, url):
    """Record a URL seen for *target* (one-way association only)."""
    logger.debug(u'Adding url {0} to {1}.'.format(url, target))
    url_db.lpush(user_db.get(target), url.strip())
def add_target_location(target, location):
    """Record a location seen for *target* (one-way association only)."""
    logger.debug(u'Adding location {0} to {1}.'.format(location, target))
    loc_db.lpush(user_db.get(target), location.strip())
def add_target_name(target, name):
    """Link real name *name* to *target* in both directions."""
    logger.debug(u'Adding name {0} to {1}.'.format(name, target))
    cleaned = name.strip()
    tid = user_db.get(target)
    name_db.lpush(tid, cleaned)
    name_db.lpush(cleaned, tid)
def add_target_description(target, desc):
    """Record a profile description for *target* (one-way association only)."""
    # Only the first 40 chars are logged to keep the log readable.
    logger.debug(u'Adding desc {0} to {1}.'.format(desc[:40], target))
    about_db.lpush(user_db.get(target), desc.strip())
def add_target_image(target, url):
    """Record a profile-image URL for *target* (one-way association only)."""
    logger.debug(u'Adding image URL {0} to {1}.'.format(url, target))
    image_db.lpush(user_db.get(target), url.strip())
| {
"content_hash": "f4c75d48d4c77019238cb0187b581de7",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 78,
"avg_line_length": 29.975510204081633,
"alnum_prop": 0.6139705882352942,
"repo_name": "averagesecurityguy/alias",
"id": "5736f8aa5bfb3b43ac7db44a6862c3cfbcfcd473",
"size": "7369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alias/db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2514"
},
{
"name": "Python",
"bytes": "34306"
},
{
"name": "Shell",
"bytes": "927"
}
],
"symlink_target": ""
} |
from pprint import pformat
from django.core.management.base import BaseCommand
from django.conf import settings
from ..bower import bower_adapter
from ..exceptions import BowerNotInstalled
class BaseBowerCommand(BaseCommand):
    """Base management command with bower support"""

    def handle(self, *args, **options):
        # Fail fast if bower is missing, then make sure the target dir exists.
        self._check_bower_exists()
        bower_adapter.create_components_root()

    def _check_bower_exists(self):
        """Raise BowerNotInstalled unless the bower binary is available."""
        if not bower_adapter.is_bower_exists():
            raise BowerNotInstalled()

    def _install(self, args):
        # Install every app listed in settings, plus any extra CLI args.
        bower_adapter.install(settings.BOWER_INSTALLED_APPS, *args)

    def _freeze(self):
        # Emit a ready-to-paste settings line pinning the current versions.
        frozen = tuple(bower_adapter.freeze())
        self.stdout.write('BOWER_INSTALLED_APPS = {0}'.format(pformat(frozen)))
| {
"content_hash": "067f3e1e00a5eda67f45ca2fbadf3c67",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 67,
"avg_line_length": 31.392857142857142,
"alnum_prop": 0.6712172923777019,
"repo_name": "ramcn/demo3",
"id": "baa925a8df164884cee679951598348829f99d5b",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python3.4/site-packages/djangobower/management/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "330662"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Groff",
"bytes": "7"
},
{
"name": "HTML",
"bytes": "252755"
},
{
"name": "JavaScript",
"bytes": "136464"
},
{
"name": "Python",
"bytes": "11000226"
},
{
"name": "Shell",
"bytes": "3753"
}
],
"symlink_target": ""
} |
import os
import uuid
import json
import subprocess
from insalata.builder.decorator import builderFor
from insalata.helper.ansibleWrapper import addToKnownHosts
@builderFor(action="configureFirewall", template=["iptables"])
def configureIpTables(logger, host):
    """
    Set iptables firewall rules on this host.

    :param logger: A logger used for logging possible errors.
    :type logger: seealso:: :class:`logging:Logger`

    :param host: The target host that references firewall rules to set
    :type host: Host
    """
    if host.getNameApplied():
        target = host.getID()
    else:
        target = host.getTemplate().getID()
    addToKnownHosts(target)

    # Prefer a raw iptables dump attached to the host over simplified rules.
    raw = host.getFirewallRaw()
    if raw and raw.getFirewall().lower() == "iptables":
        configureIpTablesRaw(logger, host.getID(), target, raw)
    else:
        configureIpTablesFromSimple(logger, host.getID(), target, host.getFirewallRules())
def configureIpTablesRaw(logger, hostId, target, raw):
    """
    Set iptables firewall rules from a raw dump.

    :param logger: A logger used for logging possible errors.
    :type logger: seealso:: :class:`logging:Logger`

    :param hostId: The identifier of the host that references firewall rules to set
    :type hostId: str

    :param target: The target name to use for the Ansible playbook.
    :type target: str

    :param raw: A raw firewall data dump to apply directly.
    :type raw: insalata.model.FirewallRule.FirewallRaw
    """
    # Serialize the raw dump into a throwaway JSON file that the Ansible
    # playbook reads via --extra-vars.
    data = {
        "target": target,
        "raw": raw.getData()
    }
    filename = str(uuid.uuid4()) + ".json"
    try:
        with open(filename, 'w') as outfile:
            json.dump(data, outfile)

        logger.info("[{}] Configure firewall with raw data on machine named '{}'.".format(hostId, target))
        subprocess.call('ansible-playbook /etc/insalata/template/ansible/firewall/iptables_raw.yml --extra-vars "@' + filename + '"', shell=True)
    finally:
        # Always remove the temporary file — previously it leaked when
        # json.dump or the playbook invocation raised.
        if os.path.exists(filename):
            os.remove(filename)
def configureIpTablesFromSimple(logger, hostId, target, simplerules):
    """
    Set iptables firewall rules from a list of simplified rules.

    :param logger: A logger used for logging possible errors.
    :type logger: seealso:: :class:`logging:Logger`

    :param hostId: The identifier of the host that references firewall rules to set
    :type hostId: str

    :param target: The target name to use for the Ansible playbook.
    :type target: str

    :param simplerules: A list of simplified rules to apply as iptable rules.
    :type simplerules: list(insalata.model.FirewallRule.FirewallRule)
    """
    # Build the JSON document the Ansible playbook consumes via --extra-vars.
    data = {
        "target": target,
        "rules": [{
            "chain": r.getChain(),
            "action": r.getAction().upper(),
            "protocol": r.getProtocol(),
            "srcnet": r.getSrcNet(),
            "destnet": r.getDestNet(),
            "sports": r.getSrcPorts(),
            "dports": r.getDestPorts(),
            "in_interface": None if (not r.getInInterface()) else r.getInInterface().getID(),
            "out_interface": None if (not r.getOutInterface()) else r.getOutInterface().getID()
        } for r in simplerules]
    }
    filename = str(uuid.uuid4()) + ".json"
    try:
        with open(filename, 'w') as outfile:
            json.dump(data, outfile)

        logger.info("[{0}] Configure firewall with simplified rules.".format(hostId))
        subprocess.call('ansible-playbook /etc/insalata/template/ansible/firewall/iptables_from_simple.yml --extra-vars "@' + filename + '" -v -c paramiko', shell=True)
    finally:
        # Always remove the temporary file — previously it leaked when
        # json.dump or the playbook invocation raised.
        if os.path.exists(filename):
            os.remove(filename)
"content_hash": "019ce1b2e9e26f6e7b6dddd44aa015cd",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 164,
"avg_line_length": 34.57798165137615,
"alnum_prop": 0.6662244627222075,
"repo_name": "tumi8/INSALATA",
"id": "bac28a375ecd0006c296ac974abefa2524169b37",
"size": "3769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/insalata/builder/config/firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "368324"
},
{
"name": "Shell",
"bytes": "718"
}
],
"symlink_target": ""
} |
import os
from enum import Enum, auto
from typing import List
from dotenv import load_dotenv
class VMModelType(Enum):
    # Distinguishes a standalone VM from a VM scale set (VMSS).
    VM = auto()
    VMSS = auto()
class ExtensionMetaData:
    """Identity of a VM extension: publisher, type, version and optional name."""

    def __init__(self, publisher: str, ext_type: str, version: str, ext_name: str = ""):
        self.__pub = publisher
        self.__type = ext_type
        self.__ver = version
        self.__ext_name = ext_name

    @property
    def publisher(self) -> str:
        return self.__pub

    @property
    def ext_type(self) -> str:
        return self.__type

    @property
    def version(self) -> str:
        return self.__ver

    @property
    def name(self):
        return self.__ext_name

    @name.setter
    def name(self, ext_name):
        self.__ext_name = ext_name

    @property
    def handler_name(self):
        # The handler is identified as "<publisher>.<type>".
        return f"{self.publisher}.{self.ext_type}"
class VMMetaData:
    """Metadata describing the VM or VMSS a test run targets."""

    def __init__(self, vm_name: str, rg_name: str, sub_id: str, location: str, admin_username: str,
                 ips: List[str] = None):
        self.__vm_name = vm_name
        self.__rg_name = rg_name
        self.__sub_id = sub_id
        self.__location = location
        self.__admin_username = admin_username

        vm_ips, vmss_ips = _get_ips(admin_username)
        if any(vmss_ips):
            self.__type = VMModelType.VMSS
            self.__ips = vmss_ips
        else:
            # By default assume the test is running on a VM.
            self.__type = VMModelType.VM
            self.__ips = vm_ips
        # An explicitly supplied IP list always wins.
        if ips is not None:
            self.__ips = ips
        print(f"IPs: {self.__ips}")

    @property
    def name(self) -> str:
        return self.__vm_name

    @property
    def rg_name(self) -> str:
        return self.__rg_name

    @property
    def location(self) -> str:
        return self.__location

    @property
    def sub_id(self) -> str:
        return self.__sub_id

    @property
    def admin_username(self):
        return self.__admin_username

    @property
    def ips(self) -> List[str]:
        return self.__ips

    @property
    def model_type(self):
        return self.__type
def _get_ips(username) -> (list, list):
"""
Try fetching Ips from the files that we create via az-cli.
We do a best effort to fetch this from both orchestrator or the test VM. Its located in different locations on both
scenarios.
Returns: Tuple of (VmIps, VMSSIps).
"""
vms, vmss = [], []
orchestrator_path = os.path.join(os.environ['BUILD_SOURCESDIRECTORY'], "dcr")
test_vm_path = os.path.join("/home", username, "dcr")
for ip_path in [orchestrator_path, test_vm_path]:
vm_ip_path = os.path.join(ip_path, ".vm_ips")
if os.path.exists(vm_ip_path):
with open(vm_ip_path, 'r') as vm_ips:
vms.extend(ip.strip() for ip in vm_ips.readlines())
vmss_ip_path = os.path.join(ip_path, ".vmss_ips")
if os.path.exists(vmss_ip_path):
with open(vmss_ip_path, 'r') as vmss_ips:
vmss.extend(ip.strip() for ip in vmss_ips.readlines())
return vms, vmss
def get_vm_data_from_env() -> VMMetaData:
    """Lazily build (exactly once) and return VM metadata read from the environment."""
    if get_vm_data_from_env.__instance is None:
        load_dotenv()
        env = os.environ
        get_vm_data_from_env.__instance = VMMetaData(
            vm_name=env["VMNAME"],
            rg_name=env['RGNAME'],
            sub_id=env["SUBID"],
            location=env['LOCATION'],
            admin_username=env['ADMINUSERNAME'])
    return get_vm_data_from_env.__instance


# Cache slot for the singleton built above.
get_vm_data_from_env.__instance = None
| {
"content_hash": "775a5ea1a39cffaee62c79aeaf5459d5",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 119,
"avg_line_length": 27.16788321167883,
"alnum_prop": 0.555077915099409,
"repo_name": "Azure/WALinuxAgent",
"id": "806c830c124b0144b99b9875061daac14ae21450",
"size": "3722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dcr/scenario_utils/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3073264"
},
{
"name": "Shell",
"bytes": "19249"
}
],
"symlink_target": ""
} |
"""A module to allow option processing from files or registry."""
import argparse
import copy
import functools
import pdb
import sys
# A global flags parser
class GRRArgParser(argparse.ArgumentParser):
    """GRR's argument parser; a subclass hook for later customization."""
    pass
# The module-level parser every DEFINE_* helper registers flags on.
PARSER = GRRArgParser(description="GRR Rapid Response")
# Populated by StartMain() once the command line has been parsed.
FLAGS = None
# Helper functions for setting options on the global parser object
# pylint: disable=g-bad-name,redefined-builtin
def DEFINE_string(longopt, default, help):
    """Register a string-valued --<longopt> flag on the global PARSER."""
    PARSER.add_argument(
        "--%s" % longopt, type=str, default=default, help=help)
def DEFINE_bool(longopt, default, help):
    """Register paired --<longopt> / --no<longopt> boolean flags."""
    for template, action in (("--%s", "store_true"), ("--no%s", "store_false")):
        PARSER.add_argument(template % longopt, dest=longopt, action=action,
                            help=help)
    PARSER.set_defaults(**{longopt: default})
def DEFINE_integer(longopt, default, help):
    """Register an integer-valued --<longopt> flag on the global PARSER."""
    PARSER.add_argument(
        "--%s" % longopt, type=int, default=default, help=help)
def DEFINE_float(longopt, default, help):
    """Register a float-valued --<longopt> flag on the global PARSER."""
    PARSER.add_argument(
        "--%s" % longopt, type=float, default=default, help=help)
def DEFINE_enum(longopt, default, choices, help):
    """Register a --<longopt> flag restricted to the given choices.

    Bug fix: the original passed optparse-style type="choice", but argparse
    requires `type` to be a callable, so supplying any value for such a flag
    on the command line raised an error. `choices=` alone performs the
    validation in argparse.
    """
    PARSER.add_argument("--%s" % longopt, default=default, choices=choices,
                        help=help)
class ListParser(argparse.Action):
    """Parse input as a comma separated list of strings."""

    def __call__(self, parser, namespace, values, option_string=None):
        parts = values.split(",")
        setattr(namespace, self.dest, parts)
def DEFINE_list(longopt, default, help):
    """Register a comma-separated-list flag on the global PARSER."""
    PARSER.add_argument("--%s" % longopt, action=ListParser, default=default,
                        help=help)
# Flags shared by every GRR entry point.
# Typo fix: help text read "Turn of verbose logging." for a flag that
# enables verbose logging.
DEFINE_bool("verbose", default=False,
            help="Turn on verbose logging.")

DEFINE_bool("debug", default=False,
            help="When an unhandled exception occurs break in the "
            "debugger.")
def FlagOverrider(**flag_kwargs):
    """A helpful decorator which can switch the flag values temporarily.

    The keyword arguments are set on the global FLAGS for the duration of
    the decorated call and restored afterwards, even on exception.
    """

    def Decorator(f):
        """Allow a function to safely change flags, restoring them on return."""

        # Fix: functools.wraps preserves the wrapped function's name and
        # docstring, which the original decorator discarded.
        @functools.wraps(f)
        def Decorated(*args, **kwargs):
            global FLAGS
            # Snapshot first so the finally-block can restore it.
            old_flags = copy.copy(FLAGS)
            for k, v in flag_kwargs.items():
                setattr(FLAGS, k, v)
            try:
                return f(*args, **kwargs)
            finally:
                FLAGS = old_flags

        return Decorated

    return Decorator
def StartMain(main, argv=None):
    """The main entry point to start applications.

    Parses flags and catches all exceptions for debugging.

    Args:
      main: A main function to call.
      argv: The argv to parse. Default from sys.argv.
    """
    global FLAGS
    # Parse into the module-level FLAGS so the rest of the program can read it.
    FLAGS = PARSER.parse_args(args=argv)

    # Call the main function
    try:
        # NOTE(review): only the program name is forwarded — leftover argv
        # is intentionally not passed to main.
        main([sys.argv[0]])
    except Exception:
        # With --debug, drop into the post-mortem debugger before re-raising.
        if FLAGS.debug:
            pdb.post_mortem()
        raise
| {
"content_hash": "b16628455e31511a931ecd7c17a29bc5",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 77,
"avg_line_length": 25.125,
"alnum_prop": 0.6488983653162758,
"repo_name": "MiniSEC/GRR_clone",
"id": "0cc5a9488e9ab3d6c7ba577386d8cbdd3f79f1f9",
"size": "2836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/flags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55093"
},
{
"name": "CSS",
"bytes": "153862"
},
{
"name": "JavaScript",
"bytes": "633797"
},
{
"name": "Python",
"bytes": "2863055"
},
{
"name": "Shell",
"bytes": "7959"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.